
Today I'll show you how to generate a pencil sketch from a photo in about 50 lines of code, and make yourself a sketch "master" too. Without further ado, let's look at the result first.

The right-hand side of the image above is our result. So what are the concrete steps?

1. Workflow Analysis

The workflow is very simple: read the image, convert it to grayscale, blur the grayscale image with a Gaussian kernel, then divide the grayscale image by the blurred one. Next, let's look at the concrete implementation.

2. Implementation

Install the required library:

pip install opencv-python
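The crawler and batch sections later in this post also use requests, tqdm, and natsort, so you may want to install everything in one go:

pip install opencv-python requests tqdm natsort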

Import the required library:

import cv2

The main body of the code is also very simple:

import cv2

src = 'images/image_1.jpg'

image_rgb = cv2.imread(src)
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
image_blend = cv2.divide(image_gray, image_blur, scale=255)
cv2.imwrite('result.jpg', image_blend)
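What makes this work is a classic "color dodge" blend: cv2.divide scales each grayscale pixel by its blurred counterpart, so smooth regions, where the grayscale and the blur are nearly equal, are pushed to white, while edges, where the two differ, stay dark. Here is a rough NumPy equivalent of that one line, assuming the variables from the block above (the small epsilon guarding against division by zero is my addition):

import numpy as np

gray = image_gray.astype(np.float32)
blur = image_blur.astype(np.float32)
# divide gray by blur, rescale by 255, and clamp back to the uint8 range
sketch = np.clip(gray * 255.0 / (blur + 1e-6), 0, 255).astype(np.uint8)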

The code above really isn't hard. To help you follow along, I also wrote a more heavily annotated version:

"""
project = 'code', file_name = 'study.py', author = 'ai悦创'
time = '2020/5/19 8:35', product_name = pycharm, 公众号:ai悦创
code is far away from bugs with the god animal protecting
    i love animals. they taste delicious.
"""
import cv2

# path to the original image
src = 'images/image_1.jpg'

# read the image
image_rgb = cv2.imread(src)
# cv2.imshow('rgb', image_rgb)  # preview the original
# cv2.waitKey(0)
# exit()
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
# cv2.imshow('gray', image_gray)  # preview the grayscale image
# cv2.waitKey(0)
# exit()
image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
# cv2.imshow('image_blur', image_blur)  # preview the Gaussian blur
# cv2.waitKey(0)
# exit()

# divide: extract the lines and detail where the two images differ most
image_blend = cv2.divide(image_gray, image_blur, scale=255)
# cv2.imshow('image_blend', image_blend)  # preview the sketch
# cv2.waitKey(0)
cv2.imwrite('result1.jpg', image_blend)

Compared with the first version, this one only adds some preview code; uncomment any of the imshow/waitKey blocks to inspect that intermediate stage.
Some of you may ask: can't I just use an image editor to produce a sketch directly?
So what's the advantage of a program?
The advantage is batch processing: when you have a large number of images, a script can convert all of them quickly and consistently.
And with that, we've turned the photo into a sketch, skr~.
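As a taste of that batch idea, here is a minimal sketch, assuming your photos live in a folder named images/ (both folder names below are placeholders; section 3 builds a fuller version of this loop on top of a crawler):

import os
import glob

import cv2

os.makedirs('results', exist_ok=True)
for i, path in enumerate(glob.glob('images/*.jpg')):
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
    cv2.imwrite(f'results/sketch_{i}.jpg', cv2.divide(gray, blur, scale=255))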

3. Baidu Image Crawler + Sketch Generation

That said, this still isn't a "massive" number of images. To live up to that word, I wrote a Baidu image crawler. This article isn't a crawler tutorial, so I'll present the code directly, organized along software engineering conventions:

# crawler/spider.py
import re
import os
import time
import collections
from collections import namedtuple
from enum import Enum
from concurrent import futures

import requests
from tqdm import tqdm

base_url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryword={keyword}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright=&word={keyword}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn={page}&rn=30&gsm=&1568638554041='

headers = {
    'Referer': 'http://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fr=&sf=1&fmq=1567133149621_r&pv=&ic=0&nc=1&z=0&hd=0&latest=0&copyright=0&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&sid=&word=%e5%a3%81%e7%ba%b8',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}


class BaiduSpider:
    def __init__(self, max_works, images_type):
        self.max_works = max_works
        self.httpstatus = Enum('Status', ['ok', 'not_found', 'error'])
        self.result = namedtuple('Result', 'status data')
        self.session = requests.Session()
        self.img_type = images_type
        self.img_num = None
        self.headers = headers
        self.index = 1
 
    def get_img(self, img_url):
        res = self.session.get(img_url)
        if res.status_code != 200:
            res.raise_for_status()

        return res.content

    def download_one(self, img_url, verbose):
        try:
            image = self.get_img(img_url)
        except requests.exceptions.HTTPError as e:
            res = e.response
            if res.status_code == 404:
                status = self.httpstatus.not_found
                msg = 'not_found'
            else:
                raise
        else:
            self.save_img(self.img_type, image)
            status = self.httpstatus.ok
            msg = 'ok'

        if verbose:
            print(img_url, msg)

        return self.result(status, msg)
 
    def get_img_url(self):
        urls = [base_url.format(keyword=self.img_type, page=page) for page in self.img_num]
        for url in urls:
            res = self.session.get(url, headers=self.headers)
            if res.status_code == 200:
                img_list = re.findall(r'"thumbURL":"(.*?)"', res.text)
                # yield the set of image URLs for the other functions to consume
                yield {img_url for img_url in img_list}
            elif res.status_code == 404:
                print('----- Request failed: resource not found -----')
                yield None
            elif res.status_code == 403:
                print('***** Request failed: server refused access *****')
                yield None
            else:
                print('>>> Network connection failed <<<')
                yield None
 
    def download_many(self, img_url_set, verbose=False):
        if img_url_set:
            counter = collections.Counter()
            with futures.ThreadPoolExecutor(self.max_works) as executor:
                to_do_map = {}
                for img in img_url_set:
                    future = executor.submit(self.download_one, img, verbose)
                    to_do_map[future] = img
                done_iter = futures.as_completed(to_do_map)

                # the loop must stay inside the with block, otherwise the
                # executor's shutdown blocks until every future has finished
                # and the progress bar never updates incrementally
                if not verbose:
                    done_iter = tqdm(done_iter, total=len(img_url_set))
                for future in done_iter:
                    try:
                        res = future.result()
                    except requests.exceptions.HTTPError as e:
                        error_msg = 'HTTP error {res.status_code} - {res.reason}'
                        error_msg = error_msg.format(res=e.response)
                    except requests.exceptions.ConnectionError:
                        error_msg = 'ConnectionError'
                    else:
                        error_msg = ''
                        status = res.status

                    if error_msg:
                        status = self.httpstatus.error

                    counter[status] += 1

                    if verbose and error_msg:
                        img = to_do_map[future]
                        print('*** Error for {}: {}'.format(img, error_msg))
            return counter
 
    def save_img(self, img_type, image):
        with open('{}/{}.jpg'.format(img_type, self.index), 'wb') as f:
            f.write(image)
        self.index += 1

    def what_want2download(self):
        # self.img_type = input('Enter the type of image you want to download, anything works >>> ')
        try:
            os.mkdir(self.img_type)
        except FileExistsError:
            pass
        img_num = input('How many batches to download? (1 unit = 30 images; e.g. 1 downloads 30, 2 downloads 60): >>> ')
        while True:
            if img_num.isdigit():
                img_num = int(img_num) * 30
                self.img_num = range(30, img_num + 1, 30)
                break
            else:
                img_num = input('Invalid input, please enter the number again >>> ')
 
    def main(self):
        # get the image type and the number of images to download
        total_counter = {}
        self.what_want2download()
        for img_url_set in self.get_img_url():
            if img_url_set:
                counter = self.download_many(img_url_set, False)
                for key in counter:
                    if key in total_counter:
                        total_counter[key] += counter[key]
                    else:
                        total_counter[key] = counter[key]
            else:
                # error reporting could be added here
                pass

        time.sleep(.5)
        return total_counter


if __name__ == '__main__':
    max_works = 20
    # images_type is required; 'wallpaper' here is just a sample value
    bd_spider = BaiduSpider(max_works, 'wallpaper')
    print(bd_spider.main())

# sketch_the_generated_code.py
import os

import cv2


def drawing(src, id=None):
    image_rgb = cv2.imread(src)
    image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
    image_blur = cv2.GaussianBlur(image_gray, ksize=(21, 21), sigmaX=0, sigmaY=0)
    image_blend = cv2.divide(image_gray, image_blur, scale=255)
    # make sure the output folder exists; cv2.imwrite fails silently otherwise
    os.makedirs('drawing_images', exist_ok=True)
    cv2.imwrite(f'drawing_images/result-{id}.jpg', image_blend)
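A quick usage check of drawing(), assuming the sample image path from earlier:

from sketch_the_generated_code import drawing

drawing('images/image_1.jpg', id=0)  # writes drawing_images/result-0.jpg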

# image_list/image_list_path.py
import os
from natsort import natsorted  # pip install natsort

images_list = []


def image_list(path):
    global images_list
    for root, dirs, files in os.walk(path):
        # sort file names in natural (human) order
        # files.sort()
        files = natsorted(files)
        # walk through every file
        for file in files:
            # only keep files with a .jpg extension
            if os.path.splitext(file)[1] == '.jpg':
                # build the full path
                # print(file)
                filepath = os.path.join(root, file)
                print(filepath)
                # collect it into the list
                images_list.append(filepath)
    return images_list
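The natsorted call is what keeps numerically named files in human order; a plain sort() would interleave them lexicographically. A tiny illustration with hypothetical file names:

from natsort import natsorted

files = ['10.jpg', '2.jpg', '1.jpg']
print(sorted(files))     # ['1.jpg', '10.jpg', '2.jpg']
print(natsorted(files))  # ['1.jpg', '2.jpg', '10.jpg']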

# main.py
import time

from sketch_the_generated_code import drawing
from crawler.spider import BaiduSpider
from image_list.image_list_path import image_list
import os

max_works = 20

if __name__ == '__main__':
    # now_path = os.getcwd()
    # img_type = 'ai'
    img_type = input('Enter the type of image you want to download, anything works >>> ')
    bd_spider = BaiduSpider(max_works, img_type)
    print(bd_spider.main())
    # give the downloads a moment to land on disk before reading the folder;
    # removing this sleep or making it too short can cause errors
    time.sleep(10)
    for index, path in enumerate(image_list(img_type)):
        drawing(src=path, id=index)

The final directory structure therefore looks like this:

c:.
│  main.py
│  sketch_the_generated_code.py
│
├─crawler
│  │  spider.py
│  │
│  └─__pycache__
│          spider.cpython-37.pyc
│
├─drawing
│  │  result.jpg
│  │  result1.jpg
│  │  sketch_the_generated_code.py
│  │  study.py
│  │
│  ├─images
│  │      image_1.jpg
│  │
│  └─__pycache__
│          sketch_the_generated_code.cpython-37.pyc
│
├─drawing_images
├─image_list
│  │  image_list_path.py
│  │
│  └─__pycache__
│          image_list_path.cpython-37.pyc
│
└─__pycache__
        sketch_the_generated_code.cpython-37.pyc

At this point, all the code is complete.

This concludes this article on batch-generating pencil sketches of photos with a few lines of Python.