# @UpdateTime : 2020-12-08 16:53
# @Author : wz
# @File : Get_WebDetails
# @Software: PyCharm
# @used: scrape arbitrary data from an arbitrary page
import re
import urllib.request
from Utils.Log import Logger
Logger_message = Logger()
# Fetch the chapter index page (an HTML page; the response is decoded as utf-8 below)
html = urllib.request.urlopen("https://www.78zw.com/4_4107/").read()
html = html.decode('utf-8')
# print(html)
# Extract the chapter links and titles (filtered with a regular expression)
reg = r'<a href="(.*?)">(.*?) (.*?) </a>'
urls = re.findall(reg, html)    # the extracted links and titles come back in no particular order (messy)
# print(urls)
for url in urls:
    chapter_titles = url[2]
    chapter_url = 'https://www.78zw.com' + str(url[0])
    # print(url[0])
    # Logger_message.loginfo(chapter_url + '\t' + chapter_titles)
    htmls = urllib.request.urlopen(chapter_url).read()
    htmls = htmls.decode('utf-8')
    # print(htmls)
    content_reg = r'<div id="content">(.*?)</div>'
    content = re.findall(content_reg, htmls, re.S)    # re.S lets the chapter body span multiple lines
    # print(content)
    for chapter_text in content:
        strs = chapter_text.replace("<br><br>", "")
        stres = strs.replace("  ", "")
        chapter_record = chapter_titles + "\t" + stres
        print("Downloading chapter: " + chapter_titles)
        # Append the chapter text to a single file (Name.txt)
        with open('Name.txt', 'a', encoding='utf-8') as fn:
            fn.write(chapter_titles + "\n" + chapter_record)
        # Also save each chapter to its own txt file
        with open(chapter_titles + '.txt', 'w', encoding='utf-8') as fn:
            fn.write(chapter_record)
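For reference, the file-header note mentions gbk pages while the script decodes utf-8. Below is a minimal sketch of a helper that picks the charset declared in the HTTP response headers and falls back to utf-8; fetch_html and its fallback parameter are illustrative names, not part of the original script.

import urllib.request

def fetch_html(url, fallback='utf-8'):
    # Decode with the charset declared in the response headers,
    # falling back to utf-8 when none is declared.
    with urllib.request.urlopen(url) as resp:
        charset = resp.headers.get_content_charset() or fallback
        return resp.read().decode(charset, errors='replace')

# Example usage (hypothetical): html = fetch_html("https://www.78zw.com/4_4107/")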

This is my first published article; please point out anything that could be improved.
