This is the seventh installment of the first hands-on project in this series; for the previous six installments, please visit Xiaoyuan's homepage.
(link to the previous installment)

In the previous installment, we processed the details of the crawler-engineer jobs on the first ten pages of 前程无忧 (51job) and saved them to an Excel file; see the link above for the results.

I had originally planned to do the plotting in this article, but my other series on pyecharts is not finished yet (I have just gotten to bar charts), and since I intend to tie this project in with that series, the plotting will have to wait.

Today's content is something I came up with on the spur of the moment. The code written so far just rattles on from top to bottom, which is not pretty; in the words of my Python teacher, it is "not elegant". So today's task is to improve the code. Besides a modular refactor, I have added all the exception handling I could think of. The changes are substantial: originally we only crawled the first ten pages of crawler jobs by default, whereas now I want to crawl every back-end category (as shown in the figure), however many pages each one has. That meant adding many try...except blocks and other code, which is hard to explain piece by piece, so here is the code in full:

# -*- coding: utf-8 -*-
# @Time: 2020/11/29 14:21
# @Author: 胡志远
# @Software: PyCharm
# Regular expressions
import re
# HTTP requests
import requests
# The html module of lxml, for XPath parsing
from lxml import html
# Excel output
import xlsxwriter
# Timestamps
import datetime
# JSON parsing (for the city-coordinate file)
import json
# Request headers
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Cookie": "guid=7e8a970a750a4e74ce237e74ba72856b; partner=blog_csdn_net",
    "Host": "jobs.51job.com",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
}
# Job category -> URL template; {} is filled with the page number
title_url = {
    "高级软件工程师": "https://jobs.51job.com/gaojiruanjian/p{}/",
    "软件工程师": "https://jobs.51job.com/ruanjian/p{}/",
    "Java开发工程师": "https://jobs.51job.com/javakaifa/p{}/",
    "PHP开发工程师": "https://jobs.51job.com/phpkaifa/p{}/",
    "C(C++)开发工程师": "https://jobs.51job.com/cyuyankaifa/p{}/",
    "Python开发工程师": "https://jobs.51job.com/pythonkaifa/p{}/",
    ".NET开发工程师": "https://jobs.51job.com/netkaifa/p{}/",
    "Ruby开发工程师": "https://jobs.51job.com/rubykaifa/p{}/",
    "Go开发工程师": "https://jobs.51job.com/gokaifa/p{}/",
    "大数据开发工程师": "https://jobs.51job.com/dashujukaifa/p{}/",
    "Hadoop工程师": "https://jobs.51job.com/hadoopkaifa/p{}/",
    "爬虫开发工程师": "https://jobs.51job.com/pachongkaifa/p{}/",
    "脚本开发工程师": "https://jobs.51job.com/jiaobenkaifa/p{}/",
    "多媒体开发工程师": "https://jobs.51job.com/duomeitikaifa/p{}/",
    "ERP技术开发": "https://jobs.51job.com/erp-jishukaifa/p{}/",
    "区块链开发": "https://jobs.51job.com/qukuailian/p{}/",
    "系统架构设计师": "https://jobs.51job.com/xitongjiagou/p{}/",
    "系统分析员": "https://jobs.51job.com/xitongfenxi/p{}/",
    "技术文员(助理)": "https://jobs.51job.com/jishuwenyuan/p{}/",
    "技术文档工程师": "https://jobs.51job.com/wendanggongchengshi/p{}/"
}
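# For example, filling in a page number by hand (getUrl() below does exactly this):
#   title_url["爬虫开发工程师"].format(2)  ->  "https://jobs.51job.com/pachongkaifa/p2/"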
# Fetch the job list on one page; returns (rows, False) when the URL fails or the page has no jobs
def getContentPerUrl(url):
    content = list()
    try:
        # Request with headers
        res = requests.get(url=url, headers=headers)
    except requests.RequestException:
        print("No such page any more")
        return content, False
    res.encoding = "gbk"  # 51job pages are GBK-encoded
    # Build the Element tree
    etree = html.etree
    html_elements = etree.HTML(res.text)
    # One element per job posting, located by XPath
    Jobs_element = html_elements.xpath('//div[@class="detlist gbox"]/div')
    # Regex that pulls the fields out of each posting's HTML
    regex = '.*href="(.*)">(.*)</a.*href=.*">(.*)</a.*me">(.*)</span.*on">(.*)' \
            '</span.*me">(.*)</span.*:(.*)<s.*:(.*)<s.*:(.*)<s.*:(.*)</p.*title='
    for aJob in Jobs_element:
        content += re.findall(regex, etree.tostring(aJob, encoding="utf-8").decode(), re.S)
    # Turn the list of tuples into a list of lists; the flag is False when nothing matched
    return ([list(tup) for tup in content], True) if content else (content, False)
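# Each matched row carries ten fields, in the same order as the Excel columns
# written in Write() below:
#   (URL, job title, company, location, wage, posting date,
#    education, experience, company type, headcount)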
# Normalize a wage string; returns (formatted range, average), both in 千/月 (thousand CNY per month)
def wage(w):
    try:
        try:
            # Range form, e.g. "1-1.5万/月"
            temp = re.findall(r'(\d+\.?\d*)-(\d+\.?\d*)(.+)/(.+)', w)[0]
            # float() instead of eval() -- safer on scraped text
            wMin = float(temp[0])
            wMax = float(temp[1])
        except IndexError:
            # Single-value form, e.g. "150元/天"
            temp = re.findall(r'(\d+\.?\d*)(-?)(.+)/(.+)', w)[0]
            wMin = float(temp[0])
            wMax = wMin
    # Still no match
    except Exception:
        return "这个有问题!!!", "这个有问题!!!"
    # Convert the amount to thousands of yuan
    if temp[2] == '万':
        wMin, wMax = wMin * 10, wMax * 10
    elif temp[2] == '千以下':
        wMin, wMax = wMin / 2, wMax / 2
    elif temp[2] == '元':
        wMin, wMax = wMin / 1000, wMax / 1000
    # Convert the pay period to one month
    if temp[3] == '年':
        wMin, wMax = wMin / 12, wMax / 12
    elif temp[3] == '天' or temp[3] == '日':  # with normal leave, about 21.5 statutory working days a month
        wMin, wMax = wMin * 21.5, wMax * 21.5
    elif temp[3] == '小时':  # statutory 8 hours a day, roughly 160 hours a month
        wMin, wMax = wMin * 160, wMax * 160
    return "{:.1f}-{:.1f}千/月".format(wMin, wMax), "{:.1f}千/月".format(0.5 * (wMin + wMax))
# Post-process the scraped rows and build the frequency statistics
def treatment(Information):
    # City -> (longitude, latitude) lookup prepared in an earlier installment,
    # assumed to look like {"北京": [116.40, 39.90], ...}
    with open('城市经纬度.json', 'rb') as j:
        city = json.load(j)
    # Six frequency dictionaries
    Education, Experience, Type, Count, Wages, Locale = {}, {}, {}, {}, {}, {}
    for content in Information:
        # Append the coordinates of the workplace city
        try:
            cityName = content[3].split('-')[0] if '-' in content[3] else content[3]
            content.append(str(tuple(city[cityName])))
        except Exception:
            print("{}: workplace of the {} posting is abnormal".format(content[2], content[1]))
            cityName = "异常的工作地点"
            content.append("找不到城市经纬度信息")
        # Normalize the wage and append the average
        content[4], aveWg = wage(content[4])
        content.append(aveWg)
        # Defaults: no education requirement, no experience needed, fewer than 50 hires
        content[6] = content[6] if content[6] != "" else "无学历要求"
        content[7] = content[7] if content[7] != "" else "无需经验"
        content[9] = content[9] if content[9] != "" else "少于50人"
        # Tally education, experience, company type, headcount and workplace coordinates
        Education = returnDict(Education, content[6])
        Experience = returnDict(Experience, content[7])
        Type = returnDict(Type, content[8])
        Count = returnDict(Count, content[9])
        Locale = returnDict(Locale, (cityName, content[10]))
        # Bucket the average wage into bands
        try:
            a = float(re.findall(r'(\d+\.?\d*)千/月', aveWg)[0])
            if a <= 3.0:
                Wages = returnDict(Wages, "平均工资小于等于3000元/月")
            elif a <= 6.0:
                Wages = returnDict(Wages, "平均工资大于3000且小于等于6000元/月")
            elif a <= 9.0:
                Wages = returnDict(Wages, "平均工资大于6000且小于等于9000元/月")
            elif a <= 12.0:
                Wages = returnDict(Wages, "平均工资大于9000且小于等于12000元/月")
            elif a <= 15.0:
                Wages = returnDict(Wages, "平均工资大于12000且小于等于15000元/月")
            else:
                Wages = returnDict(Wages, "平均工资大于15000元/月")
        except Exception:
            Wages = returnDict(Wages, "工资信息异常")
            print("{}: wage of the {} posting is abnormal".format(content[2], content[1]))
    return Education, Experience, Type, Count, Wages, Locale, Information
# Tally helper: add one to the count of key c in dic
def returnDict(dic, c):
    dic[c] = 1 if c not in dic else dic[c] + 1
    return dic
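# e.g. returnDict({"本科": 2}, "本科") -> {"本科": 3};  returnDict({}, "大专") -> {"大专": 1}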
# Write one worksheet of results (Filename is unused here; the workbook wb is global)
def Write(Filename, WorksheetName, inform):
    ws = wb.add_worksheet(WorksheetName)
    # First row: a timestamp
    ws.write(0, 0, "数据截止时间", wb.add_format({'bold': True}))  # bold
    ws.write(0, 1, datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S'))
    # Second row: the column names
    columnNames = ("网址", "岗位名称", "所属公司", "工作地点", "工资待遇", "招聘信息发布时间", "学历要求", "需要资历", "公司类型", "招聘人数", "工作地点经纬度", "平均工资")
    for col, columnName in enumerate(columnNames):
        ws.write(1, col, columnName, wb.add_format({'bold': True}))
    # Body: one row per job; enumerate() instead of list.index(), which would
    # return the first match and misplace duplicate rows or cells
    for row, item in enumerate(inform[6]):
        for col, each in enumerate(item):
            ws.write(row + 2, col, each)
    # Print the six frequency dictionaries for now; they will be used later
    for dic in inform[:6]:
        print(dic)
# Build the URL of page pg for job category wk
def getUrl(wk, pg):
    return title_url[wk].format(pg)

def main():
    # Crawl the categories in title_url; add or remove entries as you like,
    # but note that a worksheet name must not contain the characters '[]:*?/\'
    for name in title_url.keys():
        page = 1
        Table = list()
        # Fetch each page exactly once (the old version fetched every page twice:
        # once for the flag, once for the rows) and stop at the first empty page
        while True:
            rows, ok = getContentPerUrl(getUrl(name, page))
            if not ok:
                break
            Table += rows
            print("{}: page {} done".format(name, page))
            page += 1
        Write(file, name, treatment(Table))

if __name__ == '__main__':
    file = ""  # put a path here; by default the file lands in the project directory
    wb = xlsxwriter.Workbook(file + '前程无忧招聘信息.xlsx')
    main()
    wb.close()

Results

A full run takes about one to two hours, so a screenshot of the whole run is not practical to show here.
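If you only want to sanity-check the pipeline before committing to a full run, one option (my suggestion, not part of the script above) is to cap the number of pages per category; MAX_PAGES below is a made-up name:

# Quick smoke test: replace the while-loop inside main() with a capped version
MAX_PAGES = 2  # hypothetical cap: crawl at most two pages per category
while page <= MAX_PAGES:
    rows, ok = getContentPerUrl(getUrl(name, page))
    if not ok:
        break
    Table += rows
    print("{}: page {} done".format(name, page))
    page += 1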
If you have any questions, feel free to discuss them in the comments. Thank you!

If you think this post is decent, please like, comment and follow; your visits are the inexhaustible driving force behind my updates!

This article: https://blog.csdn.net/I_am_Tony_Stark/article/details/111028923