In this article I share a code example for a Python crawler that checks whether a job posting already exists. The code is explained in detail below; readers who need something like this are welcome to use it as a reference.

Sample data (one job record as stored in MongoDB):
{ "_id" : ObjectId("5a30ad2068504386f47d9a4b"), "city" : "苏州", "companyShortName" : "蓝海彤翔", "companySize" : "100-499人", "education" : "本科", "financeStage" : "B轮", "industryField" : "互联网", "level" : 3, "pid" : "11889834", "positionLables" : [ "PHP", "ThinkPHP" ], "positionName" : "php研发工程师", "salary" : { "avg" : 7500.0, "low" : 7000, "high" : 8000 }, "time" : "2017-06-06", "updated_at" : "2017-12-13 18:31:15", "workYear" : "1-3年", "detail" : "1、处理landcloud云计算相关系统的各类开发和调研工作;2、处理coms高性能计算的各类开发和调研工作岗位要求:1、本科学历,两年以上工作经验,熟悉PHP开发,了解常用的php开发技巧和框架;2、了解C++,python及Java开发;3、有一定的研发能力和钻研精神;4、有主动沟通能力和吃苦耐劳的精神。", "location" : "苏州市高新区科技城锦峰路158号101park8幢"
Extended example:

Python crawler for Tencent job postings (static crawler)
import requests
from bs4 import BeautifulSoup
from math import ceil

header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}


# Get the number of result pages
def getJobPage(url):
    ret = requests.get(url, headers=header)
    ret.encoding = "utf-8"  # avoid garbled characters
    html = ret.text
    soup = BeautifulSoup(html, 'html.parser')
    # Total number of postings, e.g. <span class="lightblue total">512</span>
    totalJob = soup.select('span[class="lightblue total"]')[0].text
    jobPage = ceil(int(totalJob) / 10)
    return jobPage


# Get responsibilities and requirements from a detail page
def getJobOrder(url):
    ret = requests.get(url, headers=header)
    ret.encoding = "utf-8"  # avoid garbled characters
    html = ret.text
    soup = BeautifulSoup(html, 'html.parser')
    # Job responsibilities
    jobRequests = soup.select('ul[class="squareli"]')[0].text
    # Job requirements
    jobOrder = soup.select('ul[class="squareli"]')[1].text
    return jobRequests, jobOrder


# Get the postings on one listing page
def getJobInfo(url):
    myfile = open("tencent_job.txt", "a", encoding='gb18030', errors='ignore')  # avoid encoding errors
    ret = requests.get(url, headers=header)
    ret.encoding = "utf-8"  # avoid garbled characters
    html = ret.text
    soup = BeautifulSoup(html, 'html.parser')
    jobList = soup.find_all('tr', class_=['even', 'odd'])
    for job in jobList:
        # Detail-page URL
        jobUrl = "https://hr.tencent.com/" + job.select('td:nth-of-type(1) > a')[0]['href']
        # Position name
        jobName = job.select('td:nth-of-type(1) > a')[0].text
        # Number of openings
        jobPeople = job.select('td:nth-of-type(3)')[0].text
        # Location
        jobAddre = job.select('td:nth-of-type(4)')[0].text
        # Publish date
        jobTime = job.select('td:nth-of-type(5)')[0].text
        # Responsibilities and requirements (one extra request per posting)
        jobRequests, jobOrder = getJobOrder(jobUrl)
        # print(jobName, jobUrl, jobAddre, jobPeople, jobTime, jobRequests, jobOrder)
        tt = jobName + " " + jobUrl + " " + jobAddre + " " + jobPeople + " " + jobTime + " " + jobRequests + " " + jobOrder
        myfile.write(tt + "\n")
    myfile.close()


if __name__ == '__main__':
    mainurl = 'https://hr.tencent.com/position.php?keywords=python'
    jobPage = getJobPage(mainurl)
    print(jobPage)
    for page in range(jobPage):
        pageUrl = 'https://hr.tencent.com/position.php?keywords=python&start=' + str(page * 10) + '#a'
        print("Page " + str(page + 1))
        getJobInfo(pageUrl)
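The crawler above appends every row it finds to tencent_job.txt, so running it twice stores duplicates. Coming back to the topic of this article, one way to check whether a posting already exists is to load the job URLs that were previously written to the file and skip rows whose URL has already been seen. This is a minimal sketch under that assumption; the helper name and the usage comment are illustrative, not part of the original script:

import os

def load_seen_urls(path="tencent_job.txt"):
    """Collect the detail-page URLs already written to the output file."""
    seen = set()
    if os.path.exists(path):
        with open(path, encoding="gb18030", errors="ignore") as f:
            for line in f:
                for field in line.split():
                    # Each stored line contains the detail-page URL as one field
                    if field.startswith("https://hr.tencent.com/"):
                        seen.add(field)
    return seen

# Usage inside getJobInfo: skip postings that are already recorded, e.g.
#     seen_urls = load_seen_urls()
#     ...
#     if jobUrl in seen_urls:
#         continue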