This article walks through a crawler for the Liepin (猎聘) job site, with full source code attached. Hopefully it is a useful reference for developers working on similar problems; follow along below.
Without further ado, here is the source code.
The cookies must be replaced with your own. Without logging in, the site only returns 10 pages of results, so to fetch the complete data set the requests have to carry the cookies from a logged-in session.
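A practical note: requests accepts cookies as a dict (or a CookieJar), while the browser's developer tools show them as a single Cookie header string. Below is a minimal sketch of converting one into the other; the helper name cookies_from_string and the example values are mine, not working credentials.

def cookies_from_string(cookie_str):
    # Split "name1=value1; name2=value2" into the dict that requests expects
    cookies = {}
    for pair in cookie_str.split(';'):
        if '=' in pair:
            name, _, value = pair.strip().partition('=')
            cookies[name] = value
    return cookies

# Example with placeholder values copied from the browser's Cookie header:
# cookies = cookies_from_string('__uuid=abc123; user_token=xyz789')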
import requests
import json
from lxml import etree
import os
import openpyxl

headers = {
    "Accept": "application/json, text/plain, */*",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Content-Type": "application/json;charset=UTF-8",
    "Origin": "https://www.liepin.com",
    "Pragma": "no-cache",
    "Referer": "https://www.liepin.com/",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-site",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0",
    "X-Client-Type": "web",
    "X-Fscp-Bi-Stat": "{\"location\": \"https://www.liepin.com/zhaopin/?inputFrom=head_navigation&scene=init&workYearCode=0&ckId=jrkiappybgyczm7c2sk5zmfzwgpqpqia\"}",
    "X-Fscp-Fe-Version": "",
    "X-Fscp-Std-Info": "{\"client_id\": \"40108\"}",
    "X-Fscp-Trace-Id": "f22eb671-3c8f-4f94-8b14-e5e7d176be52",
    "X-Fscp-Version": "1.1",
    "X-Requested-With": "XMLHttpRequest",
    "X-XSRF-TOKEN": "hCnGTNiJQfe47qu4x2OChA",
    "sec-ch-ua": "\"Chromium\";v=\"122\", \"Not(A:Brand\";v=\"24\", \"Microsoft Edge\";v=\"122\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\"",
}
def spiderData():
    # Loop over each result page (the site serves 40 jobs per page)
    for i in range(1, 21):
        # Paste the cookies from your logged-in session here (see the note above);
        # requests expects a dict, not a raw header string
        cookies = {}
        # Search API endpoint
        url = "https://api-c.liepin.com/api/com.liepin.searchfront4c.pc-search-job"
        # Search parameters
        data = {
            "data": {
                "mainSearchPcConditionForm": {
                    "city": "410",
                    "dq": "410",
                    "pubTime": "",
                    "currentPage": f"{i}",
                    "pageSize": 40,
                    "key": "",
                    "suggestTag": "",
                    "workYearCode": "0",
                    "compId": "",
                    "compName": "",
                    "compTag": "",
                    "industry": "H01$H01",
                    "salary": "",
                    "jobKind": 2,
                    "compScale": "",
                    "compKind": "",
                    "compStage": "",
                    "eduLevel": ""
                },
                "passThroughForm": {
                    "scene": "init",
                    "ckId": "0nbwaavz2gngc40f8xmfp59in6ymulua",
                    "skId": "pf8wezdo0ezilzl4tyd1g4tcoyh43qe9",
                    "fkId": "0nbwaavz2gngc40f8xmfp59in6ymulua",
                    "suggest": None
                }
            }
        }
        # Serialize compactly, matching the payload the site sends itself
        data = json.dumps(data, separators=(',', ':'))
        response = requests.post(url, headers=headers, cookies=cookies, data=data).json()
        print(f"Crawling page {i}")
        parseData(response)
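One caution about spiderData: it fires 20 POST requests back to back, plus one detail-page request per job, which makes throttling more likely. A short randomized pause between requests is a cheap mitigation; the helper below is my own sketch, and the 1-3 second range is an arbitrary assumption, not something the site documents.

import time
import random

def polite_sleep(min_s=1.0, max_s=3.0):
    # Sleep a random interval so requests are not perfectly periodic
    time.sleep(random.uniform(min_s, max_s))

Calling polite_sleep() at the end of each loop iteration in spiderData is enough to space the page requests out.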
The next function parses each API response and saves the job listings; here it is with comments added. (The original indexed a fixed range(0, 40) and could call requests.get(None) when a card was missing; iterating the card list directly and skipping cards without a link fixes both problems.)

def parseData(data):
    job_card_list = data.get('data', {}).get('data', {}).get('jobCardList', [])
    for res_json_item in job_card_list:
        # Company name
        comp_name = res_json_item.get('comp', {}).get('compName')
        # Link to the job detail page
        job_link = res_json_item.get('job', {}).get('link')
        # Location
        place = res_json_item.get('job', {}).get('dq')
        # Salary
        salary = res_json_item.get('job', {}).get('salary')
        # Job title
        job = res_json_item.get('job', {}).get('title')
        if not job_link:
            # No detail page to fetch; skip this card
            continue
        # Fetch and parse the job detail page
        sub_data = requests.get(job_link, headers=headers).text
        xml = etree.HTML(sub_data)
        # Job description paragraph from the detail page
        try:
            details = xml.xpath('//dl[@class="paragraph"]/dd/text()')[0]
        except IndexError:
            details = None
        # Company profile
        company_profile = ','.join(xml.xpath("//div[@class='paragraph-box']/div/text()"))
        # Keep only the text before the "截止日期" (application deadline) marker
        try:
            description = ','.join(details.split('截止日期')[0].split())
        except AttributeError:
            description = None
        # Save one row to the Excel sheet
        job_list = [job, place, salary, comp_name, company_profile, description]
        print(job_list)
        save_data_to_xlsx(job_list)
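The bare indexing wrapped in try/except around each XPath query can mask unrelated failures. A small helper makes the "first match or a default" intent explicit; the name xpath_first is my own, not part of lxml.

def xpath_first(tree, expr, default=None):
    # Return the first XPath match, or the default when nothing matches
    nodes = tree.xpath(expr)
    return nodes[0] if nodes else default

# Usage inside parseData, equivalent to the try/except above:
# details = xpath_first(xml, '//dl[@class="paragraph"]/dd/text()')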
Finally, the helper that writes each row to the Excel workbook:

def save_data_to_xlsx(data):
    filename = 'job.xlsx'
    # Column headers: title, location, salary, company name, company profile, description
    name_headers = ['职位', '地点', '薪资', '公司名称', '公司简介', '描述']
    if os.path.exists(filename):
        workbook = openpyxl.load_workbook(filename)
        sheet = workbook.active
        sheet.append(data)
    else:
        workbook = openpyxl.Workbook()
        sheet = workbook.active
        # New file: write the header row first
        sheet.append(name_headers)
        sheet.append(data)
    # Save the Excel file
    workbook.save(filename)

if __name__ == '__main__':
    spiderData()
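One design note: save_data_to_xlsx reloads and rewrites job.xlsx once per job, which gets slow as the file grows. For larger crawls it is cheaper to collect rows in a list and write them in a single pass at the end; the sketch below shows the idea, with save_rows_to_xlsx as a hypothetical name of mine.

def save_rows_to_xlsx(rows, filename='job.xlsx'):
    # Write the header plus all collected rows in one pass
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.append(['职位', '地点', '薪资', '公司名称', '公司简介', '描述'])
    for row in rows:
        sheet.append(row)
    workbook.save(filename)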
That wraps up this article on the Liepin crawler (source code included); we hope it proves helpful to fellow developers!