本文主要介绍异步爬虫实战——爬取西游记小说,希望能为大家解决编程问题提供一定的参考价值,有需要的开发者们可以跟着小编一起学习!
Python异步爬虫基础知识:异步爬虫
使用异步爬取西游记
import json
import os

import requests  # kept: other parts of the original article may rely on it
import asyncio
import aiohttp  # pip install aiohttp
import aiofiles  # pip install aiofiles


async def getCatalog(url):
    """Fetch the novel's chapter catalog and download every chapter concurrently.

    :param url: catalog API URL (Baidu dushu ``getCatalog`` endpoint)
    :return: None
    """
    # FIX: the original called blocking requests.get() inside an async
    # function, which stalls the event loop; fetch with aiohttp instead.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            data = (await resp.json())['data']['novel']
    print(data)
    tasks = []  # one download task per chapter
    # Schedule a download task for every chapter id in the catalog.
    for item in data['items']:
        cid = item['cid']
        title = item['title']
        tasks.append(asyncio.create_task(getChapterContent(title, cid)))
    # gather() is the modern replacement for asyncio.wait(tasks) here and
    # propagates exceptions instead of silently collecting them.
    await asyncio.gather(*tasks)


async def getChapterContent(title, cid):
    """Download a single chapter by id and write it to ``西游记/<title>``.

    :param title: chapter title (used as the output file name)
    :param cid: chapter id
    :return: None
    """
    # NOTE: book_id is a module-level name assigned in the __main__ block
    # below, matching the original script's structure.
    data = {
        'book_id': book_id,
        'cid': book_id + '|' + cid,
        'need_bookinfo': 1
    }
    # URL of the chapter-content endpoint
    url = "https://dushu.baidu.com/api/pc/getChapterContent?data=" + json.dumps(data)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            data = await resp.json()
            print(data)
            # FIX: the original crashed with FileNotFoundError when the
            # output directory did not exist yet.
            os.makedirs('西游记', exist_ok=True)
            # Write the chapter content asynchronously so the event loop
            # is never blocked on disk I/O.
            async with aiofiles.open(f'西游记/{title}', mode='w', encoding='utf-8') as f:
                await f.write(data['data']['novel']['content'])


if __name__ == '__main__':
    book_id = '4306063500'  # book id
    data = {'book_id': book_id}
    # URL of the catalog endpoint
    url = 'https://dushu.baidu.com/api/pc/getCatalog?data=' + json.dumps(data)
    asyncio.run(getCatalog(url))
这篇关于异步爬虫实战——爬取西游记小说的文章就介绍到这里,希望我们推荐的文章能对开发者们有所帮助!