This article shows how to scrape a novel with Python. Hopefully it offers a useful reference for developers facing the same problem, so let's work through it together.
The chapters are scraped from 天籁 (the site behind www.23txt.com). A step that merges the chapter files into one text file really should be added, but out of laziness it wasn't, so for now each chapter is saved as a separate text file (a hedged sketch of that missing merge step follows the full script below). To scrape a different book, just change the catalogue link; in theory any book hosted on 天籁 can be scraped.
On each run the script deletes and recreates an empty folder named after the book. It also supports resuming from a given chapter: change the index passed to begin() yourself.
import threading
import time
import os
import re
import requests
from bs4 import BeautifulSoup
import sys
import importlib
importlib.reload(sys)  # a Python 2 era habit; it has no effect in Python 3 and could be removed


class downLoader():
    # Downloads every chapter of one book on 23txt and saves each chapter as a separate .txt file.
    def __init__(self):
        self.chapterList = []   # chapter URLs
        self.chapterName = []   # chapter titles
        self.chapterNum = 0
        self.bookName = ''
        self.session = requests.session()

    def deleteFile(self):
        # Delete any existing folder named after the book, then recreate it empty.
        path = os.path.join(os.getcwd(), self.bookName)
        if os.path.exists(path):
            files = os.listdir(path)
            for pFile in files:
                try:
                    os.unlink(os.path.join(path, pFile))
                except IOError:
                    print('System error: cannot delete file ' + pFile + ', it may be in use')
            try:
                os.rmdir(path)
            except IOError:
                print('System error: cannot delete folder ' + self.bookName + ', a file inside is still in use')
                time.sleep(1)
                self.deleteFile()
                return
            print('Deleted folder ' + self.bookName)
        try:
            os.mkdir(self.bookName)
        except IOError:
            time.sleep(1)
            self.deleteFile()
            return
        print('Created folder ' + self.bookName)

    def getChapter(self):
        # Parse the book's catalogue page for its title and the list of chapter links.
        print('Fetching chapter list')
        html = self.getHtml('https://www.23txt.com/files/article/html/22/22295/')
        bf = BeautifulSoup(html, "html.parser")
        divs = bf.find_all('div', class_='box_con')
        self.bookName = divs[0].find('div', id='maininfo')
        self.bookName = self.bookName.find('div', id='info')
        self.bookName = self.bookName.find('h1').text
        print(self.bookName)
        self.deleteFile()
        aList = divs[1].find_all('a')
        for a in aList:
            self.chapterName.append(a.string)
            self.chapterList.append('https://www.23txt.com' + a.get('href'))
        self.chapterNum = len(aList)

    def getHtml(self, url):
        # Fetch a page and decode it with the encoding detected from its content.
        req = self.session.get(url=url)
        req.raise_for_status()
        req.encoding = req.apparent_encoding
        return req.text

    def begin(self, begin=0):
        # Note: start() followed immediately by join() means the chapters are
        # still downloaded one at a time, not in parallel.
        print('Fetching the content of all chapters')
        for index in range(begin, self.chapterNum):
            thread = threading.Thread(target=self.getContent, args=(index,))
            thread.start()
            thread.join()

    def getContent(self, index):
        # Extract the chapter body, collapse runs of whitespace into newlines, and save it.
        html = self.getHtml(self.chapterList[index])
        bf = BeautifulSoup(html, "html.parser")
        div = bf.find('div', id='wrapper')
        div = div.find('div', class_='content_read')
        div = div.find('div', class_='box_con')
        div = div.find('div', id='content')
        content = div.text
        content = re.sub(r'\s+', '\n', content)
        content = self.chapterName[index] + '\n' + content
        self.saveFile(content, self.chapterName[index])

    def saveFile(self, content, chapterName):
        print('Writing chapter: ' + chapterName)
        chapterName = self.checkChapterName(chapterName)
        path = os.path.join(os.path.join(os.getcwd(), self.bookName), chapterName)
        f = open(path + '.txt', 'w+', encoding='utf-8')
        f.write(content)
        f.close()

    def checkChapterName(self, chapterName):
        # '\t' is the tab escape character; on Windows a file name also cannot contain
        # any of the 9 special characters \ / : * ? " < > | , so strip them all.
        strs = ['\t', '\\', '/', ':', '*', '?', '"', '<', '>', '|']
        for s in strs:
            chapterName = chapterName.replace(s, '')
        return chapterName


def main():
    dlObj = downLoader()
    dlObj.getChapter()
    dlObj.begin(0)  # the number is the zero-based chapter index to start from
    sys.exit()


if __name__ == '__main__':
    main()
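As mentioned at the top, merging the chapters into one text file was left out. Below is a minimal sketch of that missing step, not part of the original script: it assumes the downLoader instance has already finished downloading, reuses its bookName and chapterName lists to keep the catalogue order, piggybacks on the os module already imported above, and the helper name mergeChapters is made up for illustration.

def mergeChapters(dlObj):
    # Hypothetical helper, not in the original script: concatenate the per-chapter
    # .txt files into a single <bookName>.txt, in the order of the catalogue page.
    bookDir = os.path.join(os.getcwd(), dlObj.bookName)
    outPath = os.path.join(os.getcwd(), dlObj.bookName + '.txt')
    with open(outPath, 'w', encoding='utf-8') as out:
        for name in dlObj.chapterName:
            chapterPath = os.path.join(bookDir, dlObj.checkChapterName(name) + '.txt')
            if not os.path.exists(chapterPath):
                print('Missing chapter file, skipping: ' + name)
                continue
            with open(chapterPath, 'r', encoding='utf-8') as f:
                out.write(f.read())
            out.write('\n\n')
    print('Merged into ' + outPath)

Under these assumptions it would be called as mergeChapters(dlObj) inside main(), right after dlObj.begin(0).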
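One more observation on begin(): because each thread is join()ed right after start(), the download still runs chapter by chapter. If genuine parallelism were wanted, one possible variant, not from the original article and only a sketch assuming the site tolerates a few simultaneous requests, is the hypothetical beginConcurrent helper below, which uses a bounded thread pool from the standard library instead of downLoader.begin():

from concurrent.futures import ThreadPoolExecutor

def beginConcurrent(dlObj, begin=0, workers=4):
    # Hypothetical variant of downLoader.begin(): fetch chapters with a small
    # pool of worker threads; 'workers' caps the number of parallel requests.
    print('Fetching the content of all chapters concurrently')
    with ThreadPoolExecutor(max_workers=workers) as pool:
        # list() forces iteration, so any exception raised inside getContent is re-raised here.
        list(pool.map(dlObj.getContent, range(begin, dlObj.chapterNum)))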
That wraps up this article on scraping a novel with Python; hopefully it is of some help to fellow programmers.