This article walks through crawling the whole Jianshu (jianshu.com) site with Scrapy. Hopefully it provides a useful reference for solving similar problems; interested developers can follow along.
- Storing the data in MySQL both synchronously and asynchronously
- Using selenium to help load and parse data rendered via ajax
- URL extraction rules for site-wide crawling
The CrawlSpider class provided by Scrapy is used together with link-extraction rules, so Scrapy automatically extracts the matching URLs for us.
The jianshu.py file
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from jianshu_spider.items import JianshuSpiderItem


class JianshuSpider(CrawlSpider):
    name = 'jianshu'
    allowed_domains = ['jianshu.com']
    start_urls = ['https://www.jianshu.com/']
    rules = (
        # Observing the URLs shows the prefix is always the same; the rest is a
        # 12-character combination of digits and lowercase letters
        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_detail', follow=True),
    )

    def parse_detail(self, response):
        title = response.xpath('//h1[@class="title"]/text()').extract_first('')                  # title
        avatar = response.xpath('//a[@class="avatar"]/img/@src').extract_first('')               # avatar
        author = response.xpath('//span[@class="name"]/a/text()').extract_first('')              # author
        publish_time = response.xpath('//span[@class="publish-time"]/text()').extract_first('')  # publish time
        content = response.xpath('//div[@class="show-content"]').get()                           # article body
        # Extract the article id, i.e. the part of the URL that differs per article
        process_url = response.url.split('?')[0]   # split on '?' and keep the first part
        article_id = process_url.split('/')[-1]    # split on '/'; the last segment is the article id
        origin_url = response.url
        print(title)
        item = JianshuSpiderItem(
            title=title,
            avatar=avatar,
            author=author,
            publish_time=publish_time,
            content=content,
            article_id=article_id,
            origin_url=origin_url,
        )
        return item
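With the rules in place, the spider can be started with scrapy crawl jianshu from the project root, or from a small runner script. The sketch below is an addition, not part of the original post; it assumes the project layout created by scrapy startproject jianshu_spider.

# start.py -- a minimal runner sketch placed next to scrapy.cfg
# (the project and spider names are taken from the code above; adjust if yours differ)
from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'jianshu'])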
The items.py file
This defines the fields to be scraped; it can easily be extended with more fields.
import scrapy


class JianshuSpiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()
    avatar = scrapy.Field()
    author = scrapy.Field()
    publish_time = scrapy.Field()
    content = scrapy.Field()
    article_id = scrapy.Field()
    origin_url = scrapy.Field()
The settings.py file
This holds the project settings, including the default request headers and the switches that enable the pipelines and the downloader middleware.
ROBOTSTXT_OBEY = False
DOWNLOAD_DELAY = 1
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
}
DOWNLOADER_MIDDLEWARES = {
    # 'jianshu_spider.middlewares.JianshuSpiderDownloaderMiddleware': 543,
    'jianshu_spider.middlewares.SeleniumDownloadMiddleware': 543,
}
ITEM_PIPELINES = {
    'jianshu_spider.pipelines.JianshuSpiderPipeline': 300,
    # 'jianshu_spider.pipelines.JianshuTwistedPipeline': 300,
}
The pipelines.py file
This stores the scraped data in the database, in both a synchronous and an asynchronous variant.
import pymysql
from twisted.enterprise import adbapi   # asynchronous database connection pool
from pymysql import cursors             # pymysql cursor classes


class JianshuSpiderPipeline(object):
    def __init__(self):
        params = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '1326628437',
            'database': 'jianshu',
            'charset': 'utf8',
        }
        self.conn = pymysql.connect(**params)
        self.cursor = self.conn.cursor()
        self._sql = None

    @property   # expose the SQL statement as a read-only property
    def sql(self):
        if not self._sql:
            self._sql = '''
                insert into article(title, author, avatar, publish_time, article_id, origin_url, content)
                values(%s, %s, %s, %s, %s, %s, %s)
            '''
        return self._sql

    def process_item(self, item, spider):
        self.cursor.execute(self.sql, (item['title'], item['author'], item['avatar'],
                                       item['publish_time'], item['article_id'],
                                       item['origin_url'], item['content']))
        self.conn.commit()
        return item


# Asynchronous insertion: inserting is an I/O operation, so with a large volume of data
# synchronous inserts will block; an asynchronous pipeline is well worth having.
class JianshuTwistedPipeline(object):
    def __init__(self):
        params = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '1326628437',
            'database': 'jianshu',
            'charset': 'utf8',
            'cursorclass': cursors.DictCursor,
        }
        # use adbapi's connection pool to insert asynchronously
        self.dbpool = adbapi.ConnectionPool("pymysql", **params)
        self._sql = None

    @property
    def sql(self):
        if not self._sql:
            self._sql = '''
                insert into article(title, author, avatar, publish_time, article_id, origin_url, content)
                values(%s, %s, %s, %s, %s, %s, %s)
            '''
        return self._sql

    def process_item(self, item, spider):
        # insert asynchronously
        defer = self.dbpool.runInteraction(self.insert_item, item)
        # error handling
        defer.addErrback(self.handle_error, item, spider)
        return item

    def insert_item(self, cursor, item):
        cursor.execute(self.sql, (item['title'], item['author'], item['avatar'],
                                  item['publish_time'], item['article_id'],
                                  item['origin_url'], item['content']))

    def handle_error(self, error, item, spider):
        print('+' * 30 + 'error' + '+' * 30)
        print(error)
        print('+' * 30 + 'error' + '+' * 30)
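Both pipelines insert into an article table that has to exist beforehand. The original post does not show the schema, so the following is only a sketch with assumed column types, using the same connection parameters:

# create_table.py -- a one-off sketch; the column names match the INSERT statement above,
# but the column types are assumptions and can be adjusted as needed
import pymysql

conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                       password='1326628437', database='jianshu', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute('''
        create table if not exists article (
            id int primary key auto_increment,
            title varchar(255),
            author varchar(64),
            avatar varchar(255),
            publish_time varchar(64),
            article_id varchar(16),
            origin_url varchar(255),
            content longtext
        ) default charset=utf8
    ''')
conn.commit()
conn.close()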
The middlewares.py file
This overrides the request/download process so that content loaded via ajax can be obtained.
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from scrapy.http.response.html import HtmlResponse


# Rewrite the request process with selenium so that pages relying on ajax can be crawled.
# Like counts, comment counts, favourite counts and the "recommended reading" links are all loaded via ajax.
class SeleniumDownloadMiddleware(object):
    def __init__(self):
        self.browser = webdriver.Chrome()
        # self.wait = WebDriverWait(self.browser, 10)

    def process_request(self, request, spider):
        self.browser.get(request.url)
        print('Downloading the url with the selenium automation tool')
        time.sleep(1)
        try:
            while True:
                # Some articles have several "show more" buttons below the body
                # (in the "included in these collections" section), so keep clicking
                showmore = self.browser.find_element_by_class_name('show-more')
                showmore.click()
                time.sleep(0.3)
                if not showmore:
                    break
        except:
            pass
        source = self.browser.page_source
        response = HtmlResponse(url=self.browser.current_url,
                                request=request,
                                body=source,
                                encoding='utf-8')
        return response
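One thing the middleware above never does is quit Chrome when the crawl ends. A minimal sketch of wiring that up via Scrapy's spider_closed signal (the from_crawler hook below is an addition, not part of the original code; __init__ and process_request stay exactly as shown above):

from scrapy import signals

class SeleniumDownloadMiddleware(object):
    # __init__ and process_request unchanged from the version above

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls()
        # call spider_closed when the crawl finishes
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def spider_closed(self, spider):
        # close the Chrome window opened in __init__ so no browser process is left behind
        self.browser.quit()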
That wraps up this article on crawling the whole Jianshu site with Scrapy; hopefully it is helpful to fellow programmers!