This post walks through a Python web spider that scrapes articles from Sina Tech, built with BeautifulSoup and MySQL. I hope it offers a useful reference for developers tackling a similar problem — follow along if you're interested!
The past few days of work have paid off: the spider for Sina Tech articles is finally done. Unless there is no new content left, it will keep crawling Sina Tech articles.
If you want to learn more, check out my GitHub: https://github.com/libp/WebSpider
If you'd like the database table structure, leave your email address~
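Until then, here is a minimal sketch of what that table could look like. It is reconstructed purely from the INSERT statement in the spider below, so the column types, the id primary key, and the index on url are my assumptions, not the original schema:

# -*- coding: utf-8 -*-
# Hypothetical reconstruction of tbl_peng_article, inferred only from the
# INSERT statement used by the spider; column types, the id column, and the
# index on url are assumptions rather than the original schema.
import MySQLdb

DDL = """
CREATE TABLE IF NOT EXISTS tbl_peng_article (
    id         INT UNSIGNED NOT NULL AUTO_INCREMENT,
    title      VARCHAR(255) NOT NULL,
    author     VARCHAR(128),
    content    MEDIUMTEXT,
    createTime DATETIME,
    getTime    DATETIME,
    url        VARCHAR(512) NOT NULL,
    webname    VARCHAR(64),
    PRIMARY KEY (id),
    KEY idx_url (url(191))
) ENGINE=InnoDB DEFAULT CHARSET=utf8
"""

conn = MySQLdb.connect(host='localhost', port=3306, user='root',
                       passwd='root', db='nichuiniu', charset='utf8')
cur = conn.cursor()
cur.execute(DDL)
conn.commit()
cur.close()
conn.close()

The index on url keeps the duplicate check in the crawl loop cheap. With the table in place, the full spider source follows.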
# -*- coding: utf-8 -*-
__author__ = 'Peng'
from bs4 import BeautifulSoup, Comment
import urllib2
from urllib2 import urlopen, HTTPError
import MySQLdb
import json
import datetime
import logging
import sys
import re
import time

# Send the log output to the console
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    stream=sys.stdout)


def spiderSinaTech(url, webname):
    """Parse one article page and insert it into the database."""
    conn = getConn()
    cur = conn.cursor()
    data = getSinaArticle(url, webname)
    if data is None:
        # The target page could not be parsed
        return -1
    result = 0
    try:
        sqlInsertArticle = ("insert into tbl_peng_article "
                            "(title,author,content,createTime,getTime,url,webname) "
                            "values (%s,%s,%s,%s,%s,%s,%s)")
        result = cur.execute(sqlInsertArticle,
                             (data['title'], data['author'], data['article'],
                              data['published_time'], data['getTime'],
                              data['url'], data['webname']))
    except MySQLdb.Error as e:
        print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    conn.commit()
    cur.close()
    conn.close()
    return result


def getSinaArticle(url, webname):
    """Fetch one article page and extract title, author, time, and body."""
    # Dictionary holding the function's return values
    data = {'url': url, 'title': '', 'published_time': '', 'getTime': '',
            'author': '', 'article': '', 'webname': webname}
    # Request headers
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
                             "(KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36",
               "Accept": "*/*"}
    # Open the page
    try:
        request = urllib2.Request(url, headers=headers)
        html = urlopen(request)
    except HTTPError as e:
        print(e)
        return None
    # Read the page and parse it into a tree
    soup = BeautifulSoup(html.read(), "lxml")
    # Strip HTML comments
    for element in soup(text=lambda text: isinstance(text, Comment)):
        element.extract()
    # Strip JavaScript
    [s.extract() for s in soup('script')]
    try:
        # Extract the title
        data['title'] = soup.find(id="main_title").get_text()
    except AttributeError:
        return None
    # Extract the publication time, e.g. 2017-06-03T11:31:53+08:00 — ISO 8601
    # with a fixed +08:00 offset; Python 2 strptime has no %z, so the offset
    # is matched literally
    published_time = soup.find(property="article:published_time")['content']
    UTC_FORMAT = "%Y-%m-%dT%H:%M:%S+08:00"
    data['published_time'] = datetime.datetime.strptime(published_time, UTC_FORMAT)
    # Extract the author
    data['author'] = soup.find(property="article:author")['content']
    # Extract the article body and drop its image wrappers
    content = soup.find(id="artibody")
    for del_img in content.find_all(class_="img_wrapper"):
        del_img.decompose()
    # Concatenate the body's child nodes into the string that gets stored
    article = ""
    for child in content.contents:
        article += str(child)
    data['article'] = article
    # Note: datetime objects are not JSON-serializable as-is, so returning
    # json.dumps(data) here would need a reworked date conversion first
    # Time of the crawl
    data['getTime'] = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    return data


def getConn():
    conn = MySQLdb.connect(host='localhost',
                           port=3306,
                           user='root',
                           passwd='root',
                           db='nichuiniu',
                           charset='utf8')
    return conn


def GOSina(url, webname):
    """Scan one page for article links, store the new ones, and return the last stored URL."""
    # Request headers
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
                             "(KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36",
               "Accept": "*/*"}
    # Open the page
    try:
        request = urllib2.Request(url, headers=headers)
        html = urlopen(request)
    except HTTPError as e:
        print(e)
        return 0
    # Read the page and parse it into a tree
    soup = BeautifulSoup(html.read(), "lxml")
    conn = getConn()
    cur = conn.cursor()
    # List of article URLs inserted into the database during this pass
    L = []
    for link in soup.findAll("a", href=re.compile(r'(.*?)(tech)(.*?)(\d{4}-\d{2}-\d{2})(/doc-ify)')):
        if 'href' in link.attrs:
            # Normalize the href: keep everything up to .shtml, dropping paging parameters
            xurl = re.compile(r'(.*?shtml)').search(link.attrs['href']).group(1)
            sqlQueryUrl = "select * from tbl_peng_article where url='%s'" % xurl
            result = cur.execute(sqlQueryUrl)
            conn.commit()
            if result == 0:
                rs = spiderSinaTech(xurl, webname)
                if rs > 0:
                    logging.info("----URL has been inserted into database: %s" % xurl)
                    L.append(xurl)
                    time.sleep(2)
                elif rs == -1:
                    logging.info("****URL content cannot be parsed: %s" % xurl)
            else:
                logging.info("&&&&URL already in database: %s" % xurl)
    cur.close()
    conn.close()
    # Return the last stored URL so the caller can continue from it;
    # 0 means nothing new was found and the crawl can stop
    if L:
        return L[-1]
    else:
        return 0


logging.info("begin spider sina tech")
url="http://tech.sina.com.cn/it/2017-06-07/doc-ifyfuzny3756083.shtml"
webname="sina"
x = GOSina(url,webname)
while x != 0:
    # Keep crawling from the last stored URL until a pass finds nothing new
    x = GOSina(x, webname)
logging.info("end spider sina tech")
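To see what the crawl loop in GOSina actually matches, here is a small standalone check of its two regular expressions: the filter that selects Sina Tech article links, and the .shtml normalization that strips query and paging parameters. The sample href is made up for illustration:

# -*- coding: utf-8 -*-
# Standalone check of the two regexes used in GOSina. The sample href is
# invented for illustration; real hrefs come from anchors on the page.
import re

link_filter = re.compile(r'(.*?)(tech)(.*?)(\d{4}-\d{2}-\d{2})(/doc-ify)')
normalize = re.compile(r'(.*?shtml)')

href = "http://tech.sina.com.cn/it/2017-06-07/doc-ifyfuzny3756083.shtml?cre=tech&mod=pc"

if link_filter.search(href):
    # Lazily match up to the first ".shtml", dropping everything after it
    xurl = normalize.search(href).group(1)
    print(xurl)  # -> http://tech.sina.com.cn/it/2017-06-07/doc-ifyfuzny3756083.shtml

The two-second time.sleep between inserts is simple politeness toward the server, and GOSina's return value (the last stored URL, or 0) is what lets the main loop keep feeding the crawler new start pages until nothing new turns up.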
That wraps up this article on a Python spider for Sina Tech articles (BeautifulSoup + MySQL) — I hope it proves helpful to fellow developers!