本文主要是介绍Python爬虫——豆瓣电影Top250,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
# Scrape the Douban Movie Top250 list.
# 250 movies are spread over 10 pages, 25 movies per page.
import urllib.request
from bs4 import BeautifulSoup

# Entry URL of the first page of the Top250 list.
url = "http://movie.douban.com/top250"
# Adjust the User-Agent below to match your own scraping setup.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'}
# Fill in the path where the scraped titles should be written.
targetPath = "storage path"


def saveText(f, text):
    """Write *text* to the already-open file object *f*."""
    f.write(text)
#获取网页源码
def getData(url,headers):req = urllib.request.Request(url = url , headers = headers)res = urllib.request.urlopen(req)data = res.read()return data#解析网页
# 解析网页 -> parse the pages and write out the ranked titles
def praseHtml(f, url, headers):
    """Walk every page of the Top250 list, writing "<rank><title>" lines to *f*.

    Parameters
    ----------
    f : file object
        Open, writable text file; one numbered title per line is appended.
    url : str
        Base URL of the first page; pagination query strings are joined to it.
    headers : dict
        HTTP headers forwarded to getData().
    """
    currenturl = url
    i = 1  # running rank counter across all pages
    while currenturl:
        # Fetch and parse the current page.
        html = getData(currenturl, headers)
        soup = BeautifulSoup(html, 'lxml')
        moveList = soup.find('ol', attrs={'class': 'grid_view'})
        for moveLi in moveList.find_all('li'):
            detail = moveLi.find('div', attrs={'class': 'hd'})
            moveName = detail.find('span', attrs={'class': 'title'})
            saveText(f, str(i) + moveName.getText() + '\n')
            i += 1
            print(moveName.getText())
        # The "next" arrow's <span> only contains an <a> tag while more pages
        # remain; subscripting it unguarded raises
        # "'NoneType' object is not subscriptable" on the last page.
        nextpage = soup.find('span', attrs={'class': 'next'}).find('a')
        if nextpage:
            # href is a relative query string (e.g. "?start=25&filter="),
            # so append it to the base URL.
            currenturl = url + nextpage['href']
        else:
            currenturl = None


# Open with an explicit encoding and via a context manager so the file is
# flushed and closed even on error — the original never closed it and relied
# on the platform-default encoding.
with open(targetPath, "w", encoding="utf-8") as f:
    praseHtml(f, url, headers)
这篇关于Python爬虫——豆瓣电影Top250的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!