This article walks through scraping the Douban short comments for the TV series 《天盛长歌》 (The Rise of Phoenixes) with Python and turning them into a word cloud.
I've been watching 《天盛长歌》 recently, a little over 30 episodes in, and the plot has real depth: every villain is genuinely clever, the storylines interlock tightly, and I catch myself scrutinizing which details each shot chooses to linger on. That said, the series is adapted from a novel, so some of the plot feels a bit stale, the pacing is slow, and there are a lot of threads; it takes 20 episodes just for the eldest prince to be written off. So far I'm not fond of the Princess Shaoning storyline, and I'm curious where it goes from here. The supporting cast was chosen with care; I liked Zhuyin, though it's a shame she's written off after only a dozen-odd episodes, and rather pointlessly at that. The ladies of the Lanxiang House are lovely too; compared with other dramas, each one is beautiful in her own way.
I. Scraping the Data
First we need to grab the Douban comments. Douban is a little odd here: even when logged in you can only fetch 480 short comments, and without logging in you can fetch 200, so I scraped the 200 available without login (I haven't worked out scraping while logged in yet).
Here's the code:
# coding=utf-8
import requests
from lxml import etree
import random
import pymysql

# Fetch a page and return it as an lxml HTML tree
def geturl(url, IP_pools):
    USER_AGENTS = [
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
    ]
    Agent_Value = random.choice(USER_AGENTS)
    headers = {
        "User-Agent": Agent_Value,
        "Host": "movie.douban.com",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    }
    try:
        # Pick a random verified IP from the proxy pool
        ip_one = random.choice(IP_pools)
        print(ip_one)
        proxies1 = {'http': "http://" + ip_one}
        print(url)
        r = requests.get(url=url, headers=headers, proxies=proxies1, timeout=5)
        print(r.status_code)
        assert r.status_code == 200
        return etree.HTML(r.content)
    except Exception:
        print("**" * 20 + "Error!" + "**" * 20)
# Read every verified IP from the proxy-pool table and return them as a list
def get_IP():
    con = pymysql.connect(host='192.111.111.111', user='root', passwd='111111',
                          db='databace', port=3306, charset='utf8')
    if not con:
        print("Failed to connect to the database!")
        return []
    print("ok")
    cur = con.cursor()
    a_list = []
    if cur:
        sql_read = "select IP,port from ip_pool where score = %s"
        cur.execute(sql_read, ("T",))   # "T" marks IPs that passed validation
        for ip, port in cur.fetchall():
            a_list.append(ip + ":" + str(port))
        cur.close()
    else:
        print("Failed to open a cursor!")
    con.close()
    return a_list

IP_pools = get_IP()

def TSCG():
    # Douban pages the short comments 20 at a time via the start parameter
    for i in range(0, 220, 20):
        url = "https://movie.douban.com/subject/26761328/comments?start=" + str(i)
        html = geturl(url, IP_pools)
        if html is None:   # geturl() returns None when the request failed
            continue
        # Pull the short-comment text straight out of each page
        res = html.xpath('//span[@class="short"]/text()')
        for a in res:
            a = a.strip().replace("\n", "")
            with open("output/tianshengchangge_200.txt", "a", encoding="utf-8") as fw:
                fw.write(a + "\n")

# Scrape the short comments
TSCG()
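One tweak I'd add on top of the random User-Agents and proxies (my addition, not in the original): pause briefly between pages so the crawl looks less bot-like. Inside the paging loop of TSCG(), after each page is written:

import time

# at the end of each iteration of the paging loop in TSCG()
time.sleep(random.uniform(1, 3))   # wait 1-3 seconds before requesting the next page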
II. Processing the Data
1. With the data scraped, first build the stopword list; the stopword txt file is in the next article (a sample of its format follows the function below).
# Build the stopword set
def make_stopdict():
    stopdict = set()
    # Stopword list downloaded from the web, one word per line
    with open("stopwords.txt", "r", encoding="utf-8") as fr:
        for l in fr.readlines():
            stopdict.add(l.strip())
    return stopdict
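The stopwords.txt file itself is left to the next article; any of the common Chinese stopword lists found online will work. For reference, the format the function expects is simply one word per line in UTF-8, along the lines of:

的
了
是
啊
和
就
都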
2. Strip the stopwords using jieba (stopwords are filler words like 的, 啊, 是; search online for the details). A quick demo of the effect follows the code below.
import jieba.analyse
import re

def removeStopwords():
    stopdict = make_stopdict()
    # Keep only tokens made entirely of Chinese characters or letters
    zhongwen_pat = re.compile(r'^[\u4e00-\u9fa5a-zA-Z]+$')
    all_content = []
    # Read the short comments back from the file
    with open("output/tianshengchangge_200.txt", "r", encoding="utf-8") as fr:
        review = fr.readlines()
    for t in review:
        cut_list = [c for c in jieba.cut(t) if zhongwen_pat.search(c)]
        # The set deduplicates words within one comment before removing stopwords
        res_list = list(set(cut_list) - stopdict)
        all_content.extend(res_list)
        for a in res_list:
            with open("output/douban1.txt", "a", encoding="utf-8") as fw:
                fw.write(a + "\n")
    return "output/douban1.txt"
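For illustration (my toy example; the exact tokens depend on your jieba version and dictionary), one comment goes through roughly this:

import jieba

stopdict = {"的", "了", "都", "很"}                # toy stopword set
tokens = set(jieba.cut("每个反派的智商都很在线"))   # tokenize one comment
print(tokens - stopdict)                           # e.g. {'每个', '反派', '智商', '在线'}

Note that turning each comment into a set drops repeated words within that comment, which slightly flattens the counts used later.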
3. Use jieba to rank the words that appear in the comments (the code below uses TextRank weights; a raw frequency-count alternative follows it).
def get_top_keywords():
    file = "output/douban1.txt"
    with open(file, 'r', encoding="utf-8") as f:
        texts = f.read()   # Read the whole file as a single string
    # Keep the 100 highest-ranked words, with their TextRank weights
    result = jieba.analyse.textrank(texts, topK=100, withWeight=True)
    keywords = dict()
    for word, weight in result:
        keywords[word] = weight
    print(keywords)
    return keywords
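Strictly speaking, textrank returns TextRank importance weights, not raw occurrence counts. If you want literal frequencies, a Counter over the word-per-line file from step 2 works just as well (a sketch under that assumption; get_top_frequencies is my own name):

from collections import Counter

def get_top_frequencies(path="output/douban1.txt", topn=100):
    # douban1.txt holds one word per line, so counting lines counts words
    with open(path, "r", encoding="utf-8") as f:
        words = [line.strip() for line in f if line.strip()]
    return dict(Counter(words).most_common(topn))

Either dictionary can be fed to generate_from_frequencies() in the next step.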
4. Build the word cloud
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

# Build the word cloud
def makeWordcloud():
    keywords = get_top_keywords()
    # Read the background/mask image (the original called a bare imread();
    # PIL + numpy is one way to get the array wordcloud expects)
    color_mask = np.array(Image.open("output/mark2.jpg"))
    cloud = WordCloud(
        # Set a font or Chinese renders as boxes; the font path must not contain Chinese
        font_path="C:/Windows/Fonts/simfang.ttf",
        # Background color, black by default
        background_color='white',
        # Mask image that shapes the cloud
        mask=color_mask,
        # Maximum number of words
        max_words=100,
        # Largest font size; defaults to the image height if unset
        max_font_size=80,
        # Canvas size; ignored when a mask is set
        width=600, height=400, margin=2,
        # Fraction of words laid out horizontally (default 0.9)
        prefer_horizontal=0.4)
    wc = cloud.generate_from_frequencies(keywords)   # Generate the cloud
    wc.to_file("output/Wc_tscg4.png")                # Save the image
    # Display the word cloud
    plt.imshow(wc)
    # Hide the axes
    plt.axis('off')
    plt.show()

# Build the word cloud
makeWordcloud()
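As an optional touch (my addition, not in the original), wordcloud's ImageColorGenerator can repaint the words with the colors of the mask image; this would slot into makeWordcloud() right after generate_from_frequencies():

from wordcloud import ImageColorGenerator

image_colors = ImageColorGenerator(color_mask)   # sample colors from the mask image
wc.recolor(color_func=image_colors)              # recolor each word from the mask pixels beneath it
wc.to_file("output/Wc_tscg4_recolored.png")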
5. Final result