本文主要介绍北邮 Python 爬虫爬取链家的新房数据并进行数据处理的方法,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
博主声明:用途仅供学习
items.py
import scrapy


class MyItem(scrapy.Item):
    """Container for one new-home listing scraped from Lianjia.

    Field names (including the original misspelling ``aera``) are kept
    exactly as-is: the spider and the CSV post-processing script both
    reference them by these names.
    """

    name = scrapy.Field()        # listing name
    place1 = scrapy.Field()      # location, part 1
    place2 = scrapy.Field()      # location, part 2
    place3 = scrapy.Field()      # location, part 3
    model = scrapy.Field()       # smallest floor plan / layout
    aera = scrapy.Field()        # smallest area (sic: "area")
    totalprice = scrapy.Field()  # total price
    UnitPrice = scrapy.Field()   # unit price
    unit = scrapy.Field()        # price-unit label text
spider.py
import scrapy
from linajia.items import MyItem  # item definition from items.py


class mySpider(scrapy.spiders.Spider):
    """Scrape new-home listings from Lianjia (Beijing) listing-index pages."""

    name = "linajia"  # spider name (sic: original project spells it this way)
    # FIX: the original value "bj.lianja.com/" style entry had a trailing
    # slash and did not match bj.fang.lianjia.com, so any follow-up request
    # would have been dropped by the offsite middleware.
    allowed_domains = ["fang.lianjia.com"]
    # First index page plus pages 2-19 (FIX: list expression instead of a
    # class-body for-loop, which leaked `pg` as a class attribute).
    start_urls = ["https://bj.fang.lianjia.com/loupan/"] + [
        "https://bj.fang.lianjia.com/loupan/pg{}/".format(pg)
        for pg in range(2, 20)
    ]
    # Throttle requests so pages are fetched roughly in order.
    download_delay = 1

    def parse(self, response):
        """Parse one listing-index page and yield one MyItem per listing.

        Listings whose markup does not match the expected layout are
        skipped instead of aborting the whole page.
        """
        for each in response.xpath('/html/body/div[4]/ul[2]/li'):
            # FIX: fresh item per listing — the original reused a single
            # mutable MyItem instance across every yield.
            item = MyItem()
            try:
                item['name'] = each.xpath("div/div[1]/a/text()").extract()[0]
                item['place1'] = each.xpath("div/div[2]/span[1]/text()").extract()[0]
                item['place2'] = each.xpath("div/div[2]/span[2]/text()").extract()[0]
                item['place3'] = each.xpath("div/div[2]/a/text()").extract()[0]
                # Smallest floor plan; may be absent on the page.
                models = each.xpath("div/a/span[1]/text()").extract()
                item['model'] = models[0] if models else ''
                # Smallest area, cut out of a text like "建面 80-120m²":
                # take the token after the first space, up to "-" (range)
                # or "m" (single value).
                areas = each.xpath("div/div[3]/span/text()").extract()
                if areas:
                    area_text = areas[0]  # FIX: no longer shadows builtin `str`
                    start = area_text.find(" ") + 1
                    end = area_text.find("-")
                    if end == -1:
                        end = area_text.find("m")
                    item['aera'] = area_text[start:end]
                else:
                    item['aera'] = ''
                totals = each.xpath("div/div[6]/div[2]/text()").extract()
                unit_prices = each.xpath("div/div[6]/div[1]/span[1]/text()").extract()
                unit_label = each.xpath("div/div[6]/div/span[2]/text()").extract()
                # Some listings show the total price in the unit-price slot;
                # when the label says "总价", move the value to totalprice.
                # FIX: guard unit_label before [0] — an empty result used to
                # raise IndexError, which the old `except ValueError` never
                # caught, crashing the whole page.
                if unit_label and unit_label[0].find("总价") != -1:
                    item['totalprice'] = unit_prices[0]
                    item['UnitPrice'] = ''
                else:
                    item['UnitPrice'] = unit_prices[0] if unit_prices else ''
                    item['totalprice'] = totals[0] if totals else ''
                yield item
            except (IndexError, ValueError):
                # FIX: extract()[0] raises IndexError on missing nodes;
                # the original caught only ValueError.
                continue
DataProcess.py
import numpy as np
import pandas as pd

# Post-process the CSV written by the linajia spider: clean the text
# columns, normalise area / total price / unit price, cross-fill missing
# prices, print summary statistics, and write the cleaned CSV.

fileNameStr = 'MyData.csv'
# Everything is read as strings first; typed conversion happens below.
orig_df = pd.read_csv(fileNameStr, encoding='gbk', dtype=str)

# 1. Strip surrounding whitespace from every text column
#    (FIX: loop replaces eight copy-pasted lines).
for col in ('name', 'place1', 'place2', 'place3',
            'model', 'aera', 'totalprice', 'UnitPrice'):
    orig_df[col] = orig_df[col].str.strip()

# 2. Area -> int. Missing cells become 0 for now and are restored to ''
#    in step 5. FIX: np.int / np.str were removed in NumPy 1.24; the
#    builtins int / str are the documented replacements.
orig_df['aera'] = orig_df['aera'].fillna(0).astype(int)
# 3a. Unit price -> int (same fillna-as-0 convention).
orig_df['UnitPrice'] = orig_df['UnitPrice'].fillna(0).astype(int)
# 3b. Total price: strip the "总价" / "万/套" decorations, then -> int.
orig_df['totalprice'] = orig_df['totalprice'].str.replace("总价", "")
orig_df['totalprice'] = orig_df['totalprice'].str.replace("万/套", "")
orig_df['totalprice'] = orig_df['totalprice'].fillna(0).astype(int)

# 4. Cross-fill: derive total price (万) from area * unit price when the
#    total is missing, and unit price (万/m²) from total / area otherwise.
#    After this loop UnitPrice holds '%.4f' strings, in units of 万.
for idx, row in orig_df.iterrows():
    if orig_df.loc[idx, 'totalprice'] == 0:
        # 元/m² * m² -> 元, then // 10000 -> 万
        orig_df.loc[idx, 'totalprice'] = (
            orig_df.loc[idx, 'aera'] * orig_df.loc[idx, 'UnitPrice']) // 10000
    if orig_df.loc[idx, 'UnitPrice'] != 0:
        orig_df.loc[idx, 'UnitPrice'] = '%.4f' % (
            orig_df.loc[idx, 'UnitPrice'] / 10000)
    else:
        # NOTE(review): if 'aera' is also 0 here this divides by zero —
        # presumably such rows never occur in the scraped data; confirm.
        orig_df.loc[idx, 'UnitPrice'] = '%.4f' % (
            orig_df.loc[idx, 'totalprice'] / orig_df.loc[idx, 'aera'])

# 5. Restore area: rows that had no area were filled with 0 in step 2;
#    put the empty string back (vectorised — no row loop needed).
orig_df['aera'] = orig_df['aera'].astype(str)
orig_df.loc[orig_df['aera'] == '0', 'aera'] = ''

# 6. Total-price statistics (column is int, units: 万).
print("总价:")
imaxpos = orig_df['totalprice'].idxmax()
print("最贵房屋", orig_df.loc[imaxpos, "totalprice"], orig_df.loc[imaxpos, "name"])
iminpos = orig_df['totalprice'].idxmin()
print("最便宜房屋", orig_df.loc[iminpos, "totalprice"], orig_df.loc[iminpos, "name"])
print("中位数", orig_df['totalprice'].median())

# 7. Unit-price statistics. The column now holds '%.4f' strings, so cast
#    to float once. FIX: the original called .median() on the raw string
#    column while max/min used astype(float).
unit_price = orig_df['UnitPrice'].astype(float)
print("单价:")
idmaxpos = unit_price.idxmax()
print("最贵房屋", orig_df.loc[idmaxpos, "UnitPrice"], orig_df.loc[idmaxpos, "name"])
idminpos = unit_price.idxmin()
print("最便宜房屋", orig_df.loc[idminpos, "UnitPrice"], orig_df.loc[idminpos, "name"])
print("中位数", unit_price.median())

orig_df.to_csv("NewMydata.csv", header=True, encoding="gbk", mode='w+', index=False)
处理结果
这篇关于北邮 python 爬虫爬取链家的新房数据进行数据处理的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!