Python Crawler: Scraping a Cloud Music Service's Artists and Downloading Their Free Music

2024-02-05 13:52

This article walks through a Python crawler that scrapes a certain cloud music service's artist lists and downloads their free tracks. I hope it offers a useful reference for solving similar programming problems; interested developers, follow along!

import os
import re
import threading
import time

from lxml import etree
import requests
from bs4 import BeautifulSoup
from database import MyDataBase
from utils import make_user_agent

Note: the database and utils modules are my own code and carry no comments; ask if anything is unclear.

Run CrawlWangYiYunSinger first, otherwise the artists table will not exist in the database!

There are roughly tens of thousands in total, and they download quickly.

utils.make_user_agent() returns a dict of the form {"User-Agent": "..."}.
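If you don't have the original utils module at hand, a minimal stand-in that satisfies the same contract could look like the sketch below; the User-Agent strings are placeholders, not the original author's list.

# utils.py -- minimal stand-in (assumption: the crawler only needs a User-Agent header dict)
import random

_USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15",
]


def make_user_agent():
    # Return a dict that can be merged straight into requests headers.
    return {"User-Agent": random.choice(_USER_AGENTS)}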

The database module is a MySQL helper package that can be copied and used as-is from: Python操作Mysql数据库-CSDN博客.
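If you would rather not copy that package, note that the code below only calls a handful of MyDataBase methods: connect(), close(), get_tables(), create_table(), insert_data() and select_table_record(). A rough pymysql-based stand-in inferred from that usage might look like this; the connection parameters are placeholders and the real package from the linked post may differ.

# database.py -- rough stand-in built from how MyDataBase is used in this article
import pymysql


class MyDataBase:
    def __init__(self, host="localhost", user="root", password="123456", db="wyy"):
        # Placeholder connection settings; adjust to your own MySQL instance.
        self.conn_args = dict(host=host, user=user, password=password,
                              database=db, charset="utf8mb4")
        self.conn = None

    def connect(self):
        self.conn = pymysql.connect(**self.conn_args)

    def close(self):
        if self.conn:
            self.conn.close()

    def get_tables(self):
        with self.conn.cursor() as cur:
            cur.execute("show tables")
            return [row[0] for row in cur.fetchall()]

    def create_table(self, name, columns):
        cols = ", ".join(f"{k} {v}" for k, v in columns.items())
        with self.conn.cursor() as cur:
            cur.execute(f"create table if not exists {name} ({cols})")
        self.conn.commit()

    def insert_data(self, name, field, data):
        placeholders = ", ".join(["%s"] * len(field))
        sql = f"insert into {name} ({', '.join(field)}) values ({placeholders})"
        with self.conn.cursor() as cur:
            cur.execute(sql, data)
        self.conn.commit()

    def select_table_record(self, name, condition=""):
        with self.conn.cursor() as cur:
            cur.execute(f"select * from {name} {condition}")
            return cur.fetchall()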

Disclaimer: this content is for learning and exchange only and must not be used for any commercial purpose!

一、Logging

A small logger makes it easy to keep track of how the crawl is going.

class Logger:
    def __init__(self, path):
        self.path = path
        self.log_path = path + "/logs.txt"
        self.create()

    def create_parent(self):
        if not os.path.exists(self.path):
            os.makedirs(self.path)

    def create(self):
        self.create_parent()
        if not os.path.exists(self.log_path):
            with open(self.log_path, "w", encoding='utf-8') as f:
                pass

    def clear(self):
        with open(self.log_path, "w", encoding='utf-8') as f:
            pass

    def delete(self):
        os.remove(self.log_path)

    def info(self, content):
        with open(self.log_path, "a", encoding="utf-8") as f:
            t = time.strftime("[%Y-%m-%d %H:%M:%S]")
            s = f"{t}\t{content}"
            f.write(f"{s}\n")
            print(s)
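A quick usage example (the path below is just an illustration): instantiating the logger creates the log file if needed, and info() appends a timestamped line while echoing it to the console.

log = Logger("F:/wyy")      # creates F:/wyy/logs.txt on first use
log.info("开始爬取")         # appends a timestamped line and prints it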

二、Crawling artists into the database

class CrawlWangYiYunSinger(threading.Thread):
    def __init__(self):
        super().__init__(target=self.run)
        # Hard-coded cookie copied from a logged-in browser session.
        self.cookie = '_iuqxldmzr_=32; _ntes_nnid=0e6e1606eb78758c48c3fc823c6c57dd,1527314455632; ' \
                      '_ntes_nuid=0e6e1606eb78758c48c3fc823c6c57dd; __utmc=94650624; __utmz=94650624.1527314456.1.1.' \
                      'utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); WM_TID=blBrSVohtue8%2B6VgDkxOkJ2G0VyAgyOY;' \
                      ' JSESSIONID-WYYY=Du06y%5Csx0ddxxx8n6G6Dwk97Dhy2vuMzYDhQY8D%2BmW3vlbshKsMRxS%2BJYEnvCCh%5CKY' \
                      'x2hJ5xhmAy8W%5CT%2BKqwjWnTDaOzhlQj19AuJwMttOIh5T%5C05uByqO%2FWM%2F1ZS9sqjslE2AC8YD7h7Tt0Shufi' \
                      '2d077U9tlBepCx048eEImRkXDkr%3A1527321477141; __utma=94650624.1687343966.1527314456.1527314456' \
                      '.1527319890.2; __utmb=94650624.3.10.1527319890'
        self.agent = make_user_agent()['User-Agent']
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cookie': self.cookie,
            'Host': 'music.163.com',
            'Referer': 'http://music.163.com/',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': self.agent
        }
        self.DB = MyDataBase()
        self.artists_sheet = "artists"

    def create_artists_table(self):
        self.DB.connect()
        kwargs = {
            "id": "int primary key auto_increment",
            "artist_id": "varchar(128)",
            "artist": "varchar(128)",
        }
        self.DB.create_table(self.artists_sheet, kwargs)

    def get_artist(self, url):
        # Parse one artist-list page and insert every artist into the database.
        r = requests.get(url, headers=self.headers)
        soup = BeautifulSoup(r.text, 'html.parser')
        for artist in soup.find_all('a', attrs={'class': 'nm nm-icn f-thide s-fc0'}):
            artist_name = artist.string
            artist_id = artist['href'].replace('/artist?id=', '').strip()
            data = [artist_id, artist_name]
            self.DB.insert_data(self.artists_sheet, field=("artist_id", "artist"), data=data)

    def get_artist_url(self):
        ids = [1001, 1002, 1003, 2001, 2002, 2003, 6001, 6002, 6003,
               7001, 7002, 7003, 4001, 4002, 4003]  # values of the id (category) parameter
        initials = [-1, 0, 65, 66, 67, 68, 69, 70,
                    71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
                    81, 82, 83, 84, 85, 86, 87, 88, 89, 90]  # values of the initial parameter
        for _id in ids:
            for initial in initials:
                url = 'http://music.163.com/discover/artist/cat?id=' + str(_id) + '&initial=' + str(initial)
                try:
                    self.get_artist(url)
                except Exception as err:
                    print("获取错误:", err)

    def run(self):
        self.create_artists_table()
        try:
            self.get_artist_url()
        except Exception as err:
            print(err)
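This crawler must finish before anything else runs, since it creates and fills the artists table. A minimal way to kick it off on its own (assuming the MySQL connection in database.py is configured) could be:

if __name__ == "__main__":
    singer_crawler = CrawlWangYiYunSinger()
    singer_crawler.start()
    singer_crawler.join()   # wait until every category/initial page has been visited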

三、A worker thread that crawls one artist's music

class CrawlWangYiYunSingerMusic(threading.Thread):
    def __init__(self, artist_id, artist, database, num=None, save_path="F:/wyy/"):
        super().__init__(target=self.run)
        self.artist_id = artist_id
        self.artist = artist
        self.headers = {
            'Referer': 'http://music.163.com',
            'Host': 'music.163.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'User-Agent': make_user_agent()["User-Agent"]
        }
        self.url = f'https://music.163.com/song?id='
        self.download_url = f'https://link.hhtjim.com/163/'
        self.artist_url = f'https://music.163.com/artist?id={self.artist_id}'
        self.save_path = save_path
        self.unknown_singer_songs_path = self.save_path + "/未知/"
        self.Logger = Logger(self.save_path)
        self.num = num  # the artist's row id in the artists table
        self.flag = False
        self.DB = MyDataBase()
        self.downloaded_sheet = "downloaded_sheet"
        self.undownload_sheet = "undownload_sheet"

    def make_file(self):
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
            self.Logger.info(f"文件夹{self.save_path}\t创建成功!")
        if not os.path.exists(self.unknown_singer_songs_path):
            os.makedirs(self.unknown_singer_songs_path)
            self.Logger.info(f"文件夹{self.unknown_singer_songs_path}\t创建成功!")

    def make_artist_file(self):
        # Create (or reuse) a folder named after the artist; fall back to the "未知" folder on failure.
        artist_path = self.save_path + "/" + self.artist
        try:
            if not os.path.exists(artist_path):
                os.makedirs(artist_path)
            return artist_path
        except Exception as err:
            self.Logger.info(f"{artist_path}创建失败:{err}!")
            return self.unknown_singer_songs_path

    def create_downloaded_table(self):
        kwargs = {
            "id": "int primary key auto_increment",
            "artist_id": "varchar(128)",
            "music_id": "varchar(128)",
            "artist": "varchar(128)",
            "title": "varchar(128)",
        }
        if self.downloaded_sheet in self.DB.get_tables():
            return
        self.DB.create_table(self.downloaded_sheet, kwargs)

    def create_undownload_table(self):
        kwargs = {
            "id": "int primary key auto_increment",
            "artist_id": "varchar(128)",
            "music_id": "varchar(128)",
            "artist": "varchar(128)",
            "title": "varchar(128)",
        }
        if self.undownload_sheet in self.DB.get_tables():
            return
        self.DB.create_table(self.undownload_sheet, kwargs)

    def save_downloaded_music_info(self, data):
        field = ("artist_id", "music_id", "artist", "title")
        self.DB.insert_data(self.downloaded_sheet, field, data)

    def save_undownload_music_info(self, data):
        field = ("artist_id", "music_id", "artist", "title")
        self.DB.insert_data(self.undownload_sheet, field, data)

    def check_save(self, tbname, music_id, title, artist):
        # Return True if this music_id is already recorded in the given table.
        records = self.DB.select_table_record(tbname, f"where music_id={str(music_id)}")
        for record in records:
            if music_id in record:
                self.Logger.info(f"已下载:{music_id}\t<<{title}>>\t{artist}")
                return True
            else:
                return False

    def process_music_url_path(self, music_id, title):
        artist_path = self.make_artist_file()
        music_url = f"{self.download_url}{music_id}.mp3"
        music_path = f"{artist_path}/{title}_{self.artist}.mp3"
        return music_url, music_path, artist_path

    def process_music_id(self):
        # Collect the ids of the artist's hot songs and download the free ones.
        resp = requests.get(self.artist_url, headers=self.headers)
        html = etree.HTML(resp.text)
        href_xpath = "//*[@id='hotsong-list']//a/@href"
        hrefs = html.xpath(href_xpath)
        for href in hrefs:
            music_id = href.split("=")[1]
            vip, title, artist = self.process_url(music_id)
            if vip == "播放":
                music_url, music_path, artist_path = self.process_music_url_path(music_id, title)
                if not self.check_save(self.downloaded_sheet, music_id, title, artist):
                    self.download_music(music_id, title, artist_path)
                    data = [self.artist_id, music_id, self.artist, title]
                    self.save_downloaded_music_info(data)
            else:
                if not self.check_save(self.undownload_sheet, music_id, title, artist):
                    data = [self.artist_id, music_id, self.artist, title]
                    self.save_undownload_music_info(data)

    def process_url(self, music_id):
        url = f"{self.url}{music_id}"
        response = requests.get(url, headers=make_user_agent()).text
        resp = response.replace('<!--', '').replace('-->', '')
        soup = BeautifulSoup(resp, "html.parser")
        vip_h = soup.find("a", attrs={"data-res-action": "play"})  # text is "播放" (playable), "VIP尊享" (VIP only), or None
        title_h = soup.find("div", attrs={"class": "tit"})  # song title
        singer_h = soup.find_all("a", attrs={"class": "s-fc7"})  # artist links
        vip = vip_h.text if vip_h else ""
        title = title_h.text if title_h else "无"
        artist = singer_h[1].text if singer_h else "无"
        vip = re.sub(r'[\s]+', '', vip)
        title = re.sub(r'[\s]+', '', title).replace("/", "-").replace("*", "x")
        artist = re.sub(r'[\s]+', '', artist).replace("/", "-")
        return vip, title, artist

    def download_music(self, music_id, title, artist_path):
        music_url = f"https://link.hhtjim.com/163/{music_id}.mp3"
        music_data = requests.get(music_url).content
        music_path = f"{artist_path}/{title}_{self.artist}.mp3"
        with open(music_path, 'wb') as file:
            file.write(music_data)
        self.Logger.info(f"【{self.num}】ARTIST_ID:{self.artist_id}\tMUSIC_ID:{music_id}:\t<<{title}>>\t{self.artist}")

    def run(self):
        self.make_file()
        self.DB.connect()
        self.create_downloaded_table()
        self.create_undownload_table()
        try:
            self.process_music_id()
        except Exception as err:
            print(err)
        finally:
            self.DB.close()
            self.flag = True
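Each instance of this thread downloads the hot songs of a single artist. For a quick standalone test you can construct one directly; the artist id and name below are placeholders, and the database argument is accepted but the thread opens its own connection inside run():

# Standalone test of one worker thread; the id and name here are placeholders.
worker = CrawlWangYiYunSingerMusic(artist_id="12345", artist="测试歌手",
                                   database=None, num=1, save_path="F:/wyy")
worker.start()
worker.join()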

四、A main thread that controls the worker threads

class ThreadController:
    def __init__(self, save_path: str, start=1, end=10, size=10, length=10):
        self.save_path = save_path
        self.start = start    # first artist row id to process
        self.end = end        # stop before this row id
        self.size = size      # maximum number of live worker threads
        self.length = length  # number of records a single thread fetches
        self.thread_dict = {}
        self.Logger = Logger(self.save_path)
        self.tag = 1
        self.db = MyDataBase()
        self.Logger.info(f"\n已开启线程管理!\n前线程上限:{size}!\n线程数据上限:{length}!\n线程起始位置:{self.start}-{self.end}!")

    def add_thread(self, tag, t):
        self.thread_dict[tag] = t

    def remove_thread(self):
        # Drop every worker whose flag was set at the end of its run().
        for kv in list(self.thread_dict.items()):
            if kv[1].flag:
                del self.thread_dict[kv[0]]
                self.Logger.info(f"{kv[0]}号线程已结束!")

    def operation(self):
        if self.start < self.end:
            # Look up the next artist and start a download thread for it.
            data = self.db.select_table_record("artists", f"where id={self.start}")
            i, artist_id, artist = data[0]
            wyys = CrawlWangYiYunSingerMusic(database=self.db, artist_id=artist_id, artist=artist, num=i,
                                             save_path=self.save_path)
            wyys.start()
            self.Logger.info(f"{self.tag}号线程已开启!")
            self.add_thread(self.tag, wyys)
            self.tag += 1
            self.start += 1
        else:
            # All artists dispatched: wait for the remaining threads to finish.
            if not len(self.thread_dict):
                return True
            self.remove_thread()

    def run(self):
        self.db.connect()
        while True:
            if len(self.thread_dict) >= self.size:
                self.remove_thread()
                continue
            if self.operation():
                self.db.close()
                self.Logger.info("线程全部结束!")
                break
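Finally, one possible entry point once the artists table has been filled; the save path and id range here are assumptions for illustration, and size caps how many worker threads run at once:

if __name__ == "__main__":
    # Assumes CrawlWangYiYunSinger has already populated the artists table.
    controller = ThreadController(save_path="F:/wyy", start=1, end=1000, size=10)
    controller.run()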

That is all for this article on using a Python crawler to scrape the cloud music service's artists and download their free music; I hope it is helpful to fellow developers!


