运用Python——爬取网易云歌单的全部歌曲热评_爬虫的简单应用
@[toc]
免责声明:本程序仅供个人学习使用
功能目标:方便摘抄网易云音乐的热评,写一个爬虫程序,通过输入歌单号,爬取并保存歌单内歌曲的热评。
记录第一个自己完成的简单爬虫程序
1.展示截图
顺便安利一下这个日语歌单
2.代码
库:requests、bs4 、json、csv、time(可选)
import requests
from bs4 import BeautifulSoup
import json
from csv import writer
import time
# Start timing. time.clock() was removed in Python 3.8; perf_counter() is the
# modern monotonic replacement for measuring elapsed wall time.
t1 = time.perf_counter()

# Browser-like User-Agent so the NetEase endpoints treat us as a normal client.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
}

print("===========================\n获取网易云音乐歌单中所有歌曲热评\n===========================")
# Renamed from 'id' so we do not shadow the builtin of the same name.
playlist_id = input("请输入歌单id:")
print("正在搜索。。。\n")
playlist_url = f'http://music.163.com/playlist?id={playlist_id}'

# Fetch the playlist page and extract each song's name and id from the
# hidden <ul class="f-hide"> element embedded in the page source.
session = requests.session()
page = session.get(playlist_url, headers=headers)
soup = BeautifulSoup(page.content, 'lxml')
song_ul = soup.find('ul', {'class': 'f-hide'})
name_list = []
id_list = []
for music in song_ul.find_all('a'):
    # href looks like "/song?id=XXXX"; [9:] strips the "/song?id=" prefix.
    id_list.append(music['href'][9:])
    name_list.append(music.text)

total = len(name_list)      # number of songs found in the playlist
count = 1                   # 1-based index of the song being processed
allcount = 0                # running total of hot comments saved

# Gotcha: newline="" is required by the csv module to avoid blank lines on
# Windows; gb18030 keeps the Chinese text readable in Excel. The context
# manager guarantees the file is closed even if a request fails mid-way.
with open("网易云音乐歌单热评.csv", "w", encoding="gb18030", newline="") as file:
    # Local name 'csv_writer' avoids rebinding the imported csv.writer function.
    csv_writer = writer(file)
    for song_id in id_list:
        # Header row for this song: [index, song name].
        csv_writer.writerow([count, name_list[count - 1]])
        # Undocumented NetEase API: returns comment data for song R_SO_4_<id>.
        get_url = "http://music.163.com/api/v1/resource/comments/R_SO_4_" + song_id + "?limit=0&offset=0"
        r = requests.get(get_url, headers=headers)
        json_dict = json.loads(r.content.decode("utf-8"))
        hotcomments = json_dict["hotComments"]
        allcount += len(hotcomments)
        # One row per hot comment, in the order the API returned them.
        for no, comment in enumerate(hotcomments, start=1):
            nickname = comment["user"]["nickname"]
            # Flatten newlines so each comment stays on a single CSV row.
            content = comment["content"].replace("\n", " ")
            liked = str(comment["likedCount"]) + "赞"
            csv_writer.writerow(["", "", no, nickname, liked, content])
        count += 1
        # Blank separator row between songs.
        csv_writer.writerow('')

# Stop timing and report the totals.
t2 = time.perf_counter() - t1
print(f"共找到{total}首歌 {allcount}条热评 用时{t2:.2f}s", end="\n===========================\n")
input("按任意键退出。。。")
3.分析
常量的定义、界面设计
# Start the timer.
# NOTE(review): time.clock() was removed in Python 3.8 — use time.perf_counter()
# on modern interpreters.
t1=time.clock()
# Browser-like User-Agent so the server treats the request as a normal client.
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
}
print("===========================\n获取网易云音乐歌单中所有歌曲热评\n===========================")
# NOTE(review): 'id' shadows the builtin of the same name; a name like
# 'playlist_id' would be safer.
id= input("请输入歌单id:")
print("正在搜索。。。\n")
playlist_url = f'http://music.163.com/playlist?id={id}'# build the playlist page URL
依次爬取歌曲信息
# Get the song names and song ids contained in the playlist.
rs = requests.session()
r = rs.get(playlist_url, headers=headers)
soup = BeautifulSoup(r.content, 'lxml')
# The relevant info can be found in the page source of the playlist page
# (a hidden <ul class="f-hide"> element).
main = soup.find('ul', {'class': 'f-hide'})
name_list= []
id_list= []
for music in main.find_all('a'):
# [9:] presumably strips a "/song?id=" prefix from the href — verify against
# the actual page markup.
music_id= music['href'][9:]
music_name= music.text
name_list.append(music_name)
id_list.append(music_id)
# Counters: total songs, current song index, total hot comments collected.
total= len(name_list)
cout=1
allcout=0
创建表格
# Gotcha: newline must be set to "" for the csv module (avoids blank lines on
# Windows); gb18030 encoding keeps the Chinese text readable in Excel.
file= open("网易云音乐歌单热评.csv","w",encoding="gb18030",newline="")
# NOTE(review): this rebinds the imported csv.writer function, so it cannot be
# called again later; a distinct name (e.g. csv_writer) and a 'with' block for
# the file would be safer.
writer= writer(file)
爬取评论、保存
核心:此API会返回指定歌曲的评论数据,URL 形如 "http://music.163.com/api/v1/resource/comments/R_SO_4_" + 歌曲id + "?limit=0&offset=0"
# Look up comments by song id and record them in comment order.
for i in id_list:
# NOTE(review): 'list' shadows the builtin of the same name.
list=[cout,name_list[cout-1]]
writer.writerow(list)
get_url= "http://music.163.com/api/v1/resource/comments/R_SO_4_"+i+"?limit=0&offset=0"
r= requests.get(get_url,headers=headers)
json_dict=json.loads(r.content.decode("utf-8"))
hotcomments= json_dict["hotComments"]
allcout+=len(hotcomments)
no=1
# Extract the relevant fields from the returned JSON.
for j in hotcomments:
nickname= j["user"]["nickname"]
# Flatten newlines so each comment stays on one CSV row.
content= j["content"].replace("\n"," ")
liked= str(j["likedCount"])+"赞"
list=["","",no,nickname,liked,content]
writer.writerow(list)
no+=1
cout+=1
# Blank separator row between songs.
writer.writerow('')
file.close()
结束显示
# Stop the timer.
# NOTE(review): time.clock() was removed in Python 3.8 — use time.perf_counter().
t2= time.clock()-t1
# Report the totals: songs found, hot comments saved, elapsed time.
print(f"共找到{total}首歌 {allcout}条热评 用时{t2:.2f}s",end="\n===========================\n")
input("按任意键退出。。。")
缺点分析
当歌曲数量多会导致用时特别长
拓展:多线程爬虫
完
生活的最好状态是冷冷清清的风风火火——木心
欢迎在评论区交流 感谢浏览
转载自:https://juejin.cn/post/7139043877285019661