# coding: utf-8
from __future__ import unicode_literals

import re
import itertools

from .common import InfoExtractor
from ..utils import (
    get_element_by_id,
    clean_html,
    ExtractorError,
    remove_start,
)
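
# Extractors for kuwo.cn: individual songs, albums, charts, singer pages,
# categories and MVs. The song and MV extractors share KuwoBaseIE below.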


class KuwoBaseIE(InfoExtractor):
    _FORMATS = [
        {'format': 'ape', 'ext': 'ape', 'preference': 100},
        {'format': 'mp3-320', 'ext': 'mp3', 'br': '320kmp3', 'abr': 320, 'preference': 80},
        {'format': 'mp3-192', 'ext': 'mp3', 'br': '192kmp3', 'abr': 192, 'preference': 70},
        {'format': 'mp3-128', 'ext': 'mp3', 'br': '128kmp3', 'abr': 128, 'preference': 60},
        {'format': 'wma', 'ext': 'wma', 'preference': 20},
        {'format': 'aac', 'ext': 'aac', 'abr': 48, 'preference': 10}
    ]
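
    # Each candidate format above is resolved through Kuwo's anti-leeching
    # endpoint (antiserver.kuwo.cn/anti.s), which replies with either a plain
    # media URL or the literal string 'IPDeny' when the track is blocked in
    # the caller's region; replies that are not http(s) URLs are skipped.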
    def _get_formats(self, song_id):
        formats = []
        for file_format in self._FORMATS:
            song_url = self._download_webpage(
                'http://antiserver.kuwo.cn/anti.s?format=%s&br=%s&rid=MUSIC_%s&type=convert_url&response=url' %
                (file_format['ext'], file_format.get('br', ''), song_id),
                song_id, note='Download %s url info' % file_format['format'],
            )

            if song_url == 'IPDeny':
                raise ExtractorError('This song is blocked in this region', expected=True)

            if song_url.startswith('http://') or song_url.startswith('https://'):
                formats.append({
                    'url': song_url,
                    'format_id': file_format['format'],
                    'format': file_format['format'],
                    'preference': file_format['preference'],
                    'abr': file_format.get('abr'),
                })
        self._sort_formats(formats)
        return formats
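
# KuwoIE (songs) and KuwoMvIE (MVs) both build on KuwoBaseIE; they differ only
# in how the landing page is parsed and, for MVs, in the extra video formats
# appended to _FORMATS further below.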


class KuwoIE(KuwoBaseIE):
    IE_NAME = 'kuwo:song'
    IE_DESC = '酷我音乐'
    _VALID_URL = r'http://www\.kuwo\.cn/yinyue/(?P<id>\d+?)/'
    _TESTS = [{
        'url': 'http://www.kuwo.cn/yinyue/635632/',
        'info_dict': {
            'id': '635632',
            'ext': 'ape',
            'title': '爱我别走',
            'creator': '张震岳',
            'upload_date': '20080122',
            'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c'
        },
        'skip': 'this song has been offline because of copyright issues',
    }, {
        'url': 'http://www.kuwo.cn/yinyue/6446136/',
        'info_dict': {
            'id': '6446136',
            'ext': 'mp3',
            'title': '',
            'creator': 'IU',
            'upload_date': '20150518',
        },
        'params': {
            'format': 'mp3-320'
        },
    }]
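
    # Extraction flow: fetch the song page, bail out early on the copyright
    # takedown notice, scrape title/artist/lyrics from the HTML, resolve the
    # audio URLs via KuwoBaseIE._get_formats and, when an album id is found,
    # fetch the album page once more to derive upload_date.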
    def _real_extract(self, url):
        song_id = self._match_id(url)
        webpage = self._download_webpage(
            url, song_id, note='Download song detail info',
            errnote='Unable to get song detail info')

        if '对不起,该歌曲由于版权问题已被下线,将返回网站首页' in webpage:
            raise ExtractorError('this song has been offline because of copyright issues', expected=True)

        song_name = self._html_search_regex(
            r'(?s)class="(?:[^"\s]+\s+)*title(?:\s+[^"\s]+)*".*?<h1[^>]+title="([^"]+)"', webpage, 'song name')

        singer_name = self._html_search_regex(
            r'<div[^>]+class="s_img">\s*<a[^>]+title="([^>]+)"',
            webpage, 'singer name', fatal=False)

        lrc_content = clean_html(get_element_by_id('lrcContent', webpage))
        if lrc_content == '暂无':  # indicates no lyrics
            lrc_content = None

        formats = self._get_formats(song_id)

        album_id = self._html_search_regex(
            r'<p[^>]+class="album"[^<]+<a[^>]+href="http://www\.kuwo\.cn/album/(\d+)/"',
            webpage, 'album id', fatal=False)

        publish_time = None
        if album_id is not None:
            album_info_page = self._download_webpage(
                'http://www.kuwo.cn/album/%s/' % album_id, song_id,
                note='Download album detail info',
                errnote='Unable to get album detail info')

            publish_time = self._html_search_regex(
                r'发行时间:(\d{4}-\d{2}-\d{2})', album_info_page,
                'publish time', fatal=False)
            if publish_time:
                publish_time = publish_time.replace('-', '')

        return {
            'id': song_id,
            'title': song_name,
            'creator': singer_name,
            'upload_date': publish_time,
            'description': lrc_content,
            'formats': formats,
        }


class KuwoAlbumIE(InfoExtractor):
    IE_NAME = 'kuwo:album'
    IE_DESC = '酷我音乐 - 专辑'
    _VALID_URL = r'http://www\.kuwo\.cn/album/(?P<id>\d+?)/'
    _TEST = {
        'url': 'http://www.kuwo.cn/album/502294/',
        'info_dict': {
            'id': '502294',
            'title': 'M',
            'description': 'md5:6a7235a84cc6400ec3b38a7bdaf1d60c',
        },
        'playlist_count': 2,
    }

    def _real_extract(self, url):
        album_id = self._match_id(url)

        webpage = self._download_webpage(
            url, album_id, note='Download album info',
            errnote='Unable to get album info')

        album_name = self._html_search_regex(
            r'<div[^>]+class="comm"[^<]+<h1[^>]+title="([^"]+)"', webpage,
            'album name')
        album_intro = remove_start(
            clean_html(get_element_by_id('intro', webpage)),
            '%s简介:' % album_name)

        entries = [
            self.url_result(song_url, 'Kuwo') for song_url in re.findall(
                r'<p[^>]+class="listen"><a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+/)"',
                webpage)
        ]
        return self.playlist_result(entries, album_id, album_name, album_intro)


class KuwoChartIE(InfoExtractor):
    IE_NAME = 'kuwo:chart'
    IE_DESC = '酷我音乐 - 排行榜'
    _VALID_URL = r'http://yinyue\.kuwo\.cn/billboard_(?P<id>[^.]+).htm'
    _TEST = {
        'url': 'http://yinyue.kuwo.cn/billboard_香港中文龙虎榜.htm',
        'info_dict': {
            'id': '香港中文龙虎榜',
            'title': '香港中文龙虎榜',
            'description': 're:\d{4}\d{2}',
        },
        'playlist_mincount': 10,
    }

    def _real_extract(self, url):
        chart_id = self._match_id(url)
        webpage = self._download_webpage(
            url, chart_id, note='Download chart info',
            errnote='Unable to get chart info')

        chart_name = self._html_search_regex(
            r'<h1[^>]+class="unDis">([^<]+)</h1>', webpage, 'chart name')

        chart_desc = self._html_search_regex(
            r'<p[^>]+class="tabDef">(\d{4}\d{2}期)</p>', webpage, 'chart desc')

        entries = [
            self.url_result(song_url, 'Kuwo') for song_url in re.findall(
                r'<a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+)/"', webpage)
        ]
        return self.playlist_result(entries, chart_id, chart_name, chart_desc)


class KuwoSingerIE(InfoExtractor):
    IE_NAME = 'kuwo:singer'
    IE_DESC = '酷我音乐 - 歌手'
    _VALID_URL = r'http://www\.kuwo\.cn/mingxing/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.kuwo.cn/mingxing/bruno+mars/',
        'info_dict': {
            'id': 'bruno+mars',
            'title': 'Bruno Mars',
        },
        'playlist_count': 10,
    }, {
        'url': 'http://www.kuwo.cn/mingxing/Ali/music.htm',
        'info_dict': {
            'id': 'Ali',
            'title': 'Ali',
        },
        'playlist_mincount': 95,
        'skip': 'Regularly stalls travis build',  # See https://travis-ci.org/rg3/youtube-dl/jobs/78878540
    }]

    def _real_extract(self, url):
        singer_id = self._match_id(url)
        webpage = self._download_webpage(
            url, singer_id, note='Download singer info',
            errnote='Unable to get singer info')
        singer_name = self._html_search_regex(
            r'<div class="title clearfix">\s*<h1>([^<]+)<span', webpage, 'singer name'
        )

        entries = []
        first_page_only = False if re.search(r'/music(?:_\d+)?\.htm', url) else True
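
        # Only URLs that already point at a music(_N).htm listing are paged
        # through; the bare /mingxing/<id>/ landing page is limited to the
        # first ten songs, and paging otherwise stops once the page no longer
        # links to 下一页 ("next page").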
        for page_num in itertools.count(1):
            webpage = self._download_webpage(
                'http://www.kuwo.cn/mingxing/%s/music_%d.htm' % (singer_id, page_num),
                singer_id, note='Download song list page #%d' % page_num,
                errnote='Unable to get song list page #%d' % page_num)

            entries.extend([
                self.url_result(song_url, 'Kuwo') for song_url in re.findall(
                    r'<p[^>]+class="m_name"><a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+)/',
                    webpage)
            ][:10 if first_page_only else None])

            if first_page_only or not re.search(r'<a[^>]+href="[^"]+">下一页</a>', webpage):
                break

        return self.playlist_result(entries, singer_id, singer_name)


class KuwoCategoryIE(InfoExtractor):
    IE_NAME = 'kuwo:category'
    IE_DESC = '酷我音乐 - 分类'
    _VALID_URL = r'http://yinyue\.kuwo\.cn/yy/cinfo_(?P<id>\d+?).htm'
    _TEST = {
        'url': 'http://yinyue.kuwo.cn/yy/cinfo_86375.htm',
        'info_dict': {
            'id': '86375',
            'title': '八十年代精选',
            'description': '这些都是属于八十年代的回忆!',
        },
        'playlist_count': 30,
    }

    def _real_extract(self, url):
        category_id = self._match_id(url)
        webpage = self._download_webpage(
            url, category_id, note='Download category info',
            errnote='Unable to get category info')

        category_name = self._html_search_regex(
            r'<h1[^>]+title="([^<>]+?)">[^<>]+?</h1>', webpage, 'category name')

        category_desc = remove_start(
            get_element_by_id('intro', webpage).strip(),
            '%s简介:' % category_name)
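
        # The category page embeds its track list as a JavaScript assignment
        # (var jsonm = {...};); the regex below pulls that blob out, parses it
        # as JSON, and each entry's 'musicrid' is turned back into a kuwo.cn
        # song URL handled by KuwoIE.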
        jsonm = self._parse_json(self._html_search_regex(
            r'var\s+jsonm\s*=\s*([^;]+);', webpage, 'category songs'), category_id)

        entries = [
            self.url_result('http://www.kuwo.cn/yinyue/%s/' % song['musicrid'], 'Kuwo')
            for song in jsonm['musiclist']
        ]
        return self.playlist_result(entries, category_id, category_name, category_desc)
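

# KuwoMvIE reuses KuwoBaseIE._get_formats for format resolution and simply
# extends _FORMATS with two video entries (mkv, mp4) that are requested from
# the same anti.s endpoint.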
class KuwoMvIE(KuwoBaseIE):
    IE_NAME = 'kuwo:mv'
    IE_DESC = '酷我音乐 - MV'
    _VALID_URL = r'http://www\.kuwo\.cn/mv/(?P<id>\d+?)/'
    _TEST = {
        'url': 'http://www.kuwo.cn/mv/6480076/',
        'info_dict': {
            'id': '6480076',
            'ext': 'mkv',
            'title': '我们家MV',
            'creator': '2PM',
        },
    }
    _FORMATS = KuwoBaseIE._FORMATS + [
        {'format': 'mkv', 'ext': 'mkv', 'preference': 250},
        {'format': 'mp4', 'ext': 'mp4', 'preference': 200},
    ]

    def _real_extract(self, url):
        song_id = self._match_id(url)
        webpage = self._download_webpage(
            url, song_id, note='Download mv detail info: %s' % song_id,
            errnote='Unable to get mv detail info: %s' % song_id)

        mobj = re.search(
            r'<h1[^>]+title="(?P<song>[^"]+)">[^<]+<span[^>]+title="(?P<singer>[^"]+)"',
            webpage)
        if mobj:
            song_name = mobj.group('song')
            singer_name = mobj.group('singer')
        else:
            raise ExtractorError('Unable to find song or singer names')

        formats = self._get_formats(song_id)

        return {
            'id': song_id,
            'title': song_name,
            'creator': singer_name,
            'formats': formats,
        }
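
# A minimal usage sketch (not part of the extractor itself): these classes are
# normally driven through youtube_dl.YoutubeDL rather than instantiated
# directly, e.g.
#
#     import youtube_dl
#
#     with youtube_dl.YoutubeDL({'format': 'mp3-320'}) as ydl:
#         ydl.download(['http://www.kuwo.cn/yinyue/6446136/'])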