1
0
Fork 0
mirror of https://github.com/ytdl-org/youtube-dl.git synced 2024-11-18 08:16:10 +00:00
youtube-dl/youtube_dl/extractor/tudou.py

88 lines
3 KiB
Python
Raw Normal View History

# coding: utf-8
2014-08-31 22:16:26 +00:00
from __future__ import unicode_literals
2013-06-25 17:48:08 +00:00
import re
import json
2013-06-25 17:48:08 +00:00
from .common import InfoExtractor
class TudouIE(InfoExtractor):
    """Extractor for videos hosted on tudou.com.

    A tudou page is handled one of two ways:

    * if the page embeds a Youku video (a ``vcode`` field is present in
      the page source), extraction is delegated to the Youku extractor
      via a ``youku:`` URL result;
    * otherwise the page carries a ``segs`` JSON blob mapping quality
      labels to lists of f4v segments, and each segment's direct URL is
      resolved through the ``v2.tudou.com/f`` info endpoint.
    """
    _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/.*?/(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
    _TESTS = [{
        'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
        'md5': '140a49ed444bd22f93330985d8475fcb',
        'info_dict': {
            'id': '159448201',
            'ext': 'f4v',
            'title': '卡马乔国足开大脚长传冲吊集锦',
            # raw string: '\.' is not a valid str escape in modern Python
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/',
        'info_dict': {
            'id': '117049447',
            'ext': 'f4v',
            'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }]

    def _url_for_id(self, id, quality=None):
        """Resolve the direct media URL for a single segment id.

        The info endpoint replies with a small XML document whose ``<f>``
        element wraps the final URL.  ``quality``, when given, is a digit
        string appended to the query as the ``hd`` flag.
        """
        info_url = 'http://v2.tudou.com/f?id=' + str(id)
        if quality:
            # NOTE(review): the endpoint historically accepted 'hd' with the
            # digit glued on (no '=') — preserved as-is.
            info_url += '&hd' + quality
        webpage = self._download_webpage(info_url, id, 'Opening the info webpage')
        return self._html_search_regex(r'>(.+?)</f>', webpage, 'video url')

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Pages that merely wrap a Youku video expose a 'vcode' field;
        # delegate those to the Youku extractor instead of parsing segments.
        m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage)
        if m and m.group(1):
            return {
                '_type': 'url',
                'url': 'youku:' + m.group(1),
                'ie_key': 'Youku'
            }

        title = self._search_regex(
            r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
        thumbnail_url = self._search_regex(
            r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)

        segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
        segments = json.loads(segs_json)
        # It looks like the keys are the arguments that have to be passed as
        # the hd field in the request url, we pick the higher.
        # Also, filter non-number qualities (see issue #3643).
        quality = max((k for k in segments if k.isdigit()), key=int)
        parts = segments[quality]

        len_parts = len(parts)
        if len_parts > 1:
            self.to_screen('%s: found %s parts' % (video_id, len_parts))

        result = []
        for part in parts:
            part_id = part['k']
            final_url = self._url_for_id(part_id, quality)
            # Container extension comes from the URL path, query stripped.
            ext = (final_url.split('?')[0]).split('.')[-1]
            result.append({
                'id': '%s' % part_id,
                'url': final_url,
                'ext': ext,
                'title': title,
                'thumbnail': thumbnail_url,
            })

        return {
            '_type': 'multi_video',
            'entries': result,
            'id': video_id,
            'title': title,
        }