Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2024-12-22 08:47:41 +00:00)
[wat] extract all formats

parent 4a12077855
commit c5f5155100

1 changed file (youtube_dl/extractor/wat.py) with 59 additions and 70 deletions
@@ -2,25 +2,26 @@
 from __future__ import unicode_literals
 
 import re
-import hashlib
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     unified_strdate,
+    HEADRequest,
+    float_or_none,
 )
 
 
 class WatIE(InfoExtractor):
-    _VALID_URL = r'(?:wat:(?P<real_id>\d{8})|https?://www\.wat\.tv/video/(?P<display_id>.*)-(?P<short_id>.*?)_.*?\.html)'
+    _VALID_URL = r'(?:wat:|https?://(?:www\.)?wat\.tv/video/.*-)(?P<id>[0-9a-z]+)'
     IE_NAME = 'wat.tv'
     _TESTS = [
         {
             'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
-            'md5': 'ce70e9223945ed26a8056d413ca55dc9',
+            'md5': '83d882d9de5c9d97f0bb2c6273cde56a',
             'info_dict': {
                 'id': '11713067',
-                'display_id': 'soupe-figues-l-orange-aux-epices',
                 'ext': 'mp4',
                 'title': 'Soupe de figues à l\'orange et aux épices',
                 'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
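The old pattern captured separate real_id, display_id and short_id groups; the new one funnels both the wat:<id> form and the page-URL short id into a single id group, which _real_extract (third hunk below) normalizes with compat_str(int(video_id, 36)). A minimal sketch of that normalization against the first test URL, with plain str standing in for youtube-dl's compat_str:

    import re

    VALID_URL = r'(?:wat:|https?://(?:www\.)?wat\.tv/video/.*-)(?P<id>[0-9a-z]+)'
    url = 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html'

    video_id = re.match(VALID_URL, url).group('id')  # '6z1uz', the base-36 short id
    if not (video_id.isdigit() and len(video_id) > 6):
        video_id = str(int(video_id, 36))            # '11713067', the id expected by the test
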
@@ -33,7 +34,6 @@ class WatIE(InfoExtractor):
             'md5': 'fbc84e4378165278e743956d9c1bf16b',
             'info_dict': {
                 'id': '11713075',
-                'display_id': 'gregory-lemarchal-voix-ange',
                 'ext': 'mp4',
                 'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
                 'description': 'md5:b7a849cf16a2b733d9cd10c52906dee3',
@@ -44,96 +44,85 @@ class WatIE(InfoExtractor):
         },
     ]
 
-    def download_video_info(self, real_id):
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        video_id = video_id if video_id.isdigit() and len(video_id) > 6 else compat_str(int(video_id, 36))
+
         # 'contentv4' is used in the website, but it also returns the related
         # videos, we don't need them
-        info = self._download_json('http://www.wat.tv/interface/contentv3/' + real_id, real_id)
-        return info['media']
-
-    def _real_extract(self, url):
-        def real_id_for_chapter(chapter):
-            return chapter['tc_start'].split('-')[0]
-        mobj = re.match(self._VALID_URL, url)
-        display_id = mobj.group('display_id')
-        real_id = mobj.group('real_id')
-        if not real_id:
-            short_id = mobj.group('short_id')
-            webpage = self._download_webpage(url, display_id or short_id)
-            real_id = self._search_regex(r'xtpage = ".*-(.*?)";', webpage, 'real id')
-
-        video_info = self.download_video_info(real_id)
+        video_info = self._download_json(
+            'http://www.wat.tv/interface/contentv3/' + video_id, video_id)['media']
 
         error_desc = video_info.get('error_desc')
         if error_desc:
             raise ExtractorError(
                 '%s returned error: %s' % (self.IE_NAME, error_desc), expected=True)
 
-        geo_list = video_info.get('geoList')
-        country = geo_list[0] if geo_list else ''
-
         chapters = video_info['chapters']
         first_chapter = chapters[0]
-        files = video_info['files']
-        first_file = files[0]
 
-        if real_id_for_chapter(first_chapter) != real_id:
+        def video_id_for_chapter(chapter):
+            return chapter['tc_start'].split('-')[0]
+
+        if video_id_for_chapter(first_chapter) != video_id:
             self.to_screen('Multipart video detected')
-            chapter_urls = []
-            for chapter in chapters:
-                chapter_id = real_id_for_chapter(chapter)
-                # Yes, when we this chapter is processed by WatIE,
-                # it will download the info again
-                chapter_info = self.download_video_info(chapter_id)
-                chapter_urls.append(chapter_info['url'])
-            entries = [self.url_result(chapter_url) for chapter_url in chapter_urls]
-            return self.playlist_result(entries, real_id, video_info['title'])
-
-        upload_date = None
-        if 'date_diffusion' in first_chapter:
-            upload_date = unified_strdate(first_chapter['date_diffusion'])
+            entries = [self.url_result('wat:%s' % video_id_for_chapter(chapter)) for chapter in chapters]
+            return self.playlist_result(entries, video_id, video_info['title'])
         # Otherwise we can continue and extract just one part, we have to use
-        # the short id for getting the video url
+        # the video id for getting the video url
 
-        formats = [{
-            'url': 'http://wat.tv/get/android5/%s.mp4' % real_id,
-            'format_id': 'Mobile',
-        }]
+        date_diffusion = first_chapter.get('date_diffusion')
+        upload_date = unified_strdate(date_diffusion) if date_diffusion else None
 
-        fmts = [('SD', 'web')]
-        if first_file.get('hasHD'):
-            fmts.append(('HD', 'webhd'))
+        def extract_url(path_template, url_type):
+            req_url = 'http://www.wat.tv/get/%s' % (path_template % video_id)
+            head = self._request_webpage(HEADRequest(req_url), video_id, 'Extracting %s url' % url_type)
+            red_url = head.geturl()
+            if req_url == red_url:
+                raise ExtractorError(
+                    '%s said: Sorry, this video is not available from your country.' % self.IE_NAME,
+                    expected=True)
+            return red_url
 
-        def compute_token(param):
-            timestamp = '%08x' % int(self._download_webpage(
-                'http://www.wat.tv/servertime', real_id,
-                'Downloading server time').split('|')[0])
-            magic = '9b673b13fa4682ed14c3cfa5af5310274b514c4133e9b3a81e6e3aba009l2564'
-            return '%s/%s' % (hashlib.md5((magic + param + timestamp).encode('ascii')).hexdigest(), timestamp)
+        m3u8_url = extract_url('ipad/%s.m3u8', 'm3u8')
+        http_url = extract_url('android5/%s.mp4', 'http')
 
-        for fmt in fmts:
-            webid = '/%s/%s' % (fmt[1], real_id)
-            video_url = self._download_webpage(
-                'http://www.wat.tv/get%s?token=%s&getURL=1&country=%s' % (webid, compute_token(webid), country),
-                real_id,
-                'Downloading %s video URL' % fmt[0],
-                'Failed to download %s video URL' % fmt[0],
-                False)
-            if not video_url:
+        formats = []
+        m3u8_formats = self._extract_m3u8_formats(
+            m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
+        formats.extend(m3u8_formats)
+        formats.extend(self._extract_f4m_formats(
+            m3u8_url.replace('ios.', 'web.').replace('.m3u8', '.f4m'),
+            video_id, f4m_id='hds', fatal=False))
+        for m3u8_format in m3u8_formats:
+            mobj = re.search(
+                r'audio.*?%3D(\d+)(?:-video.*?%3D(\d+))?', m3u8_format['url'])
+            if not mobj:
                 continue
-            formats.append({
-                'url': video_url,
-                'ext': 'mp4',
-                'format_id': fmt[0],
+            abr, vbr = mobj.groups()
+            abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
+            m3u8_format.update({
+                'vbr': vbr,
+                'abr': abr,
             })
+            if not vbr or not abr:
+                continue
+            f = m3u8_format.copy()
+            f.update({
+                'url': re.sub(r'%s-\d+00-\d+' % video_id, '%s-%d00-%d' % (video_id, round(vbr / 100), round(abr)), http_url),
+                'format_id': f['format_id'].replace('hls', 'http'),
+                'protocol': 'http',
+            })
+            formats.append(f)
+        self._sort_formats(formats)
 
         return {
-            'id': real_id,
-            'display_id': display_id,
+            'id': video_id,
             'title': first_chapter['title'],
             'thumbnail': first_chapter['preview'],
             'description': first_chapter['description'],
             'view_count': video_info['views'],
             'upload_date': upload_date,
-            'duration': first_file['duration'],
+            'duration': video_info['files'][0]['duration'],
             'formats': formats,
         }
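The rewritten extractor builds its format list from the HLS master playlist (plus an HDS manifest derived from the same URL) and then adds one progressive HTTP format per HLS variant: the audio/video bitrates are parsed out of each variant URL and substituted into the redirect target of the android5 MP4 URL. A rough sketch of that per-variant rewrite, with hypothetical CDN URLs (the real ones are the redirect targets returned by extract_url) and plain floats standing in for float_or_none:

    import re

    # Hypothetical URLs; the real ones come from wat.tv redirects.
    video_id = '11713067'
    m3u8_format_url = 'http://cdn.example/ios/11713067.m3u8?filter=audio%3D64000-video%3D1404000'
    http_url = 'http://cdn.example/vod/11713067-150000-96.mp4'

    # Pull the audio/video bitrates (bps) out of the HLS variant URL, as the new loop does.
    mobj = re.search(r'audio.*?%3D(\d+)(?:-video.*?%3D(\d+))?', m3u8_format_url)
    abr, vbr = mobj.groups()                            # ('64000', '1404000')
    abr, vbr = float(abr) / 1000, float(vbr) / 1000     # float_or_none(x, 1000): bps -> kbit/s

    # Rewrite the progressive URL for this variant's bitrates.
    progressive_url = re.sub(
        r'%s-\d+00-\d+' % video_id,
        '%s-%d00-%d' % (video_id, round(vbr / 100), round(abr)),
        http_url)
    print(progressive_url)  # http://cdn.example/vod/11713067-1400-64.mp4

Variants whose URL carries no video bitrate (audio-only streams) keep only their HLS entry, mirroring the `if not vbr or not abr: continue` guard in the diff.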