1
0
Fork 0
mirror of https://github.com/ytdl-org/youtube-dl.git synced 2024-11-18 08:16:10 +00:00
youtube-dl/youtube_dl/extractor/vbox7.py

65 lines
2.2 KiB
Python
Raw Normal View History

# encoding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
)
class Vbox7IE(InfoExtractor):
    """Extractor for vbox7.com ``/play:<id>`` video pages."""
    _VALID_URL = r'http://(?:www\.)?vbox7\.com/play:(?P<id>[^/]+)'

    _TEST = {
        'url': 'http://vbox7.com/play:249bb972c2',
        'md5': '99f65c0c9ef9b682b97313e052734c3f',
        'info_dict': {
            'id': '249bb972c2',
            'ext': 'mp4',
            'title': 'Смях! Чудо - чист за секунди - Скрита камера',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # need to get the page 3 times for the correct jsSecretToken cookie
        # which is necessary for the correct title
        def get_session_id():
            redirect_page = self._download_webpage(url, video_id)
            session_id_url = self._search_regex(
                r'var\s*url\s*=\s*\'([^\']+)\';', redirect_page,
                'session id url')
            self._download_webpage(
                compat_urlparse.urljoin(url, session_id_url), video_id,
                'Getting session id')

        get_session_id()
        get_session_id()

        webpage = self._download_webpage(url, video_id,
                                         'Downloading redirect page')

        title = self._html_search_regex(r'<title>(.*)</title>',
                                        webpage, 'title').split('/')[0].strip()

        info_url = "http://vbox7.com/play/magare.do"
        # urlencode() returns text; urllib.request.Request on Python 3
        # requires the POST body to be bytes, so encode it explicitly
        # (the payload is pure ASCII: 'as3', 'vid' and a hex-ish id).
        data = compat_urllib_parse.urlencode(
            {'as3': '1', 'vid': video_id}).encode('ascii')
        info_request = compat_urllib_request.Request(info_url, data)
        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')
        if info_response is None:
            raise ExtractorError('Unable to extract the media url')
        # response looks like 'url=<media url>&thumb=<thumbnail url>' —
        # split on '&' and take the value after the first '=' of each part
        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))

        return {
            'id': video_id,
            'url': final_url,
            'title': title,
            'thumbnail': thumbnail_url,
        }