youtube-dl/youtube_dl/extractor/googleplus.py

# coding: utf-8
from __future__ import unicode_literals

import re
import codecs

from .common import InfoExtractor
from ..utils import unified_strdate


class GooglePlusIE(InfoExtractor):
    IE_DESC = 'Google Plus'
    _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
    IE_NAME = 'plus.google'

    _TEST = {
        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
        'info_dict': {
            'id': 'ZButuJc6CtH',
            'ext': 'flv',
            'title': '嘆きの天使 降臨',
            'upload_date': '20120613',
            'uploader': '井上ヨシマサ',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Step 1: retrieve the post webpage to extract further information
        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')

        title = self._og_search_description(webpage).splitlines()[0]
        upload_date = unified_strdate(self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                    ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
            webpage, 'upload date', fatal=False, flags=re.VERBOSE))
        uploader = self._html_search_regex(
            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)

        # Step 2: simulate clicking the image box to launch the video
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(
            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, 'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')

        def unicode_escape(s):
            decoder = codecs.getdecoder('unicode_escape')
            return re.sub(
                r'\\u[0-9a-fA-F]{4,}',
                lambda m: decoder(m.group(0))[0],
                s)

        # Extract video links of all sizes
        formats = [{
            'url': unicode_escape(video_url),
            'ext': 'flv',
            'width': int(width),
            'height': int(height),
        } for width, height, video_url in re.findall(
            r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent\.com.*?)"', webpage)]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'upload_date': upload_date,
            'formats': formats,
        }
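
A minimal usage sketch, not part of the extractor file above: it shows how GooglePlusIE would have been exercised through youtube-dl's public YoutubeDL API, using the post URL from _TEST. Google+ was shut down in April 2019, so the request will no longer succeed; the snippet only illustrates the invocation pattern.

from __future__ import unicode_literals

import youtube_dl


def dump_googleplus_formats(url):
    # Run only the extraction step; download=False means the video itself is not fetched
    with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
        info = ydl.extract_info(url, download=False)
        for f in info.get('formats', []):
            print('%sx%s\t%s' % (f.get('width'), f.get('height'), f['url']))


if __name__ == '__main__':
    dump_googleplus_formats(
        'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH')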