mirror of https://github.com/ytdl-org/youtube-dl.git synced 2024-06-26 05:39:34 +00:00

Compare commits


14 commits

Author SHA1 Message Date
memo 923b247e93
Merge a53b4004cf into 0153b387e5 2024-06-14 13:56:26 +00:00
Paper 0153b387e5
[VidLii] Add 720p support (#30924)
* [VidLii] Add HD support  (yt-dlp backport-ish)

* Also fix a bug with the view count

---------

Co-authored-by: dirkf <fieldhouse@gmx.net>
2024-06-11 13:21:39 +01:00
dirkf a48fe7491d [ORF] Skip tests with limited availability 2024-06-11 12:52:13 +01:00
dirkf e20ca543f0 [ORF] Re-factor and update ORFFM4StoryIE
* fix getting media via DASH instead of inaccessible mp4 (see the sketch after this entry)
* also get in-page YT media
2024-06-11 12:52:13 +01:00
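
A minimal sketch of the DASH switch described above, in the usual youtube-dl extractor idiom; dash_url and mp4_url are illustrative variables, not the actual ORF API fields:

    # Prefer the DASH manifest; fall back to a direct mp4 only if one exists.
    # _extract_mpd_formats and _sort_formats are standard InfoExtractor helpers.
    formats = []
    if dash_url:
        formats.extend(self._extract_mpd_formats(
            dash_url, video_id, mpd_id='dash', fatal=False))
    elif mp4_url:
        formats.append({'url': mp4_url, 'ext': 'mp4'})
    self._sort_formats(formats)
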
dirkf e39466051f [ORF] Support sound.orf.at, updating ORFRadioIE
* maintain support for xx.orf.at/player/... URLs
* add `ORFRadioCollectionIE` to support playlists in ORF Sound (sketched after this entry)
* back-port and re-work `ORFPodcastIE` from https://github.com/yt-dlp/yt-dlp/pull/8486, thx Esokrates
2024-06-11 12:52:13 +01:00
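
A hedged sketch of how a collection extractor such as the new `ORFRadioCollectionIE` typically defers its items to the single-episode extractor; the JSON field names below are assumptions for illustration, not the real ORF Sound schema:

    # Each item becomes a url_result that ORFRadioIE later resolves.
    entries = [
        self.url_result(item['href'], ie='ORFRadio')
        for item in (collection.get('items') or [])
        if item.get('href')
    ]
    return self.playlist_result(entries, playlist_id, collection.get('title'))
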
dirkf d95c0d203f [ORF] Support on.orf.at, replacing ORFTVthekIE
* add `ORFONIE`, back-porting yt-dlp PR https://github.com/yt-dlp/yt-dlp/pull/9113 and friends: thx HobbyistDev, TuxCoder, seproDev
* re-factor to support livestreams via new `ORFONliveIE` (see the sketch after this entry)
2024-06-11 12:52:13 +01:00
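
For the livestream half (`ORFONliveIE`), the common youtube-dl pattern is HLS extraction with the live flag; a sketch only, with hls_url standing in for whatever field the real extractor reads:

    formats = self._extract_m3u8_formats(
        hls_url, video_id, ext='mp4', entry_protocol='m3u8',
        m3u8_id='hls', live=True, fatal=False)
    self._sort_formats(formats)
    return {
        'id': video_id,
        'title': self._live_title(title),  # appends the current time for live streams
        'formats': formats,
        'is_live': True,
    }
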
dirkf 3bde6a5752 [test] Improve download test
* skip reason can't be unicode in Py2
* remove duplicate assert...Equal functions
2024-06-11 12:52:13 +01:00
dirkf 50f6c5668a [core] Re-factor with _fill_common_fields() as used in yt-dlp 2024-06-11 12:52:13 +01:00
dirkf b4ff08bd2d [core] Safer handling of nested playlist data 2024-06-11 12:52:13 +01:00
kmnx 88bd8b9f87
[mixcloud] updated mixcloud API server address (#32557)
* updated mixcloud API server address
* fix tests
* etc

---------

Co-authored-by: dirkf <fieldhouse@gmx.net>
2024-06-11 12:38:24 +01:00
memo a53b4004cf [tagesschau] remove duplicates from playlists 2021-03-13 22:17:51 +01:00
memo ac5b267afe [tagesschau] make some generic titles more specific
For example, this turns `Ganze Sendung` into `tagesschau 20 Uhr - 04.12.14 20:00`,
as the old extractor did; this is especially important
for playlists of such videos (worked example after this entry).
2021-03-13 21:57:55 +01:00
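
A worked example of the rewrite, using the clipData shape that the new `_handle_generic_titles` reads from the page's AGFdebug tracker entry (values are illustrative):

    item = {'tracker': 'AGFdebug',
            'clipData': {'program': 'tagesschau 20 Uhr',
                         'title': 'TS_Sendung_04.12.14_20:00'}}
    # the last two '_'-separated parts of clipData['title'] are date and time
    parts = item['clipData']['title'].split('_')[-2:]
    title = '%s - %s' % (item['clipData']['program'], ' '.join(parts))
    assert title == 'tagesschau 20 Uhr - 04.12.14 20:00'
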
memo 2a0d9305f4 [tagesschau] fix deprecation warning about inline regex flags
The warning during test_download (the fix is sketched after this entry):

    DeprecationWarning: Flags not at the start of the expression '(?s)<p[^>]+class="in' (truncated)
2021-03-13 12:05:05 +01:00
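
The fix is mechanical: the old DOWNLOAD_REGEX began with an inline (?s), so once it was interpolated into a larger pattern the flag ended up mid-expression, which Python deprecates; moving it to a flags argument silences the warning. Schematically (patterns shortened):

    import re

    DOWNLOAD_REGEX = r'<p>Wir bieten dieses (?P<kind>Video|Audio) .*?</p>'  # no inline (?s)
    pattern = r'<p[^>]+class="infotext"[^>]*>.*?%s' % DOWNLOAD_REGEX
    matches = re.findall(pattern, 'some webpage html', flags=re.S)
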
memo 74bb98431e [tagesschau] fix extraction
This fixes extraction for tagesschau.de, the news portal of the German public broadcaster ARD.

Details:

- the new website delivers the video metadata as JSON embedded in an attribute of the player element
- some old pages (e.g. [1]) still use the old format of `TagesschauIE`, so the old extraction code is kept as fallback
- the old player format is not used anymore, so `TagesschauPlayerIE` is removed
- several optional fields are added, in particular subtitles
- crashes on empty playlists are fixed
- some test cases are updated, as some videos are not available anymore
- the video id in one test case is changed from `ts-5727` to `video-45741` because it appears to be the more permanent identifier;
  for example [1] is still available, while [2] cannot be accessed anymore
- the format identifiers are normalized to `s`,..,`xl` as previously used by `TagesschauPlayerIE` (instead of `webs.h264`,..,`webxl.h264`); see the worked examples after this entry

[1] https://www.tagesschau.de/multimedia/video/video-98529~_bab-sendung-209.html
[2] https://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html
2021-03-13 12:05:05 +01:00
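
The normalization is a pure string transform; reproducing the `_normalize_format_id` helper exactly as this commit adds it to tagesschau.py:

    import re

    def _normalize_format_id(format_id, ext):  # as added in the tagesschau.py diff below
        if format_id:
            m = re.match(r"web([^.]+)\.[^.]+$", format_id)
            if m:
                format_id = m.group(1)
            if format_id == 'hi' and ext:
                format_id = '%s.%s' % (format_id, ext)
        return format_id

    assert _normalize_format_id('webs.h264', 'mp4') == 's'
    assert _normalize_format_id('webxl.h264', 'mp4') == 'xl'
    # high-quality audio keeps its extension, matching the 'hi.mp3' key in _FORMATS
    assert _normalize_format_id('webhi.mp3', 'mp3') == 'hi.mp3'
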
8 changed files with 1087 additions and 727 deletions

test/helper.py

@@ -5,9 +5,9 @@ import hashlib
import json
import os.path
import re
import types
import ssl
import sys
import types
import unittest
import youtube_dl.extractor
@@ -181,18 +181,18 @@ def expect_value(self, got, expected, field):
op, _, expected_num = expected.partition(':')
expected_num = int(expected_num)
if op == 'mincount':
assert_func = assertGreaterEqual
assert_func = self.assertGreaterEqual
msg_tmpl = 'Expected %d items in field %s, but only got %d'
elif op == 'maxcount':
assert_func = assertLessEqual
assert_func = self.assertLessEqual
msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
elif op == 'count':
assert_func = assertEqual
assert_func = self.assertEqual
msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
else:
assert False
assert_func(
self, len(got), expected_num,
len(got), expected_num,
msg_tmpl % (expected_num, field, len(got)))
return
self.assertEqual(
@@ -262,27 +262,6 @@ def assertRegexpMatches(self, text, regexp, msg=None):
self.assertTrue(m, msg)
def assertGreaterEqual(self, got, expected, msg=None):
if not (got >= expected):
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
def assertLessEqual(self, got, expected, msg=None):
if not (got <= expected):
if msg is None:
msg = '%r not less than or equal to %r' % (got, expected)
self.assertTrue(got <= expected, msg)
def assertEqual(self, got, expected, msg=None):
if not (got == expected):
if msg is None:
msg = '%r not equal to %r' % (got, expected)
self.assertTrue(got == expected, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning

test/test_download.py

@@ -9,8 +9,6 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertGreaterEqual,
assertLessEqual,
expect_warnings,
get_params,
gettestcases,
@@ -36,12 +34,20 @@ from youtube_dl.utils import (
ExtractorError,
error_to_compat_str,
format_bytes,
IDENTITY,
preferredencoding,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
RETRIES = 3
# Some unittest APIs require actual str
if not isinstance('TEST', str):
_encode_str = lambda s: s.encode(preferredencoding())
else:
_encode_str = IDENTITY
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
@@ -102,7 +108,7 @@ def generator(test_case, tname):
def print_skipping(reason):
print('Skipping %s: %s' % (test_case['name'], reason))
self.skipTest(reason)
self.skipTest(_encode_str(reason))
if not ie.working():
print_skipping('IE marked as not _WORKING')
@@ -187,16 +193,14 @@ def generator(test_case, tname):
expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
if 'playlist_mincount' in test_case:
assertGreaterEqual(
self,
self.assertGreaterEqual(
len(res_dict['entries']),
test_case['playlist_mincount'],
'Expected at least %d in playlist %s, but got only %d' % (
test_case['playlist_mincount'], test_case['url'],
len(res_dict['entries'])))
if 'playlist_maxcount' in test_case:
assertLessEqual(
self,
self.assertLessEqual(
len(res_dict['entries']),
test_case['playlist_maxcount'],
'Expected at most %d in playlist %s, but got %d' % (
@@ -243,8 +247,8 @@ def generator(test_case, tname):
if params.get('test'):
expected_minsize = max(expected_minsize, 10000)
got_fsize = os.path.getsize(tc_filename)
assertGreaterEqual(
self, got_fsize, expected_minsize,
self.assertGreaterEqual(
got_fsize, expected_minsize,
'Expected %s to be at least %s, but it\'s only %s ' %
(tc_filename, format_bytes(expected_minsize),
format_bytes(got_fsize)))

youtube_dl/YoutubeDL.py

@@ -1039,8 +1039,8 @@ class YoutubeDL(object):
elif result_type in ('playlist', 'multi_video'):
# Protect from infinite recursion due to recursively nested playlists
# (see https://github.com/ytdl-org/youtube-dl/issues/27833)
webpage_url = ie_result['webpage_url']
if webpage_url in self._playlist_urls:
webpage_url = ie_result.get('webpage_url') # not all pl/mv have this
if webpage_url and webpage_url in self._playlist_urls:
self.to_screen(
'[download] Skipping already downloaded playlist: %s'
% ie_result.get('title') or ie_result.get('id'))
@@ -1048,6 +1048,10 @@ class YoutubeDL(object):
self._playlist_level += 1
self._playlist_urls.add(webpage_url)
new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
if new_result:
new_result.update(ie_result)
ie_result = new_result
try:
return self.__process_playlist(ie_result, download)
finally:
@@ -1593,6 +1597,28 @@ class YoutubeDL(object):
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def _fill_common_fields(self, info_dict, final=True):
for ts_key, date_key in (
('timestamp', 'upload_date'),
('release_timestamp', 'release_date'),
):
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
if final:
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
@@ -1660,24 +1686,7 @@ class YoutubeDL(object):
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
for ts_key, date_key in (
('timestamp', 'upload_date'),
('release_timestamp', 'release_date'),
):
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
except (ValueError, OverflowError, OSError):
pass
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
self._fill_common_fields(info_dict)
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)

youtube_dl/extractor/extractors.py

@@ -898,21 +898,13 @@ from .ooyala import (
)
from .ora import OraTVIE
from .orf import (
ORFTVthekIE,
ORFFM4IE,
ORFONIE,
ORFONLiveIE,
ORFFM4StoryIE,
ORFOE1IE,
ORFOE3IE,
ORFNOEIE,
ORFWIEIE,
ORFBGLIE,
ORFOOEIE,
ORFSTMIE,
ORFKTNIE,
ORFSBGIE,
ORFTIRIE,
ORFVBGIE,
ORFIPTVIE,
ORFPodcastIE,
ORFRadioIE,
ORFRadioCollectionIE,
)
from .outsidetv import OutsideTVIE
from .packtpub import (
@@ -1246,10 +1238,7 @@ from .svt import (
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import (
TagesschauPlayerIE,
TagesschauIE,
)
from .tagesschau import TagesschauIE
from .tass import TassIE
from .tbs import TBSIE
from .tdslifeway import TDSLifewayIE

youtube_dl/extractor/mixcloud.py

@@ -1,3 +1,4 @@
# coding: utf-8
from __future__ import unicode_literals
import itertools
@@ -10,7 +11,7 @@ from ..compat import (
compat_ord,
compat_str,
compat_urllib_parse_unquote,
compat_zip
compat_zip as zip,
)
from ..utils import (
int_or_none,
@@ -24,7 +25,7 @@ class MixcloudBaseIE(InfoExtractor):
def _call_api(self, object_type, object_fields, display_id, username, slug=None):
lookup_key = object_type + 'Lookup'
return self._download_json(
'https://www.mixcloud.com/graphql', display_id, query={
'https://app.mixcloud.com/graphql', display_id, query={
'query': '''{
%s(lookup: {username: "%s"%s}) {
%s
@@ -44,7 +45,7 @@ class MixcloudIE(MixcloudBaseIE):
'ext': 'm4a',
'title': 'Cryptkeeper',
'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
'uploader': 'Daniel Holbach',
'uploader': 'dholbach', # was: 'Daniel Holbach',
'uploader_id': 'dholbach',
'thumbnail': r're:https?://.*\.jpg',
'view_count': int,
@@ -57,7 +58,7 @@ class MixcloudIE(MixcloudBaseIE):
'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat',
'ext': 'mp3',
'title': 'Caribou 7 inch Vinyl Mix & Chat',
'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
'description': r're:Last week Dan Snaith aka Caribou swung by the Brownswood.{136}',
'uploader': 'Gilles Peterson Worldwide',
'uploader_id': 'gillespeterson',
'thumbnail': 're:https?://.*',
@@ -65,6 +66,23 @@ class MixcloudIE(MixcloudBaseIE):
'timestamp': 1422987057,
'upload_date': '20150203',
},
'params': {
'skip_download': '404 not found',
},
}, {
'url': 'https://www.mixcloud.com/gillespeterson/carnival-m%C3%BAsica-popular-brasileira-mix/',
'info_dict': {
'id': 'gillespeterson_carnival-música-popular-brasileira-mix',
'ext': 'm4a',
'title': 'Carnival Música Popular Brasileira Mix',
'description': r're:Gilles was recently in Brazil to play at Boiler Room.{208}',
'timestamp': 1454347174,
'upload_date': '20160201',
'uploader': 'Gilles Peterson Worldwide',
'uploader_id': 'gillespeterson',
'thumbnail': 're:https?://.*',
'view_count': int,
},
}, {
'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/',
'only_matching': True,
@@ -76,10 +94,10 @@ class MixcloudIE(MixcloudBaseIE):
"""Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
return ''.join([
compat_chr(compat_ord(ch) ^ compat_ord(k))
for ch, k in compat_zip(ciphertext, itertools.cycle(key))])
for ch, k in zip(ciphertext, itertools.cycle(key))])
def _real_extract(self, url):
username, slug = re.match(self._VALID_URL, url).groups()
username, slug = self._match_valid_url(url).groups()
username, slug = compat_urllib_parse_unquote(username), compat_urllib_parse_unquote(slug)
track_id = '%s_%s' % (username, slug)

youtube_dl/extractor/orf.py (file diff suppressed because it is too large)

youtube_dl/extractor/tagesschau.py

@@ -5,127 +5,54 @@ import re
from .common import InfoExtractor
from ..utils import (
bool_or_none,
compat_str,
determine_ext,
js_to_json,
parse_iso8601,
ExtractorError,
parse_duration,
parse_filesize,
remove_quotes,
strip_or_none,
try_get,
unescapeHTML,
unified_timestamp,
url_or_none,
)
# Note that there are tagesschau.de/api and tagesschau.de/api2 endpoints, which
# may be useful, but not all pages and not all formats can be easily accessed
# by API.
class TagesschauPlayerIE(InfoExtractor):
IE_NAME = 'tagesschau:player'
_VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html'
_TESTS = [{
'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html',
'md5': '8d09548d5c15debad38bee3a4d15ca21',
'info_dict': {
'id': '179517',
'ext': 'mp4',
'title': 'Marie Kristin Boese, ARD Berlin, über den zukünftigen Kurs der AfD',
'thumbnail': r're:^https?:.*\.jpg$',
'formats': 'mincount:6',
},
}, {
'url': 'https://www.tagesschau.de/multimedia/audio/audio-29417~player.html',
'md5': '76e6eec6ebd40740671cf0a2c88617e5',
'info_dict': {
'id': '29417',
'ext': 'mp3',
'title': 'Trabi - Bye, bye Rennpappe',
'thumbnail': r're:^https?:.*\.jpg$',
'formats': 'mincount:2',
},
}, {
'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417~player_autoplay-true.html',
'only_matching': True,
}]
_FORMATS = {
'xs': {'quality': 0},
's': {'width': 320, 'height': 180, 'quality': 1},
'sm': {'width': 480, 'height': 270, 'quality': 1},
'm': {'width': 512, 'height': 288, 'quality': 2},
'ml': {'width': 640, 'height': 360, 'quality': 2},
'l': {'width': 960, 'height': 540, 'quality': 3},
'xl': {'width': 1280, 'height': 720, 'quality': 4},
'xxl': {'quality': 5},
'mp3': {'abr': 64, 'vcodec': 'none', 'quality': 0},
'hi.mp3': {'abr': 192, 'vcodec': 'none', 'quality': 1},
}
_FORMATS = {
'xs': {'quality': 0},
's': {'width': 320, 'height': 180, 'quality': 1},
'm': {'width': 512, 'height': 288, 'quality': 2},
'l': {'width': 960, 'height': 540, 'quality': 3},
'xl': {'width': 1280, 'height': 720, 'quality': 4},
'xxl': {'quality': 5},
}
_FIELD_PREFERENCE = ('height', 'width', 'vbr', 'abr')
def _extract_via_api(self, kind, video_id):
info = self._download_json(
'https://www.tagesschau.de/api/multimedia/{0}/{0}-{1}.json'.format(kind, video_id),
video_id)
title = info['headline']
formats = []
for media in info['mediadata']:
for format_id, format_url in media.items():
if determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls'))
else:
formats.append({
'url': format_url,
'format_id': format_id,
'vcodec': 'none' if kind == 'audio' else None,
})
self._sort_formats(formats)
timestamp = parse_iso8601(info.get('date'))
return {
'id': video_id,
'title': title,
'timestamp': timestamp,
'formats': formats,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
# kind = mobj.group('kind').lower()
# if kind == 'video':
# return self._extract_via_api(kind, video_id)
# JSON api does not provide some audio formats (e.g. ogg) thus
# extracting audio via webpage
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage).strip()
formats = []
for media_json in re.findall(r'({src\s*:\s*["\']http[^}]+type\s*:[^}]+})', webpage):
media = self._parse_json(js_to_json(media_json), video_id, fatal=False)
if not media:
continue
src = media.get('src')
if not src:
return
quality = media.get('quality')
kind = media.get('type', '').split('/')[0]
ext = determine_ext(src)
f = {
'url': src,
'format_id': '%s_%s' % (quality, ext) if quality else ext,
'ext': ext,
'vcodec': 'none' if kind == 'audio' else None,
}
f.update(self._FORMATS.get(quality, {}))
formats.append(f)
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
def _normalize_format_id(format_id, ext):
if format_id:
m = re.match(r"web([^.]+)\.[^.]+$", format_id)
if m:
format_id = m.group(1)
if format_id == 'hi' and ext:
# high-quality audio files
format_id = '%s.%s' % (format_id, ext)
return format_id
class TagesschauIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html'
_VALID_URL = r'https?://(?:www\.)?tagesschau\.de(?:/?|/(?P<path>[^?#]+?(?:/(?P<id>[^/#?]+?(?:-?[0-9]+))(?:~_?[^/#?]+?)?(?:\.html)?)?))(?:[#?].*)?$'
_TESTS = [{
'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
@@ -134,48 +61,111 @@ class TagesschauIE(InfoExtractor):
'id': 'video-102143',
'ext': 'mp4',
'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
'description': '18.07.2015 20:10 Uhr',
'description': '18.07.2015 20:10',
'thumbnail': r're:^https?:.*\.jpg$',
'upload_date': '20150718',
'duration': 138,
'timestamp': 1437250200,
'uploader': 'ARD',
},
}, {
# with player
'url': 'http://www.tagesschau.de/multimedia/video/video-102143~player.html',
'md5': 'f7c27a0eff3bfe8c7727e65f8fe1b1e6',
'info_dict': {
'id': 'video-102143',
'ext': 'mp4',
'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
'description': '18.07.2015 20:10',
'thumbnail': r're:^https?:.*\.jpg$',
'upload_date': '20150718',
'timestamp': 1437250200,
'uploader': 'ARD',
},
}, {
'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
'md5': '3c54c1f6243d279b706bde660ceec633',
'info_dict': {
'id': 'ts-5727',
'id': 'video-45741',
'ext': 'mp4',
'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
'description': 'md5:695c01bfd98b7e313c501386327aea59',
'title': 'tagesschau 20 Uhr - 04.12.14 20:00',
'description': '04.12.2014 20:00',
'thumbnail': r're:^https?:.*\.jpg$',
'uploader': 'tagesschau',
'timestamp': 1417723200,
'upload_date': '20141204',
'subtitles': dict,
},
}, {
# exclusive audio
'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417.html',
'md5': '76e6eec6ebd40740671cf0a2c88617e5',
'url': 'https://www.tagesschau.de/multimedia/audio/audio-103205.html',
'md5': 'c8e7b72aeca664031db0ba198519b09a',
'info_dict': {
'id': 'audio-29417',
'id': 'audio-103205',
'ext': 'mp3',
'title': 'Trabi - Bye, bye Rennpappe',
'description': 'md5:8687dda862cbbe2cfb2df09b56341317',
'title': 'Die USA: ein Impfwunder?',
'description': '06.03.2021 06:07',
'timestamp': 1615010820,
'upload_date': '20210306',
'thumbnail': r're:^https?:.*\.jpg$',
'uploader': 'Jule Käppel, ARD Washington',
'creator': 'ARD',
'channel': 'tagesschau.de',
'is_live': False,
},
}, {
# audio in article
'url': 'http://www.tagesschau.de/inland/bnd-303.html',
'md5': 'e0916c623e85fc1d2b26b78f299d3958',
'url': 'https://www.tagesschau.de/ausland/amerika/biden-versoehnung-101.html',
'md5': '4c46b0283719d97aa976037e1ecb7b73',
'info_dict': {
'id': 'bnd-303',
'id': 'audio-103429',
'title': 'Bidens Versöhnungswerk kommt nicht voran',
'ext': 'mp3',
'title': 'Viele Baustellen für neuen BND-Chef',
'description': 'md5:1e69a54be3e1255b2b07cdbce5bcd8b4',
'thumbnail': r're:^https?:.*\.jpg$',
'timestamp': 1615444860,
'uploader': 'Sebastian Hesse, ARD Washington',
'description': '11.03.2021 06:41',
'upload_date': '20210311',
'creator': 'ARD',
'channel': 'tagesschau.de',
},
}, {
'url': 'http://www.tagesschau.de/inland/afd-parteitag-135.html',
# playlist in article
'url': 'https://www.tagesschau.de/ausland/impfungen-coronavirus-usa-101.html',
'info_dict': {
'id': 'afd-parteitag-135',
'title': 'Möchtegern-Underdog mit Machtanspruch',
'id': 'impfungen-coronavirus-usa-101',
'title': 'Kampf gegen das Coronavirus: Impfwunder USA?',
},
'playlist_count': 2,
}, {
# article without videos
'url': 'https://www.tagesschau.de/wirtschaft/ukraine-russland-kredit-101.html',
'info_dict': {
'id': 'ukraine-russland-kredit-101',
'title': 'Ukraine stoppt Rückzahlung russischer Kredite',
},
'playlist_count': 0,
}, {
# legacy website
'url': 'https://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html',
'md5': 'ab6d190c8147560d6429a467566affe6',
'info_dict': {
'id': 'video-102303',
'ext': 'mp4',
'title': 'Bericht aus Berlin: Sommerinterview mit Angela Merkel',
'description': '19.07.2015 19:05 Uhr',
}
}, {
# handling of generic title
'url': 'https://www.tagesschau.de/multimedia/video/video-835681.html',
'info_dict': {
'id': 'video-835681',
'ext': 'mp4',
'title': 'Tagesschau in 100 Sekunden - 13.03.21 17:35',
'upload_date': '20210313',
'uploader': 'Tagesschau24',
'description': '13.03.2021 17:35',
'timestamp': 1615656900,
}
}, {
'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html',
'only_matching': True,
@@ -204,13 +194,176 @@ class TagesschauIE(InfoExtractor):
# playlist article with collapsing sections
'url': 'http://www.tagesschau.de/wirtschaft/faq-freihandelszone-eu-usa-101.html',
'only_matching': True,
}, {
'url': 'https://www.tagesschau.de/',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if TagesschauPlayerIE.suitable(url) else super(TagesschauIE, cls).suitable(url)
def _video_id_from_url(self, url):
if url:
mobj = re.match(self._VALID_URL, url)
if mobj:
return mobj.group('id')
def _extract_formats(self, download_text, media_kind):
def _handle_generic_titles(self, title, pixelConf):
if strip_or_none(title, '').lower() not in ('ganze sendung', '100 sekunden',
'tagesschau in 100 sekunden'):
return title
# otherwise find more meaningful title than the generic Ganze Sendung/100 Sekunden
for item in pixelConf:
if item.get('tracker') == 'AGFdebug':
s = try_get(item, lambda x: x['clipData']['program'], compat_str)
if s:
# extract date and time
parts = (try_get(item, lambda x: x['clipData']['title'], compat_str)
or '').split('_')[-2:]
if len(parts) == 2:
title = "%s - %s" % (s, ' '.join(parts))
else:
title = s
break
return title
def _extract_from_player(self, player_div, video_id_fallback, title_fallback):
player_data = unescapeHTML(self._search_regex(
r'data-config=(?P<quote>["\'])(?P<data>[^"\']*)(?P=quote)',
player_div, 'data-config', group='data'))
meta = self._parse_json(player_data, video_id_fallback, fatal=False)
mc = try_get(meta, lambda x: x['mc'], dict)
if not mc:
# fallback if parsing json fails, as tagesschau API sometimes sends
# invalid json
stream_hls = remove_quotes(self._search_regex(
r'"http[^"]+?\.m3u8"', player_data, '.m3u8-url', group=0))
formats = self._extract_m3u8_formats(stream_hls, video_id_fallback,
ext='mp4', m3u8_id='hls',
entry_protocol='m3u8_native')
self._sort_formats(formats, field_preference=_FIELD_PREFERENCE)
return {
'id': video_id_fallback,
'title': title_fallback,
'formats': formats,
}
# this url is more permanent than the original link
webpage_url = url_or_none(try_get(mc, lambda x: x['_sharing']['link']))
video_id = self._video_id_from_url(webpage_url)
duration = None
pixelConf = try_get(meta, lambda x: x['pc']['_pixelConfig'], list) or []
for item in pixelConf:
video_id = (video_id or try_get(item,
[lambda x: x['playerID'],
lambda x: x['clipData']['playerId']], compat_str))
duration = (duration or parse_duration(try_get(item,
[lambda x: x['clipData']['length'],
lambda x: x['clipData']['duration']])))
if not video_id:
video_id = video_id_fallback
formats = []
for elem in mc.get('_mediaArray', []):
for d in elem.get('_mediaStreamArray', []):
link_url = url_or_none(d.get('_stream'))
if not link_url:
continue
ext = determine_ext(link_url)
if ext == "m3u8":
formats.extend(self._extract_m3u8_formats(
link_url, video_id_fallback, ext='mp4',
entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
elif ext == "f4m":
formats.extend(self._extract_f4m_formats(
link_url, video_id_fallback, f4m_id='hds', fatal=False))
else:
format_id = _normalize_format_id(self._search_regex(
r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID',
default=ext, fatal=False),
ext)
fmt = {
'format_id': format_id,
'url': link_url,
'format_name': ext,
}
fmt.update(_FORMATS.get(format_id, {}))
formats.append(fmt)
self._sort_formats(formats, field_preference=_FIELD_PREFERENCE)
if not formats:
raise ExtractorError("could not extract formats from json")
# note that mc['_title'] can be very different from actual title,
# such as an image description in case of audio files
title = (try_get(mc, [lambda x: x['_info']['clipTitle'],
lambda x: x['_download']['title']], compat_str)
or title_fallback)
title = self._handle_generic_titles(title, pixelConf)
sub_url = url_or_none(mc.get('_subtitleUrl'))
subs = {'de': [{'ext': 'ttml', 'url': sub_url}]} if sub_url else None
images = try_get(mc, lambda x: x['_previewImage'], dict) or {}
thumbnails = [{
'url': url_or_none('https://www.tagesschau.de/%s'
% (images[format_id],)),
'preference': _FORMATS.get(format_id, {}).get('quality'),
} for format_id in images] or None
return {
'id': video_id,
'title': title,
'formats': formats,
'webpage_url': webpage_url,
'subtitles': subs,
'thumbnails': thumbnails,
'duration': duration,
'timestamp': unified_timestamp(try_get(mc, [lambda x: x['_download']['date'],
lambda x: x['_info']['clipDate']])),
'is_live': bool_or_none(mc.get('_isLive')),
'channel': try_get(mc, lambda x: x['_download']['channel'], compat_str),
'uploader': try_get(mc, lambda x: x['_info']['channelTitle'], compat_str),
'creator': try_get(mc, lambda x: x['_info']['clipContentSrc'], compat_str),
'description': try_get(mc, lambda x: x['_info']['clipDate'], compat_str),
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') or mobj.group('path')
display_id = video_id.lstrip('-') if video_id else 'tagesschau.de'
webpage = self._download_webpage(url, display_id)
title = (self._og_search_title(webpage, default=None)
or self._html_search_regex(
[r'<span[^>]*class="headline"[^>]*>(.+?)</span>',
r'<title[^>]*>(.+?)</title>'],
webpage, 'title'))
webpage_type = self._og_search_property('type', webpage, default=None)
player_pattern = r'<div[^>]+data-ts_component=(?P<quote>["\'])ts-mediaplayer(?P=quote)[^>]*>'
players = [m.group(0) for m in re.finditer(player_pattern, webpage)]
if not players:
# assume old website format
return self._legacy_extract(webpage, display_id, title, webpage_type)
elif (len(players) > 1
and not self._downloader.params.get('noplaylist')
and (webpage_type == 'website' or not mobj.group('id'))):
# article or playlist
entries = []
seen = set()
for player in players:
entry = self._extract_from_player(player, video_id, title)
if entry['id'] not in seen:
entries.append(entry)
seen.add(entry['id'])
return self.playlist_result(entries, display_id, title)
else:
# single video/audio
return self._extract_from_player(players[0], video_id, title)
def _legacy_extract_formats(self, download_text, media_kind):
links = re.finditer(
r'<div class="button" title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>',
download_text)
@@ -219,9 +372,10 @@ class TagesschauIE(InfoExtractor):
link_url = l.group('url')
if not link_url:
continue
format_id = self._search_regex(
ext = determine_ext(link_url)
format_id = _normalize_format_id(self._search_regex(
r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID',
default=determine_ext(link_url))
default=ext), ext)
format = {
'format_id': format_id,
'url': l.group('url'),
@@ -262,39 +416,30 @@ class TagesschauIE(InfoExtractor):
self._sort_formats(formats)
return formats
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') or mobj.group('path')
display_id = video_id.lstrip('-')
# Some old pages still use the old format, so we keep the previous
# extractor for now.
def _legacy_extract(self, webpage, display_id, title, webpage_type):
DOWNLOAD_REGEX = r'<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>'
webpage = self._download_webpage(url, display_id)
title = self._html_search_regex(
r'<span[^>]*class="headline"[^>]*>(.+?)</span>',
webpage, 'title', default=None) or self._og_search_title(webpage)
DOWNLOAD_REGEX = r'(?s)<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>'
webpage_type = self._og_search_property('type', webpage, default=None)
if webpage_type == 'website': # Article
entries = []
for num, (entry_title, media_kind, download_text) in enumerate(re.findall(
r'(?s)<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX,
webpage), 1):
r'<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX,
webpage, flags=re.S), 1):
entries.append({
'id': '%s-%d' % (display_id, num),
'title': '%s' % entry_title,
'formats': self._extract_formats(download_text, media_kind),
'formats': self._legacy_extract_formats(download_text, media_kind),
})
if len(entries) > 1:
if len(entries) != 1:
return self.playlist_result(entries, display_id, title)
formats = entries[0]['formats']
else: # Assume single video
download_text = self._search_regex(
DOWNLOAD_REGEX, webpage, 'download links', group='links')
DOWNLOAD_REGEX, webpage, 'download links', flags=re.S, group='links')
media_kind = self._search_regex(
DOWNLOAD_REGEX, webpage, 'media kind', default='Video', group='kind')
formats = self._extract_formats(download_text, media_kind)
DOWNLOAD_REGEX, webpage, 'media kind', default='Video', flags=re.S, group='kind')
formats = self._legacy_extract_formats(download_text, media_kind)
thumbnail = self._og_search_thumbnail(webpage)
description = self._html_search_regex(
r'(?s)<p class="teasertext">(.*?)</p>',

youtube_dl/extractor/vidlii.py

@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
get_element_by_id,
@@ -11,6 +12,7 @@ from ..utils import (
strip_or_none,
unified_strdate,
urljoin,
str_to_int,
)
@@ -35,6 +37,26 @@ class VidLiiIE(InfoExtractor):
'categories': ['News & Politics'],
'tags': ['Vidlii', 'Jan', 'Videogames'],
}
}, {
# HD
'url': 'https://www.vidlii.com/watch?v=2Ng8Abj2Fkl',
'md5': '450e7da379c884788c3a4fa02a3ce1a4',
'info_dict': {
'id': '2Ng8Abj2Fkl',
'ext': 'mp4',
'title': 'test',
'description': 'md5:cc55a86032a7b6b3cbfd0f6b155b52e9',
'thumbnail': 'https://www.vidlii.com/usfi/thmp/2Ng8Abj2Fkl.jpg',
'uploader': 'VidLii',
'uploader_url': 'https://www.vidlii.com/user/VidLii',
'upload_date': '20200927',
'duration': 5,
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Film & Animation'],
'tags': list,
},
}, {
'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
'only_matching': True,
@@ -46,11 +68,32 @@ class VidLiiIE(InfoExtractor):
webpage = self._download_webpage(
'https://www.vidlii.com/watch?v=%s' % video_id, video_id)
video_url = self._search_regex(
r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage,
'video url', group='url')
formats = []
title = self._search_regex(
def add_format(format_url, height=None):
height = int(self._search_regex(r'(\d+)\.mp4',
format_url, 'height', default=360))
formats.append({
'url': format_url,
'format_id': '%dp' % height if height else None,
'height': height,
})
sources = re.findall(
r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1',
webpage)
formats = []
if len(sources) > 1:
add_format(sources[1][1])
self._check_formats(formats, video_id)
if len(sources) > 0:
add_format(sources[0][1])
self._sort_formats(formats)
title = self._html_search_regex(
(r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
'title')
@@ -82,9 +125,9 @@ class VidLiiIE(InfoExtractor):
default=None) or self._search_regex(
r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
(r'<strong>(\d+)</strong> views',
r'Views\s*:\s*<strong>(\d+)</strong>'),
view_count = str_to_int(self._html_search_regex(
(r'<strong>([\d,.]+)</strong> views',
r'Views\s*:\s*<strong>([\d,.]+)</strong>'),
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._search_regex(
@@ -109,7 +152,7 @@ class VidLiiIE(InfoExtractor):
return {
'id': video_id,
'url': video_url,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,