From d44a707fdde6c0138e9e275ed5b4ffb0b8f72966 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 5 Apr 2020 20:34:57 +0700
Subject: [PATCH 001/340] [spankwire] Fix extraction (closes #18924, closes
 #20648)

---
 youtube_dl/extractor/spankwire.py | 201 +++++++++++++++++++-----------
 1 file changed, 125 insertions(+), 76 deletions(-)

diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py
index 44d8fa52f..8f67463ed 100644
--- a/youtube_dl/extractor/spankwire.py
+++ b/youtube_dl/extractor/spankwire.py
@@ -3,34 +3,47 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_urlparse,
-)
 from ..utils import (
-    sanitized_Request,
+    float_or_none,
+    int_or_none,
+    merge_dicts,
+    str_or_none,
     str_to_int,
-    unified_strdate,
+    url_or_none,
 )
-from ..aes import aes_decrypt_text
 
 
 class SpankwireIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<id>[0-9]+)/?)'
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:www\.)?spankwire\.com/
+                        (?:
+                            [^/]+/video|
+                            EmbedPlayer\.aspx/?\?.*?\bArticleId=
+                        )
+                        (?P<id>\d+)
+                    '''
     _TESTS = [{
         # download URL pattern: */<height>P_<tbr>K_<video_id>.mp4
         'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
-        'md5': '8bbfde12b101204b39e4b9fe7eb67095',
+        'md5': '5aa0e4feef20aad82cbcae3aed7ab7cd',
         'info_dict': {
             'id': '103545',
             'ext': 'mp4',
             'title': 'Buckcherry`s X Rated Music Video Crazy Bitch',
             'description': 'Crazy Bitch X rated music video.',
+            'duration': 222,
             'uploader': 'oreusz',
             'uploader_id': '124697',
-            'upload_date': '20070507',
+            'timestamp': 1178587885,
+            'upload_date': '20070508',
+            'average_rating': float,
+            'view_count': int,
+            'comment_count': int,
             'age_limit': 18,
-        }
+            'categories': list,
+            'tags': list,
+        },
     }, {
         # download URL pattern: */mp4_<format_id>_<video_id>.mp4
         'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/',
@@ -45,83 +58,119 @@ class SpankwireIE(InfoExtractor):
             'upload_date': '20150822',
             'age_limit': 18,
         },
+        'params': {
+            'proxy': '127.0.0.1:8118'
+        },
+        'skip': 'removed',
+    }, {
+        'url': 'https://www.spankwire.com/EmbedPlayer.aspx/?ArticleId=156156&autostart=true',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
-        req = sanitized_Request('http://www.' + mobj.group('url'))
-        req.add_header('Cookie', 'age_verified=1')
-        webpage = self._download_webpage(req, video_id)
+        video = self._download_json(
+            'https://www.spankwire.com/api/video/%s.json' % video_id, video_id)
 
-        title = self._html_search_regex(
-            r'<h1>([^<]+)', webpage, 'title')
-        description = self._html_search_regex(
-            r'(?s)<div\s+id="descriptionContent">(.+?)</div>',
-            webpage, 'description', fatal=False)
-        thumbnail = self._html_search_regex(
-            r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']',
-            webpage, 'thumbnail', fatal=False)
-
-        uploader = self._html_search_regex(
-            r'by:\s*<a [^>]*>(.+?)</a>',
-            webpage, 'uploader', fatal=False)
-        uploader_id = self._html_search_regex(
-            r'by:\s*<a href="/(?:user/viewProfile|Profile\.aspx)\?.*?UserId=(\d+).*?"',
-            webpage, 'uploader id', fatal=False)
-        upload_date = unified_strdate(self._html_search_regex(
-            r'</a> on (.+?) at \d+:\d+',
-            webpage, 'upload date', fatal=False))
-
-        view_count = str_to_int(self._html_search_regex(
-            r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>',
-            webpage, 'view count', fatal=False))
-        comment_count = str_to_int(self._html_search_regex(
-            r'<span\s+id="spCommentCount"[^>]*>([\d,\.]+)</span>',
-            webpage, 'comment count', fatal=False))
-
-        videos = re.findall(
-            r'playerData\.cdnPath([0-9]{3,})\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage)
-        heights = [int(video[0]) for video in videos]
-        video_urls = list(map(compat_urllib_parse_unquote, [video[1] for video in videos]))
-        if webpage.find(r'flashvars\.encrypted = "true"') != -1:
-            password = self._search_regex(
-                r'flashvars\.video_title = "([^"]+)',
-                webpage, 'password').replace('+', ' ')
-            video_urls = list(map(
-                lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
-                video_urls))
+        title = video['title']
 
         formats = []
-        for height, video_url in zip(heights, video_urls):
-            path = compat_urllib_parse_urlparse(video_url).path
-            m = re.search(r'/(?P<height>\d+)[pP]_(?P<tbr>\d+)[kK]', path)
-            if m:
-                tbr = int(m.group('tbr'))
-                height = int(m.group('height'))
-            else:
-                tbr = None
-            formats.append({
-                'url': video_url,
-                'format_id': '%dp' % height,
-                'height': height,
-                'tbr': tbr,
+        videos = video.get('videos')
+        if isinstance(videos, dict):
+            for format_id, format_url in videos.items():
+                video_url = url_or_none(format_url)
+                if not video_url:
+                    continue
+                height = int_or_none(self._search_regex(
+                    r'(\d+)[pP]', format_id, 'height', default=None))
+                m = re.search(
+                    r'/(?P<height>\d+)[pP]_(?P<tbr>\d+)[kK]', video_url)
+                if m:
+                    tbr = int(m.group('tbr'))
+                    height = height or int(m.group('height'))
+                else:
+                    tbr = None
+                formats.append({
+                    'url': video_url,
+                    'format_id': '%dp' % height if height else format_id,
+                    'height': height,
+                    'tbr': tbr,
+                })
+        m3u8_url = url_or_none(video.get('HLS'))
+        if m3u8_url:
+            formats.extend(self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                m3u8_id='hls', fatal=False))
+        self._sort_formats(formats, ('height', 'tbr', 'width', 'format_id'))
+
+        view_count = str_to_int(video.get('viewed'))
+
+        thumbnails = []
+        for preference, t in enumerate(('', '2x'), start=0):
+            thumbnail_url = url_or_none(video.get('poster%s' % t))
+            if not thumbnail_url:
+                continue
+            thumbnails.append({
+                'url': thumbnail_url,
+                'preference': preference,
             })
-        self._sort_formats(formats)
 
-        age_limit = self._rta_search(webpage)
+        def extract_names(key):
+            entries_list = video.get(key)
+            if not isinstance(entries_list, list):
+                return
+            entries = []
+            for entry in entries_list:
+                name = str_or_none(entry.get('name'))
+                if name:
+                    entries.append(name)
+            return entries
 
-        return {
+        categories = extract_names('categories')
+        tags = extract_names('tags')
+
+        uploader = None
+        info = {}
+
+        webpage = self._download_webpage(
+            'https://www.spankwire.com/_/video%s/' % video_id, video_id,
+            fatal=False)
+        if webpage:
+            info = self._search_json_ld(webpage, video_id, default={})
+            thumbnail_url = None
+            if 'thumbnail' in info:
+                thumbnail_url = url_or_none(info['thumbnail'])
+                del info['thumbnail']
+            if not thumbnail_url:
+                thumbnail_url = self._og_search_thumbnail(webpage)
+            if thumbnail_url:
+                thumbnails.append({
+                    'url': thumbnail_url,
+                    'preference': 10,
+                })
+            uploader = self._html_search_regex(
+                r'(?s)by\s*<a[^>]+\bclass=["\']uploaded__by[^>]*>(.+?)</a>',
+                webpage, 'uploader', fatal=False)
+            if not view_count:
+                view_count = str_to_int(self._search_regex(
+                    r'data-views=["\']([\d,.]+)', webpage, 'view count',
+                    fatal=False))
+
+        return merge_dicts({
             'id': video_id,
             'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
+            'description': video.get('description'),
+            'duration': int_or_none(video.get('duration')),
+            'thumbnails': thumbnails,
             'uploader': uploader,
-            'uploader_id': uploader_id,
-            'upload_date': upload_date,
+            'uploader_id': str_or_none(video.get('userId')),
+            'timestamp': int_or_none(video.get('time_approved_on')),
+            'average_rating': float_or_none(video.get('rating')),
             'view_count': view_count,
-            'comment_count': comment_count,
+            'comment_count': int_or_none(video.get('comments')),
+            'age_limit': 18,
+            'categories': categories,
+            'tags': tags,
             'formats': formats,
-            'age_limit': age_limit,
-        }
+        }, info)
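
A minimal sketch of how merge_dicts (from youtube_dl.utils) combines the API
metadata with the JSON-LD info scraped from the webpage above; the sample
dicts below are invented for illustration:

    from youtube_dl.utils import merge_dicts

    # merge_dicts keeps the first non-None value seen for each key, so the
    # JSON-LD data only fills in fields the JSON API did not already provide.
    api_info = {'title': 'From the JSON API', 'uploader': None}
    json_ld_info = {'title': 'From JSON-LD', 'uploader': 'someuser'}
    print(merge_dicts(api_info, json_ld_info))
    # {'title': 'From the JSON API', 'uploader': 'someuser'}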

From 8fae1a04eb20279a76d6b1eccdb8249718ad9942 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 5 Apr 2020 20:42:10 +0700
Subject: [PATCH 002/340] [spankwire] Add support for generic embeds (refs
 #24633)

---
 youtube_dl/extractor/generic.py   | 6 ++++++
 youtube_dl/extractor/spankwire.py | 6 ++++++
 2 files changed, 12 insertions(+)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index a495ee15a..63b52306a 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -60,6 +60,7 @@ from .tnaflix import TNAFlixNetworkEmbedIE
 from .drtuber import DrTuberIE
 from .redtube import RedTubeIE
 from .tube8 import Tube8IE
+from .spankwire import SpankwireIE
 from .vimeo import VimeoIE
 from .dailymotion import DailymotionIE
 from .dailymail import DailyMailIE
@@ -2715,6 +2716,11 @@ class GenericIE(InfoExtractor):
         if tube8_urls:
             return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
 
+        # Look for embedded Spankwire player
+        spankwire_urls = SpankwireIE._extract_urls(webpage)
+        if spankwire_urls:
+            return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
+
         # Look for embedded Tvigle player
         mobj = re.search(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py
index 8f67463ed..35ab9ec37 100644
--- a/youtube_dl/extractor/spankwire.py
+++ b/youtube_dl/extractor/spankwire.py
@@ -67,6 +67,12 @@ class SpankwireIE(InfoExtractor):
         'only_matching': True,
     }]
 
+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?spankwire\.com/EmbedPlayer\.aspx/?\?.*?\bArticleId=\d+)',
+            webpage)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
 

From 52c4c51556df15f98c9cda911e36995fe0fc0a47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 5 Apr 2020 20:56:14 +0700
Subject: [PATCH 003/340] [youporn] Add support for generic embeds

---
 youtube_dl/extractor/generic.py |  6 ++++++
 youtube_dl/extractor/youporn.py | 23 +++++++++++++++++------
 2 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 63b52306a..0ada6354e 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -61,6 +61,7 @@ from .drtuber import DrTuberIE
 from .redtube import RedTubeIE
 from .tube8 import Tube8IE
 from .spankwire import SpankwireIE
+from .youporn import YouPornIE
 from .vimeo import VimeoIE
 from .dailymotion import DailymotionIE
 from .dailymail import DailyMailIE
@@ -2721,6 +2722,11 @@ class GenericIE(InfoExtractor):
         if spankwire_urls:
             return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
 
+        # Look for embedded YouPorn player
+        youporn_urls = YouPornIE._extract_urls(webpage)
+        if youporn_urls:
+            return self.playlist_from_matches(youporn_urls, video_id, video_title, ie=YouPornIE.ie_key())
+
         # Look for embedded Tvigle player
         mobj = re.search(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py
index d4eccb4b2..e7fca22de 100644
--- a/youtube_dl/extractor/youporn.py
+++ b/youtube_dl/extractor/youporn.py
@@ -5,7 +5,6 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     int_or_none,
-    sanitized_Request,
     str_to_int,
     unescapeHTML,
     unified_strdate,
@@ -15,7 +14,7 @@ from ..aes import aes_decrypt_text
 
 
 class YouPornIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:www\.)?youporn\.com/(?:watch|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?'
     _TESTS = [{
         'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
         'md5': '3744d24c50438cf5b6f6d59feb5055c2',
@@ -57,16 +56,28 @@ class YouPornIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+    }, {
+        'url': 'https://www.youporn.com/embed/505835/sex-ed-is-it-safe-to-masturbate-daily/',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.youporn.com/watch/505835',
+        'only_matching': True,
     }]
 
+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?youporn\.com/embed/\d+)',
+            webpage)
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        display_id = mobj.group('display_id')
+        display_id = mobj.group('display_id') or video_id
 
-        request = sanitized_Request(url)
-        request.add_header('Cookie', 'age_verified=1')
-        webpage = self._download_webpage(request, display_id)
+        webpage = self._download_webpage(
+            'http://www.youporn.com/watch/%s' % video_id, display_id,
+            headers={'Cookie': 'age_verified=1'})
 
         title = self._html_search_regex(
             r'(?s)<div[^>]+class=["\']watchVideoTitle[^>]+>(.+?)</div>',
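
Like the Spankwire patch above (and the Mofosex one below), this wires the
site into the generic extractor through a static _extract_urls helper that
greps the webpage for matching <iframe> sources. A minimal sketch of that
lookup, applying the YouPorn pattern added above to an invented HTML snippet:

    import re

    webpage = '<iframe src="https://www.youporn.com/embed/505835" frameborder="0"></iframe>'
    urls = re.findall(
        r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?youporn\.com/embed/\d+)',
        webpage)
    print(urls)  # ['https://www.youporn.com/embed/505835']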

From 4e7b5bba5fb73502476c61e4931284c9c3d3d232 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 5 Apr 2020 21:27:36 +0700
Subject: [PATCH 004/340] [mofosex] Add support for generic embeds (closes
 #24633)

---
 youtube_dl/extractor/extractors.py |  5 ++++-
 youtube_dl/extractor/generic.py    |  6 ++++++
 youtube_dl/extractor/mofosex.py    | 23 +++++++++++++++++++++++
 3 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index ef803b8a7..e407ab3d9 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -636,7 +636,10 @@ from .mixcloud import (
 from .mlb import MLBIE
 from .mnet import MnetIE
 from .moevideo import MoeVideoIE
-from .mofosex import MofosexIE
+from .mofosex import (
+    MofosexIE,
+    MofosexEmbedIE,
+)
 from .mojvideo import MojvideoIE
 from .morningstar import MorningstarIE
 from .motherless import (
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 0ada6354e..ce8252f6a 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -60,6 +60,7 @@ from .tnaflix import TNAFlixNetworkEmbedIE
 from .drtuber import DrTuberIE
 from .redtube import RedTubeIE
 from .tube8 import Tube8IE
+from .mofosex import MofosexEmbedIE
 from .spankwire import SpankwireIE
 from .youporn import YouPornIE
 from .vimeo import VimeoIE
@@ -2717,6 +2718,11 @@ class GenericIE(InfoExtractor):
         if tube8_urls:
             return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
 
+        # Look for embedded Mofosex player
+        mofosex_urls = MofosexEmbedIE._extract_urls(webpage)
+        if mofosex_urls:
+            return self.playlist_from_matches(mofosex_urls, video_id, video_title, ie=MofosexEmbedIE.ie_key())
+
         # Look for embedded Spankwire player
         spankwire_urls = SpankwireIE._extract_urls(webpage)
         if spankwire_urls:
diff --git a/youtube_dl/extractor/mofosex.py b/youtube_dl/extractor/mofosex.py
index 1c652813a..5234cac02 100644
--- a/youtube_dl/extractor/mofosex.py
+++ b/youtube_dl/extractor/mofosex.py
@@ -1,5 +1,8 @@
 from __future__ import unicode_literals
 
+import re
+
+from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     str_to_int,
@@ -54,3 +57,23 @@ class MofosexIE(KeezMoviesIE):
         })
 
         return info
+
+
+class MofosexEmbedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?mofosex\.com/embed/?\?.*?\bvideoid=(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://www.mofosex.com/embed/?videoid=318131&referrer=KM',
+        'only_matching': True,
+    }]
+
+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?mofosex\.com/embed/?\?.*?\bvideoid=\d+)',
+            webpage)
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        return self.url_result(
+            'http://www.mofosex.com/videos/{0}/{0}.html'.format(video_id),
+            ie=MofosexIE.ie_key(), video_id=video_id)
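
The embed extractor does not fetch anything itself; it rewrites the embed URL
into the regular video page URL and delegates to MofosexIE. For the test URL
above the delegation target would be:

    video_id = '318131'
    print('http://www.mofosex.com/videos/{0}/{0}.html'.format(video_id))
    # http://www.mofosex.com/videos/318131/318131.html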

From 6a6e1a0cd8bacf5a23f731eedaa1783503470227 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 6 Apr 2020 02:05:06 +0700
Subject: [PATCH 005/340] [tele5] Fix extraction (closes #24553)

---
 youtube_dl/extractor/tele5.py | 61 ++++++++++++++++++++++++++++++-----
 1 file changed, 53 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/tele5.py b/youtube_dl/extractor/tele5.py
index 33a72083b..364556a1f 100644
--- a/youtube_dl/extractor/tele5.py
+++ b/youtube_dl/extractor/tele5.py
@@ -1,9 +1,19 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
+from .jwplatform import JWPlatformIE
 from .nexx import NexxIE
-from ..compat import compat_urlparse
+from ..compat import (
+    compat_str,
+    compat_urlparse,
+)
+from ..utils import (
+    NO_DEFAULT,
+    try_get,
+)
 
 
 class Tele5IE(InfoExtractor):
@@ -44,14 +54,49 @@ class Tele5IE(InfoExtractor):
         qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
         video_id = (qs.get('vid') or qs.get('ve_id') or [None])[0]
 
-        if not video_id:
+        NEXX_ID_RE = r'\d{6,}'
+        JWPLATFORM_ID_RE = r'[a-zA-Z0-9]{8}'
+
+        def nexx_result(nexx_id):
+            return self.url_result(
+                'https://api.nexx.cloud/v3/759/videos/byid/%s' % nexx_id,
+                ie=NexxIE.ie_key(), video_id=nexx_id)
+
+        nexx_id = jwplatform_id = None
+
+        if video_id:
+            if re.match(NEXX_ID_RE, video_id):
+                return nexx_result(video_id)
+            elif re.match(JWPLATFORM_ID_RE, video_id):
+                jwplatform_id = video_id
+
+        if not nexx_id:
             display_id = self._match_id(url)
             webpage = self._download_webpage(url, display_id)
-            video_id = self._html_search_regex(
-                (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)',
-                 r'\s+id\s*=\s*["\']player_(\d{6,})',
-                 r'\bdata-id\s*=\s*["\'](\d{6,})'), webpage, 'video id')
+
+            def extract_id(pattern, name, default=NO_DEFAULT):
+                return self._html_search_regex(
+                    (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](%s)' % pattern,
+                     r'\s+id\s*=\s*["\']player_(%s)' % pattern,
+                     r'\bdata-id\s*=\s*["\'](%s)' % pattern), webpage, name,
+                    default=default)
+
+            nexx_id = extract_id(NEXX_ID_RE, 'nexx id', default=None)
+            if nexx_id:
+                return nexx_result(nexx_id)
+
+            if not jwplatform_id:
+                jwplatform_id = extract_id(JWPLATFORM_ID_RE, 'jwplatform id')
+
+            media = self._download_json(
+                'https://cdn.jwplayer.com/v2/media/' + jwplatform_id,
+                display_id)
+            nexx_id = try_get(
+                media, lambda x: x['playlist'][0]['nexx_id'], compat_str)
+
+            if nexx_id:
+                return nexx_result(nexx_id)
 
         return self.url_result(
-            'https://api.nexx.cloud/v3/759/videos/byid/%s' % video_id,
-            ie=NexxIE.ie_key(), video_id=video_id)
+            'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
+            video_id=jwplatform_id)
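
A minimal sketch of how the two id patterns above tell Nexx ids apart from
JWPlatform media ids (the sample ids are made up):

    import re

    NEXX_ID_RE = r'\d{6,}'
    JWPLATFORM_ID_RE = r'[a-zA-Z0-9]{8}'

    for vid in ('1550589', 'oRQeaH3p'):
        if re.match(NEXX_ID_RE, vid):
            print(vid, '-> Nexx id, resolved via api.nexx.cloud')
        elif re.match(JWPLATFORM_ID_RE, vid):
            print(vid, '-> JWPlatform id, resolved via cdn.jwplayer.com')
    # 1550589 -> Nexx id, resolved via api.nexx.cloud
    # oRQeaH3p -> JWPlatform id, resolved via cdn.jwplayer.com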

From 13b08034b53efdcf7055df92199a0f35cf1e172e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 7 Apr 2020 22:54:34 +0700
Subject: [PATCH 006/340] [extractor/common] Skip malformed ISM manifest XMLs
 while extracting ISM formats (#24667)

---
 youtube_dl/extractor/common.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index eaae5e484..c51a3a07d 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2340,6 +2340,8 @@ class InfoExtractor(object):
         if res is False:
             return []
         ism_doc, urlh = res
+        if ism_doc is None:
+            return []
 
         return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
 

From 91bd3bd0194119fccc91b7eafb7afdcda646ad57 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 7 Apr 2020 22:55:36 +0700
Subject: [PATCH 007/340] [tv4] Fix ISM formats extraction (closes #24667)

---
 youtube_dl/extractor/tv4.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/tv4.py b/youtube_dl/extractor/tv4.py
index a819d048c..c498b0191 100644
--- a/youtube_dl/extractor/tv4.py
+++ b/youtube_dl/extractor/tv4.py
@@ -99,7 +99,7 @@ class TV4IE(InfoExtractor):
             manifest_url.replace('.m3u8', '.f4m'),
             video_id, f4m_id='hds', fatal=False))
         formats.extend(self._extract_ism_formats(
-            re.sub(r'\.ism/.+?\.m3u8', r'.ism/Manifest', manifest_url),
+            re.sub(r'\.ism/.*?\.m3u8', r'.ism/Manifest', manifest_url),
             video_id, ism_id='mss', fatal=False))
 
         if not formats and info.get('is_geo_restricted'):

From c9595ee78027ecf6bedbdc33c690228fa7d3a5bb Mon Sep 17 00:00:00 2001
From: Felix Stupp <felix.stupp@outlook.com>
Date: Tue, 7 Apr 2020 16:21:25 +0000
Subject: [PATCH 008/340] [twitch:clips] Extend _VALID_URL (closes #24290)
 (#24642)

---
 youtube_dl/extractor/twitch.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 0db2dca41..78ee0115c 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -643,7 +643,14 @@ class TwitchStreamIE(TwitchBaseIE):
 
 class TwitchClipsIE(TwitchBaseIE):
     IE_NAME = 'twitch:clips'
-    _VALID_URL = r'https?://(?:clips\.twitch\.tv/(?:embed\?.*?\bclip=|(?:[^/]+/)*)|(?:www\.)?twitch\.tv/[^/]+/clip/)(?P<id>[^/?#&]+)'
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:
+                            clips\.twitch\.tv/(?:embed\?.*?\bclip=|(?:[^/]+/)*)|
+                            (?:(?:www|go|m)\.)?twitch\.tv/[^/]+/clip/
+                        )
+                        (?P<id>[^/?#&]+)
+                    '''
 
     _TESTS = [{
         'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
@@ -669,6 +676,12 @@ class TwitchClipsIE(TwitchBaseIE):
     }, {
         'url': 'https://clips.twitch.tv/embed?clip=InquisitiveBreakableYogurtJebaited',
         'only_matching': True,
+    }, {
+        'url': 'https://m.twitch.tv/rossbroadcast/clip/ConfidentBraveHumanChefFrank',
+        'only_matching': True,
+    }, {
+        'url': 'https://go.twitch.tv/rossbroadcast/clip/ConfidentBraveHumanChefFrank',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From dcc8522fdba4c9286ebc0548caf05b425bc68773 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 9 Apr 2020 02:11:19 +0700
Subject: [PATCH 009/340] [motherless] Fix extraction (closes #24699)

---
 youtube_dl/extractor/motherless.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/youtube_dl/extractor/motherless.py b/youtube_dl/extractor/motherless.py
index 43fd70f11..b1615b4d8 100644
--- a/youtube_dl/extractor/motherless.py
+++ b/youtube_dl/extractor/motherless.py
@@ -26,7 +26,7 @@ class MotherlessIE(InfoExtractor):
             'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
             'upload_date': '20100913',
             'uploader_id': 'famouslyfuckedup',
-            'thumbnail': r're:http://.*\.jpg',
+            'thumbnail': r're:https?://.*\.jpg',
             'age_limit': 18,
         }
     }, {
@@ -40,7 +40,7 @@ class MotherlessIE(InfoExtractor):
                            'game', 'hairy'],
             'upload_date': '20140622',
             'uploader_id': 'Sulivana7x',
-            'thumbnail': r're:http://.*\.jpg',
+            'thumbnail': r're:https?://.*\.jpg',
             'age_limit': 18,
         },
         'skip': '404',
@@ -54,7 +54,7 @@ class MotherlessIE(InfoExtractor):
             'categories': ['superheroine heroine  superher'],
             'upload_date': '20140827',
             'uploader_id': 'shade0230',
-            'thumbnail': r're:http://.*\.jpg',
+            'thumbnail': r're:https?://.*\.jpg',
             'age_limit': 18,
         }
     }, {
@@ -76,7 +76,8 @@ class MotherlessIE(InfoExtractor):
             raise ExtractorError('Video %s is for friends only' % video_id, expected=True)
 
         title = self._html_search_regex(
-            r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
+            (r'(?s)<div[^>]+\bclass=["\']media-meta-title[^>]+>(.+?)</div>',
+             r'id="view-upload-title">\s+([^<]+)<'), webpage, 'title')
         video_url = (self._html_search_regex(
             (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
              r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
@@ -84,14 +85,15 @@ class MotherlessIE(InfoExtractor):
             or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
         age_limit = self._rta_search(webpage)
         view_count = str_to_int(self._html_search_regex(
-            r'<strong>Views</strong>\s+([^<]+)<',
+            (r'>(\d+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
             webpage, 'view count', fatal=False))
         like_count = str_to_int(self._html_search_regex(
-            r'<strong>Favorited</strong>\s+([^<]+)<',
+            (r'>(\d+)\s+Favorites<', r'<strong>Favorited</strong>\s+([^<]+)<'),
             webpage, 'like count', fatal=False))
 
         upload_date = self._html_search_regex(
-            r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload date')
+            (r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<',
+             r'<strong>Uploaded</strong>\s+([^<]+)<'), webpage, 'upload date')
         if 'Ago' in upload_date:
             days = int(re.search(r'([0-9]+)', upload_date).group(1))
             upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')

From 5caf88ccb4bfe3d1b53885b78b2bc509ba333f15 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 9 Apr 2020 03:52:29 +0700
Subject: [PATCH 010/340] [nova:embed] Fix extraction (closes #24700)

---
 youtube_dl/extractor/nova.py | 106 +++++++++++++++++++++++------------
 1 file changed, 71 insertions(+), 35 deletions(-)

diff --git a/youtube_dl/extractor/nova.py b/youtube_dl/extractor/nova.py
index 2850af5db..47b9748f0 100644
--- a/youtube_dl/extractor/nova.py
+++ b/youtube_dl/extractor/nova.py
@@ -6,6 +6,7 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     clean_html,
+    determine_ext,
     int_or_none,
     js_to_json,
     qualities,
@@ -33,42 +34,76 @@ class NovaEmbedIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
 
-        bitrates = self._parse_json(
-            self._search_regex(
-                r'(?s)(?:src|bitrates)\s*=\s*({.+?})\s*;', webpage, 'formats'),
-            video_id, transform_source=js_to_json)
-
-        QUALITIES = ('lq', 'mq', 'hq', 'hd')
-        quality_key = qualities(QUALITIES)
-
+        duration = None
         formats = []
-        for format_id, format_list in bitrates.items():
-            if not isinstance(format_list, list):
-                format_list = [format_list]
-            for format_url in format_list:
-                format_url = url_or_none(format_url)
-                if not format_url:
-                    continue
-                if format_id == 'hls':
-                    formats.extend(self._extract_m3u8_formats(
-                        format_url, video_id, ext='mp4',
-                        entry_protocol='m3u8_native', m3u8_id='hls',
-                        fatal=False))
-                    continue
-                f = {
-                    'url': format_url,
-                }
-                f_id = format_id
-                for quality in QUALITIES:
-                    if '%s.mp4' % quality in format_url:
-                        f_id += '-%s' % quality
-                        f.update({
-                            'quality': quality_key(quality),
-                            'format_note': quality.upper(),
+
+        player = self._parse_json(
+            self._search_regex(
+                r'Player\.init\s*\([^,]+,\s*({.+?})\s*,\s*{.+?}\s*\)\s*;',
+                webpage, 'player', default='{}'), video_id, fatal=False)
+        if player:
+            for format_id, format_list in player['tracks'].items():
+                if not isinstance(format_list, list):
+                    format_list = [format_list]
+                for format_dict in format_list:
+                    if not isinstance(format_dict, dict):
+                        continue
+                    format_url = url_or_none(format_dict.get('src'))
+                    format_type = format_dict.get('type')
+                    ext = determine_ext(format_url)
+                    if (format_type == 'application/x-mpegURL'
+                            or format_id == 'HLS' or ext == 'm3u8'):
+                        formats.extend(self._extract_m3u8_formats(
+                            format_url, video_id, 'mp4',
+                            entry_protocol='m3u8_native', m3u8_id='hls',
+                            fatal=False))
+                    elif (format_type == 'application/dash+xml'
+                          or format_id == 'DASH' or ext == 'mpd'):
+                        formats.extend(self._extract_mpd_formats(
+                            format_url, video_id, mpd_id='dash', fatal=False))
+                    else:
+                        formats.append({
+                            'url': format_url,
                         })
-                        break
-                f['format_id'] = f_id
-                formats.append(f)
+            duration = int_or_none(player.get('duration'))
+        else:
+            # Old path, no longer used as of 08.04.2020
+            bitrates = self._parse_json(
+                self._search_regex(
+                    r'(?s)(?:src|bitrates)\s*=\s*({.+?})\s*;', webpage, 'formats'),
+                video_id, transform_source=js_to_json)
+
+            QUALITIES = ('lq', 'mq', 'hq', 'hd')
+            quality_key = qualities(QUALITIES)
+
+            for format_id, format_list in bitrates.items():
+                if not isinstance(format_list, list):
+                    format_list = [format_list]
+                for format_url in format_list:
+                    format_url = url_or_none(format_url)
+                    if not format_url:
+                        continue
+                    if format_id == 'hls':
+                        formats.extend(self._extract_m3u8_formats(
+                            format_url, video_id, ext='mp4',
+                            entry_protocol='m3u8_native', m3u8_id='hls',
+                            fatal=False))
+                        continue
+                    f = {
+                        'url': format_url,
+                    }
+                    f_id = format_id
+                    for quality in QUALITIES:
+                        if '%s.mp4' % quality in format_url:
+                            f_id += '-%s' % quality
+                            f.update({
+                                'quality': quality_key(quality),
+                                'format_note': quality.upper(),
+                            })
+                            break
+                    f['format_id'] = f_id
+                    formats.append(f)
+
         self._sort_formats(formats)
 
         title = self._og_search_title(
@@ -81,7 +116,8 @@ class NovaEmbedIE(InfoExtractor):
             r'poster\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
             'thumbnail', fatal=False, group='value')
         duration = int_or_none(self._search_regex(
-            r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
+            r'videoDuration\s*:\s*(\d+)', webpage, 'duration',
+            default=duration))
 
         return {
             'id': video_id,
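
A minimal sketch of what the new Player.init regex pulls out of the embed
page; the markup below is invented and simplified to plain JSON so that
json.loads can stand in for _parse_json:

    import json
    import re

    webpage = 'Player.init(document.getElementById("player"), {"tracks": {"HLS": [{"src": "https://example.com/master.m3u8", "type": "application/x-mpegURL"}]}, "duration": 120}, {"autoplay": false});'
    player = json.loads(re.search(
        r'Player\.init\s*\([^,]+,\s*({.+?})\s*,\s*{.+?}\s*\)\s*;',
        webpage).group(1))
    print(player['duration'])          # 120
    print(player['tracks']['HLS'][0])  # {'src': 'https://example.com/master.m3u8', 'type': 'application/x-mpegURL'}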

From 6b09401b0ba95da5669d249c8930b3adb873d96e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 9 Apr 2020 22:42:43 +0700
Subject: [PATCH 011/340] [youtube] Skip broken multifeed videos (closes
 #24711)

---
 youtube_dl/extractor/youtube.py | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 908defecd..633b839e0 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1840,15 +1840,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                         # fields may contain comma as well (see
                         # https://github.com/ytdl-org/youtube-dl/issues/8536)
                         feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
+
+                        def feed_entry(name):
+                            return try_get(feed_data, lambda x: x[name][0], compat_str)
+
+                        feed_id = feed_entry('id')
+                        if not feed_id:
+                            continue
+                        feed_title = feed_entry('title')
+                        title = video_title
+                        if feed_title:
+                            title += ' (%s)' % feed_title
                         entries.append({
                             '_type': 'url_transparent',
                             'ie_key': 'Youtube',
                             'url': smuggle_url(
                                 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
                                 {'force_singlefeed': True}),
-                            'title': '%s (%s)' % (video_title, feed_data['title'][0]),
+                            'title': title,
                         })
-                        feed_ids.append(feed_data['id'][0])
+                        feed_ids.append(feed_id)
                     self.to_screen(
                         'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
                         % (', '.join(feed_ids), video_id))
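
The point of the new feed_entry helper is that broken feeds may lack an id
(the feed is then skipped) or a title (the suffix is simply omitted); try_get
returns None instead of raising when a lookup fails. A small sketch with
made-up feed_data:

    from youtube_dl.compat import compat_str
    from youtube_dl.utils import try_get

    feed_data = {'id': ['abc123def45'], 'title': []}

    def feed_entry(name):
        return try_get(feed_data, lambda x: x[name][0], compat_str)

    print(feed_entry('id'))     # abc123def45
    print(feed_entry('title'))  # None -> the " (...)" title suffix is omitted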

From b9e5f872916a7d753ae237459b10622c1c2c3471 Mon Sep 17 00:00:00 2001
From: tom <tomster954@gmail.com>
Date: Thu, 9 Apr 2020 21:50:45 +1000
Subject: [PATCH 012/340] [soundcloud] Extract AAC format

---
 youtube_dl/extractor/soundcloud.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index ff6be0b54..02d56184d 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -350,6 +350,8 @@ class SoundcloudIE(InfoExtractor):
             format_id_list = []
             if protocol:
                 format_id_list.append(protocol)
+            if f.get('ext') == 'aac':
+                f['abr'] = '256'
             for k in ('ext', 'abr'):
                 v = f.get(k)
                 if v:

From 75294a5ed03f4443970478f3f4eac572239cec45 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 10 Apr 2020 17:24:21 +0700
Subject: [PATCH 013/340] [soundcloud] Improve AAC format extraction (closes
 #19173, closes #24708)

---
 youtube_dl/extractor/soundcloud.py | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index 02d56184d..422ce1626 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -246,7 +246,12 @@ class SoundcloudIE(InfoExtractor):
                 'comment_count': int,
                 'repost_count': int,
             },
-        }
+        },
+        {
+            # with AAC HQ format available via OAuth token
+            'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1',
+            'only_matching': True,
+        },
     ]
 
     _API_V2_BASE = 'https://api-v2.soundcloud.com/'
@@ -350,7 +355,8 @@ class SoundcloudIE(InfoExtractor):
             format_id_list = []
             if protocol:
                 format_id_list.append(protocol)
-            if f.get('ext') == 'aac':
+            ext = f.get('ext')
+            if ext == 'aac':
                 f['abr'] = '256'
             for k in ('ext', 'abr'):
                 v = f.get(k)
@@ -362,9 +368,13 @@ class SoundcloudIE(InfoExtractor):
             abr = f.get('abr')
             if abr:
                 f['abr'] = int(abr)
+            if protocol == 'hls':
+                protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
+            else:
+                protocol = 'http'
             f.update({
                 'format_id': '_'.join(format_id_list),
-                'protocol': 'm3u8_native' if protocol == 'hls' else 'http',
+                'protocol': protocol,
                 'preference': -10 if preview else None,
             })
             formats.append(f)
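
A minimal sketch of the protocol mapping added above (the input tuples are
made up): HLS AAC streams now get protocol 'm3u8' rather than 'm3u8_native',
everything else keeps the previous mapping:

    for protocol, ext in (('hls', 'aac'), ('hls', 'mp3'), ('progressive', 'mp3')):
        if protocol == 'hls':
            protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
        else:
            protocol = 'http'
        print(ext, '->', protocol)
    # aac -> m3u8
    # mp3 -> m3u8_native
    # mp3 -> http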

From 533f3e3557af85e28afd72d291cb51a769c7dd7a Mon Sep 17 00:00:00 2001
From: AndrewMBL <62922222+AndrewMBL@users.noreply.github.com>
Date: Tue, 31 Mar 2020 15:25:04 +1100
Subject: [PATCH 014/340] [thisoldhouse] Fix video id extraction (closes
 #24548)

Added support for:
with or without "www."
and either ".chorus.build" or ".com"

It now validates correctly on older URLs
```
<iframe src="https://thisoldhouse.chorus.build/videos/zype/5e33baec27d2e50001d5f52f
```
and newer ones
```
<iframe src="https://www.thisoldhouse.com/videos/zype/5e2b70e95216cc0001615120
```
---
 youtube_dl/extractor/thisoldhouse.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/thisoldhouse.py b/youtube_dl/extractor/thisoldhouse.py
index 387f955ee..33269705f 100644
--- a/youtube_dl/extractor/thisoldhouse.py
+++ b/youtube_dl/extractor/thisoldhouse.py
@@ -19,6 +19,20 @@ class ThisOldHouseIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+    }, {
+        'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project',
+        'note': 'test for updated video URL',
+        'info_dict': {
+            'id': '5e2b70e95216cc0001615120',
+            'ext': 'mp4',
+            'title': 'E12 | The Westerly Project | Seaside Transformation',
+            'description': 'Kevin and Tommy take the tour with the homeowners and Jeff. Norm presents his pine coffee table. Jenn gives Tommy the garden tour. Everyone meets at the flagpole to raise the flags.',
+            'timestamp': 1579755600,
+            'upload_date': '20200123',
+        },
+        'params': {
+            'skip_download': True,
+        },
     }, {
         'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins',
         'only_matching': True,
@@ -38,6 +52,6 @@ class ThisOldHouseIE(InfoExtractor):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         video_id = self._search_regex(
-            r'<iframe[^>]+src=[\'"](?:https?:)?//thisoldhouse\.chorus\.build/videos/zype/([0-9a-f]{24})',
+            r'<iframe[^>]+src=[\'"](?:https?:)?//(?:www\.|)thisoldhouse(?:\.chorus\.build|\.com)/videos/zype/([0-9a-f]{24})',
             webpage, 'video id')
         return self.url_result(self._ZYPE_TMPL % video_id, 'Zype', video_id)

From 2f1983572659415354c88743130a303af8188caf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 11 Apr 2020 20:07:12 +0700
Subject: [PATCH 015/340] [thisoldhouse] Improve video id extraction (closes
 #24549)

---
 youtube_dl/extractor/thisoldhouse.py | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/youtube_dl/extractor/thisoldhouse.py b/youtube_dl/extractor/thisoldhouse.py
index 33269705f..a3d9b4017 100644
--- a/youtube_dl/extractor/thisoldhouse.py
+++ b/youtube_dl/extractor/thisoldhouse.py
@@ -19,20 +19,6 @@ class ThisOldHouseIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
-    }, {
-        'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project',
-        'note': 'test for updated video URL',
-        'info_dict': {
-            'id': '5e2b70e95216cc0001615120',
-            'ext': 'mp4',
-            'title': 'E12 | The Westerly Project | Seaside Transformation',
-            'description': 'Kevin and Tommy take the tour with the homeowners and Jeff. Norm presents his pine coffee table. Jenn gives Tommy the garden tour. Everyone meets at the flagpole to raise the flags.',
-            'timestamp': 1579755600,
-            'upload_date': '20200123',
-        },
-        'params': {
-            'skip_download': True,
-        },
     }, {
         'url': 'https://www.thisoldhouse.com/watch/arlington-arts-crafts-arts-and-crafts-class-begins',
         'only_matching': True,
@@ -45,6 +31,10 @@ class ThisOldHouseIE(InfoExtractor):
     }, {
         'url': 'https://www.thisoldhouse.com/21113884/s41-e13-paradise-lost',
         'only_matching': True,
+    }, {
+        # iframe www.thisoldhouse.com
+        'url': 'https://www.thisoldhouse.com/21083431/seaside-transformation-the-westerly-project',
+        'only_matching': True,
     }]
     _ZYPE_TMPL = 'https://player.zype.com/embed/%s.html?api_key=hsOk_yMSPYNrT22e9pu8hihLXjaZf0JW5jsOWv4ZqyHJFvkJn6rtToHl09tbbsbe'
 
@@ -52,6 +42,6 @@ class ThisOldHouseIE(InfoExtractor):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         video_id = self._search_regex(
-            r'<iframe[^>]+src=[\'"](?:https?:)?//(?:www\.|)thisoldhouse(?:\.chorus\.build|\.com)/videos/zype/([0-9a-f]{24})',
+            r'<iframe[^>]+src=[\'"](?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})',
             webpage, 'video id')
         return self.url_result(self._ZYPE_TMPL % video_id, 'Zype', video_id)

From 00eb865b3c8002f47e73706b54f58feaee0b0ac2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 11 Apr 2020 23:05:08 +0700
Subject: [PATCH 016/340] [youtube] Fix DRM videos detection (refs #24736)

---
 youtube_dl/extractor/youtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 633b839e0..afaa12b1b 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1930,7 +1930,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 }
 
             for fmt in streaming_formats:
-                if fmt.get('drm_families'):
+                if fmt.get('drmFamilies') or fmt.get('drm_families'):
                     continue
                 url = url_or_none(fmt.get('url'))
 

From 46d0baf9412eca5c9241fa55ddc133682e2f2ad7 Mon Sep 17 00:00:00 2001
From: willbeaufoy <will@willbeaufoy.net>
Date: Thu, 23 Apr 2020 20:31:38 +0100
Subject: [PATCH 017/340] [options] Clarify doc on --exec command (closes
 #19087) (#24883)

---
 youtube_dl/options.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 8826b382c..6d5ac62b3 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -853,7 +853,7 @@ def parseOpts(overrideArguments=None):
     postproc.add_option(
         '--exec',
         metavar='CMD', dest='exec_cmd',
-        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
+        help='Execute a command on the file after downloading and post-processing, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
     postproc.add_option(
         '--convert-subs', '--convert-subtitles',
         metavar='FORMAT', dest='convertsubtitles', default=None,

From 2cdfe977d7e006ae93c427868ec1c5701d49cf1a Mon Sep 17 00:00:00 2001
From: Philipp Stehle <anderschwiedu@googlemail.com>
Date: Thu, 23 Apr 2020 21:44:13 +0200
Subject: [PATCH 018/340] [prosiebensat1] Improve extraction and remove 7tv.de
 support (#24948)

---
 youtube_dl/extractor/prosiebensat1.py | 29 +++++++++------------------
 1 file changed, 9 insertions(+), 20 deletions(-)

diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index 1bc4f9b6b..74074606e 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -175,7 +175,7 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
                         (?:
                             (?:beta\.)?
                             (?:
-                                prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|7tv|advopedia
+                                prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|advopedia
                             )\.(?:de|at|ch)|
                             ran\.de|fem\.com|advopedia\.de|galileo\.tv/video
                         )
@@ -193,7 +193,7 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
             'info_dict': {
                 'id': '2104602',
                 'ext': 'mp4',
-                'title': 'Episode 18 - Staffel 2',
+                'title': 'CIRCUS HALLIGALLI - Episode 18 - Staffel 2',
                 'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
                 'upload_date': '20131231',
                 'duration': 5845.04,
@@ -300,7 +300,7 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
             'info_dict': {
                 'id': '2572814',
                 'ext': 'mp4',
-                'title': 'Andreas Kümmert: Rocket Man',
+                'title': 'The Voice of Germany - Andreas Kümmert: Rocket Man',
                 'description': 'md5:6ddb02b0781c6adf778afea606652e38',
                 'upload_date': '20131017',
                 'duration': 469.88,
@@ -310,7 +310,7 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
             },
         },
         {
-            'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
+            'url': 'http://www.fem.com/videos/beauty-lifestyle/kurztrips-zum-valentinstag',
             'info_dict': {
                 'id': '2156342',
                 'ext': 'mp4',
@@ -332,19 +332,6 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
             'playlist_count': 2,
             'skip': 'This video is unavailable',
         },
-        {
-            'url': 'http://www.7tv.de/circus-halligalli/615-best-of-circus-halligalli-ganze-folge',
-            'info_dict': {
-                'id': '4187506',
-                'ext': 'mp4',
-                'title': 'Best of Circus HalliGalli',
-                'description': 'md5:8849752efd90b9772c9db6fdf87fb9e9',
-                'upload_date': '20151229',
-            },
-            'params': {
-                'skip_download': True,
-            },
-        },
         {
             # title in <h2 class="subtitle">
             'url': 'http://www.prosieben.de/stars/oscar-award/videos/jetzt-erst-enthuellt-das-geheimnis-von-emma-stones-oscar-robe-clip',
@@ -421,7 +408,6 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
         r'<div[^>]+id="veeseoDescription"[^>]*>(.+?)</div>',
     ]
     _UPLOAD_DATE_REGEXES = [
-        r'<meta property="og:published_time" content="(.+?)">',
         r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
         r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
         r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
@@ -451,8 +437,11 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
         if description is None:
             description = self._og_search_description(webpage)
         thumbnail = self._og_search_thumbnail(webpage)
-        upload_date = unified_strdate(self._html_search_regex(
-            self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
+        upload_date = unified_strdate(
+            self._html_search_meta('og:published_time', webpage,
+                                   'upload date', default=None)
+            or self._html_search_regex(self._UPLOAD_DATE_REGEXES,
+                                       webpage, 'upload date', default=None))
 
         info.update({
             'id': clip_id,

From 38db9a405ada5ccf333d43004f4a5394f47ef905 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 24 Apr 2020 02:56:10 +0700
Subject: [PATCH 019/340] [prosiebensat1] Extract series metadata

---
 youtube_dl/extractor/prosiebensat1.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index 74074606e..e47088292 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -11,6 +11,7 @@ from ..utils import (
     determine_ext,
     float_or_none,
     int_or_none,
+    merge_dicts,
     unified_strdate,
 )
 
@@ -197,6 +198,10 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
                 'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
                 'upload_date': '20131231',
                 'duration': 5845.04,
+                'series': 'CIRCUS HALLIGALLI',
+                'season_number': 2,
+                'episode': 'Episode 18 - Staffel 2',
+                'episode_number': 18,
             },
         },
         {
@@ -302,6 +307,7 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
                 'ext': 'mp4',
                 'title': 'The Voice of Germany - Andreas Kümmert: Rocket Man',
                 'description': 'md5:6ddb02b0781c6adf778afea606652e38',
+                'timestamp': 1382041620,
                 'upload_date': '20131017',
                 'duration': 469.88,
             },
@@ -443,14 +449,15 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE):
             or self._html_search_regex(self._UPLOAD_DATE_REGEXES,
                                        webpage, 'upload date', default=None))
 
-        info.update({
+        json_ld = self._search_json_ld(webpage, clip_id, default={})
+
+        return merge_dicts(info, {
             'id': clip_id,
             'title': title,
             'description': description,
             'thumbnail': thumbnail,
             'upload_date': upload_date,
-        })
-        return info
+        }, json_ld)
 
     def _extract_playlist(self, url, webpage):
         playlist_id = self._html_search_regex(

From c97f5e934f30309237f5229b16dc9ea1da4e18f2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 26 Apr 2020 12:41:33 +0700
Subject: [PATCH 020/340] [tenplay] Relax _VALID_URL (closes #25001)

---
 youtube_dl/extractor/tenplay.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/tenplay.py b/youtube_dl/extractor/tenplay.py
index dff44a4e2..af325fea8 100644
--- a/youtube_dl/extractor/tenplay.py
+++ b/youtube_dl/extractor/tenplay.py
@@ -10,8 +10,8 @@ from ..utils import (
 
 
 class TenPlayIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?10play\.com\.au/[^/]+/episodes/[^/]+/[^/]+/(?P<id>tpv\d{6}[a-z]{5})'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?10play\.com\.au/(?:[^/]+/)+(?P<id>tpv\d{6}[a-z]{5})'
+    _TESTS = [{
         'url': 'https://10play.com.au/masterchef/episodes/season-1/masterchef-s1-ep-1/tpv190718kwzga',
         'info_dict': {
             'id': '6060533435001',
@@ -27,7 +27,10 @@ class TenPlayIE(InfoExtractor):
             'format': 'bestvideo',
             'skip_download': True,
         }
-    }
+    }, {
+        'url': 'https://10play.com.au/how-to-stay-married/web-extras/season-1/terrys-talks-ep-1-embracing-change/tpv190915ylupc',
+        'only_matching': True,
+    }]
     BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/2199827728001/cN6vRtRQt_default/index.html?videoId=%s'
 
     def _real_extract(self, url):

From 700265bfcf77f19bdf56062865c1fdf18ca3c9da Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 29 Apr 2020 13:38:58 +0100
Subject: [PATCH 021/340] [tvplay] Fix Viafree extraction (closes #15189,
 closes #24473, closes #24789)

---
 youtube_dl/extractor/tvplay.py | 131 +++++++++++----------------------
 1 file changed, 43 insertions(+), 88 deletions(-)

diff --git a/youtube_dl/extractor/tvplay.py b/youtube_dl/extractor/tvplay.py
index d82d48f94..3c2450dd0 100644
--- a/youtube_dl/extractor/tvplay.py
+++ b/youtube_dl/extractor/tvplay.py
@@ -6,7 +6,6 @@ import re
 from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
-    compat_str,
     compat_urlparse,
 )
 from ..utils import (
@@ -15,9 +14,7 @@ from ..utils import (
     int_or_none,
     parse_iso8601,
     qualities,
-    smuggle_url,
     try_get,
-    unsmuggle_url,
     update_url_query,
     url_or_none,
 )
@@ -235,11 +232,6 @@ class TVPlayIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        url, smuggled_data = unsmuggle_url(url, {})
-        self._initialize_geo_bypass({
-            'countries': smuggled_data.get('geo_countries'),
-        })
-
         video_id = self._match_id(url)
         geo_country = self._search_regex(
             r'https?://[^/]+\.([a-z]{2})', url,
@@ -285,8 +277,6 @@ class TVPlayIE(InfoExtractor):
                     'ext': ext,
                 }
                 if video_url.startswith('rtmp'):
-                    if smuggled_data.get('skip_rtmp'):
-                        continue
                     m = re.search(
                         r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
                     if not m:
@@ -347,115 +337,80 @@ class ViafreeIE(InfoExtractor):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:www\.)?
-                        viafree\.
-                        (?:
-                            (?:dk|no)/programmer|
-                            se/program
-                        )
-                        /(?:[^/]+/)+(?P<id>[^/?#&]+)
+                        viafree\.(?P<country>dk|no|se)
+                        /(?P<id>program(?:mer)?/(?:[^/]+/)+[^/?#&]+)
                     '''
     _TESTS = [{
-        'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2',
+        'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1',
         'info_dict': {
-            'id': '395375',
+            'id': '757786',
             'ext': 'mp4',
-            'title': 'Husräddarna S02E02',
-            'description': 'md5:4db5c933e37db629b5a2f75dfb34829e',
-            'series': 'Husräddarna',
-            'season': 'Säsong 2',
+            'title': 'Det beste vorspielet - Sesong 2 - Episode 1',
+            'description': 'md5:b632cb848331404ccacd8cd03e83b4c3',
+            'series': 'Det beste vorspielet',
             'season_number': 2,
-            'duration': 2576,
-            'timestamp': 1400596321,
-            'upload_date': '20140520',
+            'duration': 1116,
+            'timestamp': 1471200600,
+            'upload_date': '20160814',
         },
         'params': {
             'skip_download': True,
         },
-        'add_ie': [TVPlayIE.ie_key()],
     }, {
         # with relatedClips
         'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-1',
-        'info_dict': {
-            'id': '758770',
-            'ext': 'mp4',
-            'title': 'Sommaren med YouTube-stjärnorna S01E01',
-            'description': 'md5:2bc69dce2c4bb48391e858539bbb0e3f',
-            'series': 'Sommaren med YouTube-stjärnorna',
-            'season': 'Säsong 1',
-            'season_number': 1,
-            'duration': 1326,
-            'timestamp': 1470905572,
-            'upload_date': '20160811',
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'add_ie': [TVPlayIE.ie_key()],
+        'only_matching': True,
     }, {
         # Different og:image URL schema
         'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-2',
         'only_matching': True,
     }, {
-        'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1',
+        'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2',
         'only_matching': True,
     }, {
         'url': 'http://www.viafree.dk/programmer/reality/paradise-hotel/saeson-7/episode-5',
         'only_matching': True,
     }]
+    _GEO_BYPASS = False
 
     @classmethod
     def suitable(cls, url):
         return False if TVPlayIE.suitable(url) else super(ViafreeIE, cls).suitable(url)
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        country, path = re.match(self._VALID_URL, url).groups()
+        content = self._download_json(
+            'https://viafree-content.mtg-api.com/viafree-content/v1/%s/path/%s' % (country, path), path)
+        program = content['_embedded']['viafreeBlocks'][0]['_embedded']['program']
+        guid = program['guid']
+        meta = content['meta']
+        title = meta['title']
 
-        webpage = self._download_webpage(url, video_id)
+        try:
+            stream_href = self._download_json(
+                program['_links']['streamLink']['href'], guid,
+                headers=self.geo_verification_headers())['embedded']['prioritizedStreams'][0]['links']['stream']['href']
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+                self.raise_geo_restricted(countries=[country])
+            raise
 
-        data = self._parse_json(
-            self._search_regex(
-                r'(?s)window\.App\s*=\s*({.+?})\s*;\s*</script',
-                webpage, 'data', default='{}'),
-            video_id, transform_source=lambda x: re.sub(
-                r'(?s)function\s+[a-zA-Z_][\da-zA-Z_]*\s*\([^)]*\)\s*{[^}]*}\s*',
-                'null', x), fatal=False)
+        formats = self._extract_m3u8_formats(stream_href, guid, 'mp4')
+        self._sort_formats(formats)
+        episode = program.get('episode') or {}
 
-        video_id = None
-
-        if data:
-            video_id = try_get(
-                data, lambda x: x['context']['dispatcher']['stores'][
-                    'ContentPageProgramStore']['currentVideo']['id'],
-                compat_str)
-
-        # Fallback #1 (extract from og:image URL schema)
-        if not video_id:
-            thumbnail = self._og_search_thumbnail(webpage, default=None)
-            if thumbnail:
-                video_id = self._search_regex(
-                    # Patterns seen:
-                    #  http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/inbox/765166/a2e95e5f1d735bab9f309fa345cc3f25.jpg
-                    #  http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/seasons/15204/758770/4a5ba509ca8bc043e1ebd1a76131cdf2.jpg
-                    r'https?://[^/]+/imagecache/(?:[^/]+/)+(\d{6,})/',
-                    thumbnail, 'video id', default=None)
-
-        # Fallback #2. Extract from raw JSON string.
-        # May extract wrong video id if relatedClips is present.
-        if not video_id:
-            video_id = self._search_regex(
-                r'currentVideo["\']\s*:\s*.+?["\']id["\']\s*:\s*["\'](\d{6,})',
-                webpage, 'video id')
-
-        return self.url_result(
-            smuggle_url(
-                'mtg:%s' % video_id,
-                {
-                    'geo_countries': [
-                        compat_urlparse.urlparse(url).netloc.rsplit('.', 1)[-1]],
-                    # rtmp host mtgfs.fplive.net for viafree is unresolvable
-                    'skip_rtmp': True,
-                }),
-            ie=TVPlayIE.ie_key(), video_id=video_id)
+        return {
+            'id': guid,
+            'title': title,
+            'thumbnail': meta.get('image'),
+            'description': meta.get('description'),
+            'series': episode.get('seriesTitle'),
+            'episode_number': int_or_none(episode.get('episodeNumber')),
+            'season_number': int_or_none(episode.get('seasonNumber')),
+            'duration': int_or_none(try_get(program, lambda x: x['video']['duration']['milliseconds']), 1000),
+            'timestamp': parse_iso8601(try_get(program, lambda x: x['availability']['start'])),
+            'formats': formats,
+        }
 
 
 class TVPlayHomeIE(InfoExtractor):

From 2468a6fa6416fbe617aea00d8203abc74f481d87 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 29 Apr 2020 14:56:32 +0100
Subject: [PATCH 022/340] [yahoo] fix GYAO Player extraction and relax title
 URL regex (closes #24178) (closes #24778)

---
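Note (not part of the patch): a sketch of the new GYAO playback lookup. The GraphQL endpoint, appId and query shape are taken from the diff below; the video id comes from an existing test URL, and plain urllib stands in for _download_json.

import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

video_id = '5cb02352-b725-409e-9f8d-88f947a9f682'
params = urlencode({
    'appId': 'dj00aiZpPUNJeDh2cU1RazU3UCZzPWNvbnN1bWVyc2VjcmV0Jng9NTk-',
    'query': '''{
  content(parameter: {contentId: "%s", logicaAgent: PC_WEB}) {
    video { delivery { id } title }
  }
}''' % video_id,
})
req = Request('https://gyao.yahoo.co.jp/apis/playback/graphql?' + params,
              headers={'Accept': 'application/json'})
resp = json.load(urlopen(req))
content = resp['data']['content']
if not content:
    raise RuntimeError(resp['errors'][0]['message'])  # e.g. 'not in japan' outside JP
print(content['video']['title'], content['video']['delivery']['id'])
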
 youtube_dl/extractor/yahoo.py | 40 ++++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 10 deletions(-)

diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index 238d9cea0..e4615376c 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -12,6 +12,7 @@ from ..compat import (
 )
 from ..utils import (
     clean_html,
+    ExtractorError,
     int_or_none,
     mimetype2ext,
     parse_iso8601,
@@ -368,31 +369,47 @@ class YahooGyaOPlayerIE(InfoExtractor):
         'url': 'https://gyao.yahoo.co.jp/episode/%E3%81%8D%E3%81%AE%E3%81%86%E4%BD%95%E9%A3%9F%E3%81%B9%E3%81%9F%EF%BC%9F%20%E7%AC%AC2%E8%A9%B1%202019%2F4%2F12%E6%94%BE%E9%80%81%E5%88%86/5cb02352-b725-409e-9f8d-88f947a9f682',
         'only_matching': True,
     }]
+    _GEO_BYPASS = False
 
     def _real_extract(self, url):
         video_id = self._match_id(url).replace('/', ':')
-        video = self._download_json(
-            'https://gyao.yahoo.co.jp/dam/v1/videos/' + video_id,
-            video_id, query={
-                'fields': 'longDescription,title,videoId',
-            }, headers={
-                'X-User-Agent': 'Unknown Pc GYAO!/2.0.0 Web',
-            })
+        headers = self.geo_verification_headers()
+        headers['Accept'] = 'application/json'
+        resp = self._download_json(
+            'https://gyao.yahoo.co.jp/apis/playback/graphql', video_id, query={
+                'appId': 'dj00aiZpPUNJeDh2cU1RazU3UCZzPWNvbnN1bWVyc2VjcmV0Jng9NTk-',
+                'query': '''{
+  content(parameter: {contentId: "%s", logicaAgent: PC_WEB}) {
+    video {
+      delivery {
+        id
+      }
+      title
+    }
+  }
+}''' % video_id,
+            }, headers=headers)
+        content = resp['data']['content']
+        if not content:
+            msg = resp['errors'][0]['message']
+            if msg == 'not in japan':
+                self.raise_geo_restricted(countries=['JP'])
+            raise ExtractorError(msg)
+        video = content['video']
         return {
             '_type': 'url_transparent',
             'id': video_id,
             'title': video['title'],
             'url': smuggle_url(
-                'http://players.brightcove.net/4235717419001/SyG5P0gjb_default/index.html?videoId=' + video['videoId'],
+                'http://players.brightcove.net/4235717419001/SyG5P0gjb_default/index.html?videoId=' + video['delivery']['id'],
                 {'geo_countries': ['JP']}),
-            'description': video.get('longDescription'),
             'ie_key': BrightcoveNewIE.ie_key(),
         }
 
 
 class YahooGyaOIE(InfoExtractor):
     IE_NAME = 'yahoo:gyao'
-    _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:p|title/[^/]+)|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+    _VALID_URL = r'https?://(?:gyao\.yahoo\.co\.jp/(?:p|title(?:/[^/]+)?)|streaming\.yahoo\.co\.jp/p/y)/(?P<id>\d+/v\d+|[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
     _TESTS = [{
         'url': 'https://gyao.yahoo.co.jp/p/00449/v03102/',
         'info_dict': {
@@ -405,6 +422,9 @@ class YahooGyaOIE(InfoExtractor):
     }, {
         'url': 'https://gyao.yahoo.co.jp/title/%E3%81%97%E3%82%83%E3%81%B9%E3%81%8F%E3%82%8A007/5b025a49-b2e5-4dc7-945c-09c6634afacf',
         'only_matching': True,
+    }, {
+        'url': 'https://gyao.yahoo.co.jp/title/5b025a49-b2e5-4dc7-945c-09c6634afacf',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From 011e75e641ad2b7fa141a05fa35d3e5d6d0b736b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 1 May 2020 00:40:38 +0700
Subject: [PATCH 023/340] [youtube] Use redirected video id if any (closes
 #25063)

---
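Note (not part of the patch): the added step in isolation. After requesting the watch page, the v parameter of the final (possibly redirected) URL, when present, supersedes the id that was asked for; the final URL below is illustrative, the ids come from the new test.

from urllib.parse import parse_qs, urlparse

requested_id = 'DJztXj2GPfl'  # invalid id from the new test case
# Suppose the watch request ended up here after a redirect (illustrative):
final_url = 'https://www.youtube.com/watch?v=DJztXj2GPfk&gl=US&hl=en'

qs = parse_qs(urlparse(final_url).query)
video_id = qs.get('v', [None])[0] or requested_id
print(video_id)  # DJztXj2GPfk
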
 youtube_dl/extractor/youtube.py | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index afaa12b1b..28886cff2 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1227,6 +1227,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
             'only_matching': True,
         },
+        {
+            # invalid -> valid video id redirection
+            'url': 'DJztXj2GPfl',
+            'info_dict': {
+                'id': 'DJztXj2GPfk',
+                'ext': 'mp4',
+                'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
+                'description': 'md5:bf577a41da97918e94fa9798d9228825',
+                'upload_date': '20090125',
+                'uploader': 'Prochorowka',
+                'uploader_id': 'Prochorowka',
+                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
+                'artist': 'Panjabi MC',
+                'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
+                'album': 'Beware of the Boys (Mundian To Bach Ke)',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        }
     ]
 
     def __init__(self, *args, **kwargs):
@@ -1678,7 +1698,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
         # Get video webpage
         url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
-        video_webpage = self._download_webpage(url, video_id)
+        video_webpage, urlh = self._download_webpage_handle(url, video_id)
+
+        qs = compat_parse_qs(compat_urllib_parse_urlparse(urlh.geturl()).query)
+        video_id = qs.get('v', [None])[0] or video_id
 
         # Attempt to extract SWF player URL
         mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)

From e40c758c2a8f049cd254ad73f0e6ade00bd004d9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 2 May 2020 07:18:08 +0700
Subject: [PATCH 024/340] [youtube] Improve player id extraction and add tests

---
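Note (not part of the patch): the two new _PLAYER_INFO_RE patterns applied standalone to a couple of the player URLs from the added test, showing the (ext, id) pairs that _extract_player_info returns.

import re

PLAYER_INFO_RE = (
    r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
    r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
)

def extract_player_info(player_url):
    for player_re in PLAYER_INFO_RE:
        m = re.search(player_re, player_url)
        if m:
            return m.group('ext'), m.group('id')
    raise ValueError('Cannot identify player %r' % player_url)

print(extract_player_info('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js'))
# ('js', '64dddad9')
print(extract_player_info('http://s.ytimg.com/yt/swfbin/watch_as3-vflrEm9Nq.swf'))
# ('swf', 'vflrEm9Nq')
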
 test/test_youtube_signature.py  | 22 +++++++++++++++++++
 youtube_dl/extractor/youtube.py | 38 +++++++++++++++------------------
 2 files changed, 39 insertions(+), 21 deletions(-)

diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
index f0c370eee..69df30eda 100644
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -74,6 +74,28 @@ _TESTS = [
 ]
 
 
+class TestPlayerInfo(unittest.TestCase):
+    def test_youtube_extract_player_info(self):
+        PLAYER_URLS = (
+            ('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
+            # obsolete
+            ('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
+            ('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
+            ('https://www.youtube.com/yts/jsbin/player_ias-vflCPQUIL/en_US/base.js', 'vflCPQUIL'),
+            ('https://www.youtube.com/yts/jsbin/player-vflzQZbt7/en_US/base.js', 'vflzQZbt7'),
+            ('https://www.youtube.com/yts/jsbin/player-en_US-vflaxXRn1/base.js', 'vflaxXRn1'),
+            ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
+            ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
+            ('http://s.ytimg.com/yt/swfbin/watch_as3-vflrEm9Nq.swf', 'vflrEm9Nq'),
+            ('https://s.ytimg.com/yts/swfbin/player-vflenCdZL/watch_as3.swf', 'vflenCdZL'),
+        )
+        for player_url, expected_player_id in PLAYER_URLS:
+            expected_player_type = player_url.split('.')[-1]
+            player_type, player_id = YoutubeIE._extract_player_info(player_url)
+            self.assertEqual(player_type, expected_player_type)
+            self.assertEqual(player_id, expected_player_id)
+
+
 class TestSignature(unittest.TestCase):
     def setUp(self):
         TEST_DIR = os.path.dirname(os.path.abspath(__file__))
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 28886cff2..5ea66c962 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -426,6 +426,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                      (?(1).+)?                                                # if we found the ID, everything can follow
                      $""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
     _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
+    _PLAYER_INFO_RE = (
+        r'/(?P<id>[a-zA-Z0-9_-]{8,})/player_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?/base\.(?P<ext>[a-z]+)$',
+        r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.(?P<ext>[a-z]+)$',
+    )
     _formats = {
         '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
         '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
@@ -1273,14 +1277,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         """ Return a string representation of a signature """
         return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
 
-    def _extract_signature_function(self, video_id, player_url, example_sig):
-        id_m = re.match(
-            r'.*?[-.](?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
-            player_url)
-        if not id_m:
+    @classmethod
+    def _extract_player_info(cls, player_url):
+        for player_re in cls._PLAYER_INFO_RE:
+            id_m = re.search(player_re, player_url)
+            if id_m:
+                break
+        else:
             raise ExtractorError('Cannot identify player %r' % player_url)
-        player_type = id_m.group('ext')
-        player_id = id_m.group('id')
+        return id_m.group('ext'), id_m.group('id')
+
+    def _extract_signature_function(self, video_id, player_url, example_sig):
+        player_type, player_id = self._extract_player_info(player_url)
 
         # Read from filesystem cache
         func_id = '%s_%s_%s' % (
@@ -2009,22 +2017,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
                         if self._downloader.params.get('verbose'):
                             if player_url is None:
-                                player_version = 'unknown'
                                 player_desc = 'unknown'
                             else:
-                                if player_url.endswith('swf'):
-                                    player_version = self._search_regex(
-                                        r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
-                                        'flash player', fatal=False)
-                                    player_desc = 'flash player %s' % player_version
-                                else:
-                                    player_version = self._search_regex(
-                                        [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
-                                         r'(?:www|player(?:_ias)?)[-.]([^/]+)(?:/[a-z]{2,3}_[A-Z]{2})?/base\.js'],
-                                        player_url,
-                                        'html5 player', fatal=False)
-                                    player_desc = 'html5 player %s' % player_version
-
+                                player_type, player_version = self._extract_player_info(player_url)
+                                player_desc = '%s player %s' % ('flash' if player_type == 'swf' else 'html5', player_version)
                             parts_sizes = self._signature_cache_id(encrypted_sig)
                             self.to_screen('{%s} signature length %s, %s' %
                                            (format_id, parts_sizes, player_desc))

From 4433bb02457f961dc2a753b3d4350a4a8cae138f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 2 May 2020 23:40:30 +0700
Subject: [PATCH 025/340] [extractor/common] Extract multiple JSON-LD entries

---
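Note (not part of the patch): the gist of the change, sketched standalone. Every ld+json block in the page is parsed and list-valued blocks are flattened, so interpretation sees all entries instead of only the first regex match. The script-tag pattern here is only illustrative; the real code uses JSON_LD_RE from utils.

import json
import re

LD_JSON_RE = r'<script[^>]+type=["\']application/ld\+json["\'][^>]*>(.+?)</script>'

html = '''
<script type="application/ld+json">{"@context": "http://schema.org", "@type": "VideoObject", "name": "Clip"}</script>
<script type="application/ld+json">[{"@context": "http://schema.org", "@type": "TVEpisode", "episodeNumber": 5}]</script>
'''

json_ld = []
for mobj in re.finditer(LD_JSON_RE, html, re.DOTALL):
    item = json.loads(mobj.group(1))
    if isinstance(item, dict):
        json_ld.append(item)
    elif isinstance(item, (list, tuple)):
        json_ld.extend(item)
print(len(json_ld))  # 2 -- both entries survive, not just the first
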
 youtube_dl/extractor/common.py | 41 ++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 9 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index c51a3a07d..e9306d806 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1182,16 +1182,33 @@ class InfoExtractor(object):
                                       'twitter card player')
 
     def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
-        json_ld = self._search_regex(
-            JSON_LD_RE, html, 'JSON-LD', group='json_ld', **kwargs)
+        json_ld_list = list(re.finditer(JSON_LD_RE, html))
         default = kwargs.get('default', NO_DEFAULT)
-        if not json_ld:
-            return default if default is not NO_DEFAULT else {}
         # JSON-LD may be malformed and thus `fatal` should be respected.
         # At the same time `default` may be passed that assumes `fatal=False`
         # for _search_regex. Let's simulate the same behavior here as well.
         fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
-        return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
+        json_ld = []
+        for mobj in json_ld_list:
+            json_ld_item = self._parse_json(
+                mobj.group('json_ld'), video_id, fatal=fatal)
+            if not json_ld_item:
+                continue
+            if isinstance(json_ld_item, dict):
+                json_ld.append(json_ld_item)
+            elif isinstance(json_ld_item, (list, tuple)):
+                json_ld.extend(json_ld_item)
+        if json_ld:
+            json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
+        if json_ld:
+            return json_ld
+        if default is not NO_DEFAULT:
+            return default
+        elif fatal:
+            raise RegexNotFoundError('Unable to extract JSON-LD')
+        else:
+            self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
+            return {}
 
     def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
         if isinstance(json_ld, compat_str):
@@ -1256,10 +1273,10 @@ class InfoExtractor(object):
             extract_interaction_statistic(e)
 
         for e in json_ld:
-            if isinstance(e.get('@context'), compat_str) and re.match(r'^https?://schema.org/?$', e.get('@context')):
+            if '@context' in e:
                 item_type = e.get('@type')
                 if expected_type is not None and expected_type != item_type:
-                    return info
+                    continue
                 if item_type in ('TVEpisode', 'Episode'):
                     episode_name = unescapeHTML(e.get('name'))
                     info.update({
@@ -1293,11 +1310,17 @@ class InfoExtractor(object):
                     })
                 elif item_type == 'VideoObject':
                     extract_video_object(e)
-                    continue
+                    if expected_type is None:
+                        continue
+                    else:
+                        break
                 video = e.get('video')
                 if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                     extract_video_object(video)
-                break
+                if expected_type is None:
+                    continue
+                else:
+                    break
         return dict((k, v) for k, v in info.items() if v is not None)
 
     @staticmethod

From 6ffc3cf74ab3d7044c46e2ff4cd5265aeca347f1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 2 May 2020 23:42:51 +0700
Subject: [PATCH 026/340] [crunchyroll] Fix and improve extraction (closes
 #25096, closes #25060)

---
 youtube_dl/extractor/crunchyroll.py | 56 ++++++++++++++++-------------
 1 file changed, 31 insertions(+), 25 deletions(-)

diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 85a9a577f..bc2d1fa8b 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -13,6 +13,7 @@ from ..compat import (
     compat_b64decode,
     compat_etree_Element,
     compat_etree_fromstring,
+    compat_str,
     compat_urllib_parse_urlencode,
     compat_urllib_request,
     compat_urlparse,
@@ -25,9 +26,9 @@ from ..utils import (
     intlist_to_bytes,
     int_or_none,
     lowercase_escape,
+    merge_dicts,
     remove_end,
     sanitized_Request,
-    unified_strdate,
     urlencode_postdata,
     xpath_text,
 )
@@ -136,6 +137,7 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVIE):
             # rtmp
             'skip_download': True,
         },
+        'skip': 'Video gone',
     }, {
         'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
         'info_dict': {
@@ -157,11 +159,12 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVIE):
         'info_dict': {
             'id': '702409',
             'ext': 'mp4',
-            'title': 'Re:ZERO -Starting Life in Another World- Episode 5 – The Morning of Our Promise Is Still Distant',
-            'description': 'md5:97664de1ab24bbf77a9c01918cb7dca9',
+            'title': compat_str,
+            'description': compat_str,
             'thumbnail': r're:^https?://.*\.jpg$',
-            'uploader': 'TV TOKYO',
-            'upload_date': '20160508',
+            'uploader': 'Re:Zero Partners',
+            'timestamp': 1462098900,
+            'upload_date': '20160501',
         },
         'params': {
             # m3u8 download
@@ -172,12 +175,13 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVIE):
         'info_dict': {
             'id': '727589',
             'ext': 'mp4',
-            'title': "KONOSUBA -God's blessing on this wonderful world! 2 Episode 1 – Give Me Deliverance From This Judicial Injustice!",
-            'description': 'md5:cbcf05e528124b0f3a0a419fc805ea7d',
+            'title': compat_str,
+            'description': compat_str,
             'thumbnail': r're:^https?://.*\.jpg$',
             'uploader': 'Kadokawa Pictures Inc.',
-            'upload_date': '20170118',
-            'series': "KONOSUBA -God's blessing on this wonderful world!",
+            'timestamp': 1484130900,
+            'upload_date': '20170111',
+            'series': compat_str,
             'season': "KONOSUBA -God's blessing on this wonderful world! 2",
             'season_number': 2,
             'episode': 'Give Me Deliverance From This Judicial Injustice!',
@@ -200,10 +204,11 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVIE):
         'info_dict': {
             'id': '535080',
             'ext': 'mp4',
-            'title': '11eyes Episode 1 – Red Night ~ Piros éjszaka',
-            'description': 'Kakeru and Yuka are thrown into an alternate nightmarish world they call "Red Night".',
+            'title': compat_str,
+            'description': compat_str,
             'uploader': 'Marvelous AQL Inc.',
-            'upload_date': '20091021',
+            'timestamp': 1255512600,
+            'upload_date': '20091014',
         },
         'params': {
             # Just test metadata extraction
@@ -224,15 +229,17 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVIE):
             # just test metadata extraction
             'skip_download': True,
         },
+        'skip': 'Video gone',
     }, {
         # A video with a vastly different season name compared to the series name
         'url': 'http://www.crunchyroll.com/nyarko-san-another-crawling-chaos/episode-1-test-590532',
         'info_dict': {
             'id': '590532',
             'ext': 'mp4',
-            'title': 'Haiyoru! Nyaruani (ONA) Episode 1 – Test',
-            'description': 'Mahiro and Nyaruko talk about official certification.',
+            'title': compat_str,
+            'description': compat_str,
             'uploader': 'TV TOKYO',
+            'timestamp': 1330956000,
             'upload_date': '20120305',
             'series': 'Nyarko-san: Another Crawling Chaos',
             'season': 'Haiyoru! Nyaruani (ONA)',
@@ -442,23 +449,21 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             webpage, 'language', default=None, group='lang')
 
         video_title = self._html_search_regex(
-            r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>',
-            webpage, 'video_title')
+            (r'(?s)<h1[^>]*>((?:(?!<h1).)*?<(?:span[^>]+itemprop=["\']title["\']|meta[^>]+itemprop=["\']position["\'])[^>]*>(?:(?!<h1).)+?)</h1>',
+             r'<title>(.+?),\s+-\s+.+? Crunchyroll'),
+            webpage, 'video_title', default=None)
+        if not video_title:
+            video_title = re.sub(r'^Watch\s+', '', self._og_search_description(webpage))
         video_title = re.sub(r' {2,}', ' ', video_title)
         video_description = (self._parse_json(self._html_search_regex(
             r'<script[^>]*>\s*.+?\[media_id=%s\].+?({.+?"description"\s*:.+?})\);' % video_id,
             webpage, 'description', default='{}'), video_id) or media_metadata).get('description')
         if video_description:
             video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
-        video_upload_date = self._html_search_regex(
-            [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
-            webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
-        if video_upload_date:
-            video_upload_date = unified_strdate(video_upload_date)
         video_uploader = self._html_search_regex(
             # try looking for both an uploader that's a link and one that's not
             [r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', r'<div>\s*Publisher:\s*<span>\s*(.+?)\s*</span>\s*</div>'],
-            webpage, 'video_uploader', fatal=False)
+            webpage, 'video_uploader', default=False)
 
         formats = []
         for stream in media.get('streams', []):
@@ -611,14 +616,15 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             r'(?s)<h\d[^>]+id=["\']showmedia_about_episode_num[^>]+>.+?</h\d>\s*<h4>\s*Season (\d+)',
             webpage, 'season number', default=None))
 
-        return {
+        info = self._search_json_ld(webpage, video_id, default={})
+
+        return merge_dicts({
             'id': video_id,
             'title': video_title,
             'description': video_description,
             'duration': duration,
             'thumbnail': thumbnail,
             'uploader': video_uploader,
-            'upload_date': video_upload_date,
             'series': series,
             'season': season,
             'season_number': season_number,
@@ -626,7 +632,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             'episode_number': episode_number,
             'subtitles': subtitles,
             'formats': formats,
-        }
+        }, info)
 
 
 class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):

From 66f32ca0e1fbacdc4a61451c5496553439002bdb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 2 May 2020 23:59:25 +0700
Subject: [PATCH 027/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index f753972c4..6cd586019 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,37 @@
+version <unreleased>
+
+Core
++ [extractor/common] Extract multiple JSON-LD entries
+* [options] Clarify doc on --exec command (#19087, #24883)
+* [extractor/common] Skip malformed ISM manifest XMLs while extracting
+  ISM formats (#24667)
+
+Extractors
+* [crunchyroll] Fix and improve extraction (#25096, #25060)
+* [youtube] Improve player id extraction
+* [youtube] Use redirected video id if any (#25063)
+* [yahoo] Fix GYAO Player extraction and relax URL regular expression
+  (#24178, #24778)
+* [tvplay] Fix Viafree extraction (#15189, #24473, #24789)
+* [tenplay] Relax URL regular expression (#25001)
++ [prosiebensat1] Extract series metadata
+* [prosiebensat1] Improve extraction and remove 7tv.de support (#24948)
+- [prosiebensat1] Remove 7tv.de support (#24948)
+* [youtube] Fix DRM videos detection (#24736)
+* [thisoldhouse] Fix video id extraction (#24548, #24549)
++ [soundcloud] Extract AAC format (#19173, #24708)
+* [youtube] Skip broken multifeed videos (#24711)
+* [nova:embed] Fix extraction (#24700)
+* [motherless] Fix extraction (#24699)
+* [twitch:clips] Extend URL regular expression (#24290, #24642)
+* [tv4] Fix ISM formats extraction (#24667)
+* [tele5] Fix extraction (#24553)
++ [mofosex] Add support for generic embeds (#24633)
++ [youporn] Add support for generic embeds
++ [spankwire] Add support for generic embeds (#24633)
+* [spankwire] Fix extraction (#18924, #20648)
+
+
 version 2020.03.24
 
 Core

From 00a41ca4c32ff4b4b30c0bb6d9fbdf74d8230dc6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 3 May 2020 00:05:05 +0700
Subject: [PATCH 028/340] release 2020.05.03

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 README.md                                        | 6 +++---
 docs/supportedsites.md                           | 1 +
 youtube_dl/version.py                            | 2 +-
 9 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 40a869113..487de9298 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.03.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.03.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.03.24
+ [debug] youtube-dl version 2020.05.03
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 7b10df3d4..da4b17db9 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.03.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.03.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 04bbcfa68..e64e39516 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.03.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.03.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index a9e231817..11ac95173 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.03.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.03.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.03.24
+ [debug] youtube-dl version 2020.05.03
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 4a3d32d51..c75c2a073 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.03.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.03.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 6cd586019..200df7c03 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.05.03
 
 Core
 + [extractor/common] Extract multiple JSON-LD entries
diff --git a/README.md b/README.md
index 4f54a5240..12dc00b3e 100644
--- a/README.md
+++ b/README.md
@@ -434,9 +434,9 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo
                                      either the path to the binary or its
                                      containing directory.
     --exec CMD                       Execute a command on the file after
-                                     downloading, similar to find's -exec
-                                     syntax. Example: --exec 'adb push {}
-                                     /sdcard/Music/ && rm {}'
+                                     downloading and post-processing, similar to
+                                     find's -exec syntax. Example: --exec 'adb
+                                     push {} /sdcard/Music/ && rm {}'
     --convert-subs FORMAT            Convert the subtitles to other format
                                      (currently supported: srt|ass|vtt|lrc)
 
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 174b83bf3..843dc2dc0 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -497,6 +497,7 @@
  - **MNetTV**
  - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
  - **Mofosex**
+ - **MofosexEmbed**
  - **Mojvideo**
  - **Morningstar**: morningstar.com
  - **Motherless**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 5aedd3268..f933eb8ec 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.03.24'
+__version__ = '2020.05.03'

From f7f304910d1c1fc19313231d424daba304e1de71 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 4 May 2020 21:15:19 +0700
Subject: [PATCH 029/340] [puhutv] Remove no longer available HTTP formats
 (closes #25124)

---
 youtube_dl/extractor/puhutv.py | 20 ++------------------
 1 file changed, 2 insertions(+), 18 deletions(-)

diff --git a/youtube_dl/extractor/puhutv.py b/youtube_dl/extractor/puhutv.py
index fb704a3c4..ca71665e0 100644
--- a/youtube_dl/extractor/puhutv.py
+++ b/youtube_dl/extractor/puhutv.py
@@ -82,17 +82,6 @@ class PuhuTVIE(InfoExtractor):
         urls = []
         formats = []
 
-        def add_http_from_hls(m3u8_f):
-            http_url = m3u8_f['url'].replace('/hls/', '/mp4/').replace('/chunklist.m3u8', '.mp4')
-            if http_url != m3u8_f['url']:
-                f = m3u8_f.copy()
-                f.update({
-                    'format_id': f['format_id'].replace('hls', 'http'),
-                    'protocol': 'http',
-                    'url': http_url,
-                })
-                formats.append(f)
-
         for video in videos['data']['videos']:
             media_url = url_or_none(video.get('url'))
             if not media_url or media_url in urls:
@@ -101,12 +90,9 @@ class PuhuTVIE(InfoExtractor):
 
             playlist = video.get('is_playlist')
             if (video.get('stream_type') == 'hls' and playlist is True) or 'playlist.m3u8' in media_url:
-                m3u8_formats = self._extract_m3u8_formats(
+                formats.extend(self._extract_m3u8_formats(
                     media_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                    m3u8_id='hls', fatal=False)
-                for m3u8_f in m3u8_formats:
-                    formats.append(m3u8_f)
-                    add_http_from_hls(m3u8_f)
+                    m3u8_id='hls', fatal=False))
                 continue
 
             quality = int_or_none(video.get('quality'))
@@ -128,8 +114,6 @@ class PuhuTVIE(InfoExtractor):
                 format_id += '-%sp' % quality
             f['format_id'] = format_id
             formats.append(f)
-            if is_hls:
-                add_http_from_hls(f)
         self._sort_formats(formats)
 
         creator = try_get(

From c380cc28c4e94b4b61db7f86d35e48197b407266 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 5 May 2020 04:19:33 +0700
Subject: [PATCH 030/340] [utils] Improve cookie files support

+ Add support for UTF-8 in cookie files
* Skip malformed cookie file entries instead of crashing (invalid entry len, invalid expires at)
---
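Note (not part of the patch): a standalone sketch of the check the new prepare_line applies to every Netscape cookie line before it reaches MozillaCookieJar: exactly 7 tab-separated fields and a digits-only (or empty) expires field, otherwise the line is skipped with a warning. The sample lines are the ones from the new malformed_cookies.txt test data.

import collections

ENTRY_LEN = 7
CookieFileEntry = collections.namedtuple(
    'CookieFileEntry',
    ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

def is_valid_cookie_line(line):
    if line.startswith('#') or not line.strip():
        return True  # comments and empty lines pass through untouched
    fields = line.split('\t')
    if len(fields) != ENTRY_LEN:
        return False  # e.g. 6 fields instead of 7
    cookie = CookieFileEntry(*fields)
    return not cookie.expires_at or cookie.expires_at.isdigit()

print(is_valid_cookie_line('www.foobar.foobar\tFALSE\t/\tFALSE\t0\tCOOKIE'))  # False (6 fields)
print(is_valid_cookie_line('www.foobar.foobar\tFALSE\t/\tFALSE\t1.7976931348623157e+308\tCOOKIE\tVALUE'))  # False (bad expires)
print(is_valid_cookie_line('www.foobar.foobar\tFALSE\t/\tFALSE\t0\tCOOKIE\tVALUE'))  # True
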
 test/test_YoutubeDLCookieJar.py             |  7 ++
 test/testdata/cookies/malformed_cookies.txt |  9 +++
 youtube_dl/utils.py                         | 82 +++++++++++++++++++--
 3 files changed, 93 insertions(+), 5 deletions(-)
 create mode 100644 test/testdata/cookies/malformed_cookies.txt

diff --git a/test/test_YoutubeDLCookieJar.py b/test/test_YoutubeDLCookieJar.py
index f959798de..05f48bd74 100644
--- a/test/test_YoutubeDLCookieJar.py
+++ b/test/test_YoutubeDLCookieJar.py
@@ -39,6 +39,13 @@ class TestYoutubeDLCookieJar(unittest.TestCase):
         assert_cookie_has_value('HTTPONLY_COOKIE')
         assert_cookie_has_value('JS_ACCESSIBLE_COOKIE')
 
+    def test_malformed_cookies(self):
+        cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/malformed_cookies.txt')
+        cookiejar.load(ignore_discard=True, ignore_expires=True)
+        # Cookies should be empty since all malformed cookie file entries
+        # will be ignored
+        self.assertFalse(cookiejar._cookies)
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/testdata/cookies/malformed_cookies.txt b/test/testdata/cookies/malformed_cookies.txt
new file mode 100644
index 000000000..17bc40354
--- /dev/null
+++ b/test/testdata/cookies/malformed_cookies.txt
@@ -0,0 +1,9 @@
+# Netscape HTTP Cookie File
+# http://curl.haxx.se/rfc/cookie_spec.html
+# This is a generated file!  Do not edit.
+
+# Cookie file entry with invalid number of fields - 6 instead of 7
+www.foobar.foobar	FALSE	/	FALSE	0	COOKIE
+
+# Cookie file entry with invalid expires at
+www.foobar.foobar	FALSE	/	FALSE	1.7976931348623157e+308	COOKIE	VALUE
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 38262bee4..112279ed7 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -7,6 +7,7 @@ import base64
 import binascii
 import calendar
 import codecs
+import collections
 import contextlib
 import ctypes
 import datetime
@@ -30,6 +31,7 @@ import ssl
 import subprocess
 import sys
 import tempfile
+import time
 import traceback
 import xml.etree.ElementTree
 import zlib
@@ -2735,14 +2737,66 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
     1. https://curl.haxx.se/docs/http-cookies.html
     """
     _HTTPONLY_PREFIX = '#HttpOnly_'
+    _ENTRY_LEN = 7
+    _HEADER = '''# Netscape HTTP Cookie File
+# This file is generated by youtube-dl.  Do not edit.
+
+'''
+    _CookieFileEntry = collections.namedtuple(
+        'CookieFileEntry',
+        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
 
     def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+        """
+        Save cookies to a file.
+
+        Most of the code is taken from CPython 3.8 and slightly adapted
+        to support cookie files with UTF-8 in both python 2 and 3.
+        """
+        if filename is None:
+            if self.filename is not None:
+                filename = self.filename
+            else:
+                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
+
         # Store session cookies with `expires` set to 0 instead of an empty
         # string
         for cookie in self:
             if cookie.expires is None:
                 cookie.expires = 0
-        compat_cookiejar.MozillaCookieJar.save(self, filename, ignore_discard, ignore_expires)
+
+        with io.open(filename, 'w', encoding='utf-8') as f:
+            f.write(self._HEADER)
+            now = time.time()
+            for cookie in self:
+                if not ignore_discard and cookie.discard:
+                    continue
+                if not ignore_expires and cookie.is_expired(now):
+                    continue
+                if cookie.secure:
+                    secure = 'TRUE'
+                else:
+                    secure = 'FALSE'
+                if cookie.domain.startswith('.'):
+                    initial_dot = 'TRUE'
+                else:
+                    initial_dot = 'FALSE'
+                if cookie.expires is not None:
+                    expires = compat_str(cookie.expires)
+                else:
+                    expires = ''
+                if cookie.value is None:
+                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
+                    # with no name, whereas http.cookiejar regards it as a
+                    # cookie with no value.
+                    name = ''
+                    value = cookie.name
+                else:
+                    name = cookie.name
+                    value = cookie.value
+                f.write(
+                    '\t'.join([cookie.domain, initial_dot, cookie.path,
+                               secure, expires, name, value]) + '\n')
 
     def load(self, filename=None, ignore_discard=False, ignore_expires=False):
         """Load cookies from a file."""
@@ -2752,12 +2806,30 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
             else:
                 raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
 
+        def prepare_line(line):
+            if line.startswith(self._HTTPONLY_PREFIX):
+                line = line[len(self._HTTPONLY_PREFIX):]
+            # comments and empty lines are fine
+            if line.startswith('#') or not line.strip():
+                return line
+            cookie_list = line.split('\t')
+            if len(cookie_list) != self._ENTRY_LEN:
+                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
+            cookie = self._CookieFileEntry(*cookie_list)
+            if cookie.expires_at and not cookie.expires_at.isdigit():
+                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
+            return line
+
         cf = io.StringIO()
-        with open(filename) as f:
+        with io.open(filename, encoding='utf-8') as f:
             for line in f:
-                if line.startswith(self._HTTPONLY_PREFIX):
-                    line = line[len(self._HTTPONLY_PREFIX):]
-                cf.write(compat_str(line))
+                try:
+                    cf.write(prepare_line(line))
+                except compat_cookiejar.LoadError as e:
+                    write_string(
+                        'WARNING: skipping cookie file entry due to %s: %r\n'
+                        % (e, line), sys.stderr)
+                    continue
         cf.seek(0)
         self._really_load(cf, filename, ignore_discard, ignore_expires)
         # Session cookies are denoted by either `expires` field set to

From 676723e0daf86eafb31d82ba07813ebc98b334dd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 5 May 2020 05:09:07 +0700
Subject: [PATCH 031/340] [dailymotion] Fix typo

---
 youtube_dl/extractor/dailymotion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 327fdb04a..b8529050c 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -32,7 +32,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
 
     @staticmethod
     def _get_cookie_value(cookies, name):
-        cookie = cookies.get('name')
+        cookie = cookies.get(name)
         if cookie:
             return cookie.value
 

From 6d874fee2a4b54272c48960f3082072d37a7e0ae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 5 May 2020 05:54:10 +0700
Subject: [PATCH 032/340] [compat] Introduce compat_cookiejar_Cookie

---
 youtube_dl/compat.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index d1b86bd13..0ee9bc760 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -57,6 +57,17 @@ try:
 except ImportError:  # Python 2
     import cookielib as compat_cookiejar
 
+if sys.version_info[0] == 2:
+    class compat_cookiejar_Cookie(compat_cookiejar.Cookie):
+        def __init__(self, version, name, value, *args, **kwargs):
+            if isinstance(name, compat_str):
+                name = name.encode()
+            if isinstance(value, compat_str):
+                value = value.encode()
+            compat_cookiejar.Cookie.__init__(self, version, name, value, *args, **kwargs)
+else:
+    compat_cookiejar_Cookie = compat_cookiejar.Cookie
+
 try:
     import http.cookies as compat_cookies
 except ImportError:  # Python 2
@@ -2987,6 +2998,7 @@ __all__ = [
     'compat_basestring',
     'compat_chr',
     'compat_cookiejar',
+    'compat_cookiejar_Cookie',
     'compat_cookies',
     'compat_ctypes_WINFUNCTYPE',
     'compat_etree_Element',

From 6c22cee673f59407a63b2916d8f0623a95a8ea20 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 5 May 2020 06:00:37 +0700
Subject: [PATCH 033/340] [extractor/common] Use compat_cookiejar_Cookie for
 _set_cookie (closes #23256, closes #24776)

To always ensure cookie name and value are bytestrings on python 2.
---
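Note (not part of the patch): what this buys, sketched with the plain standard-library cookiejar. The Cookie constructor call mirrors _set_cookie's argument order from the diff below; on Python 2 the compat_cookiejar_Cookie wrapper introduced in the previous patch additionally encodes unicode name/value to bytestrings before making this call.

try:
    import http.cookiejar as cookiejar  # Python 3
except ImportError:
    import cookielib as cookiejar  # Python 2

def make_cookie(domain, name, value, expire_time=None, port=None,
                path='/', secure=False, discard=False, rest={}):
    # Same positional argument order as _set_cookie in extractor/common.py
    return cookiejar.Cookie(
        0, name, value, port, port is not None, domain, True,
        domain.startswith('.'), path, True, secure, expire_time,
        discard, None, None, rest)

jar = cookiejar.CookieJar()
jar.set_cookie(make_cookie('.example.com', 'age_verified', '1'))
print([c.name for c in jar])  # ['age_verified']
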
 youtube_dl/extractor/common.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index e9306d806..a61753b17 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -15,7 +15,7 @@ import time
 import math
 
 from ..compat import (
-    compat_cookiejar,
+    compat_cookiejar_Cookie,
     compat_cookies,
     compat_etree_Element,
     compat_etree_fromstring,
@@ -2843,7 +2843,7 @@ class InfoExtractor(object):
 
     def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                     path='/', secure=False, discard=False, rest={}, **kwargs):
-        cookie = compat_cookiejar.Cookie(
+        cookie = compat_cookiejar_Cookie(
             0, name, value, port, port is not None, domain, True,
             domain.startswith('.'), path, True, secure, expire_time,
             discard, None, None, rest)
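
Note on the two cookie patches above: on Python 2, cookielib keeps name and value exactly as given, and unicode values can trip up later cookie handling. A minimal sketch of the wrapper in use (the cookie attributes below are illustrative, not taken from a real extractor):

    # Sketch only: exercising compat_cookiejar_Cookie directly. On Python 2
    # the wrapper encodes unicode name/value to bytestrings before calling
    # the stock Cookie constructor; on Python 3 it is a plain alias.
    from youtube_dl.compat import compat_cookiejar_Cookie

    cookie = compat_cookiejar_Cookie(
        0, u'session', u'abc123', None, False, 'example.com', True,
        False, '/', True, False, None, False, None, None, {})
    print(type(cookie.name), type(cookie.value))  # str (bytes) on Python 2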

From 1328305851bf2b708f74140b35a600c955d58394 Mon Sep 17 00:00:00 2001
From: hh0rva1h <61889859+hh0rva1h@users.noreply.github.com>
Date: Tue, 5 May 2020 01:22:50 +0200
Subject: [PATCH 034/340] [orf] Add support for more radio stations (closes
 #24938) (#24968)

---
 youtube_dl/extractor/extractors.py |  10 +++
 youtube_dl/extractor/orf.py        | 139 ++++++++++++++++++++++++++++-
 2 files changed, 146 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index e407ab3d9..4b3092028 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -804,6 +804,16 @@ from .orf import (
     ORFFM4IE,
     ORFFM4StoryIE,
     ORFOE1IE,
+    ORFOE3IE,
+    ORFNOEIE,
+    ORFWIEIE,
+    ORFBGLIE,
+    ORFOOEIE,
+    ORFSTMIE,
+    ORFKTNIE,
+    ORFSBGIE,
+    ORFTIRIE,
+    ORFVBGIE,
     ORFIPTVIE,
 )
 from .outsidetv import OutsideTVIE
diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py
index d54b8ace6..700ce448c 100644
--- a/youtube_dl/extractor/orf.py
+++ b/youtube_dl/extractor/orf.py
@@ -162,13 +162,12 @@ class ORFTVthekIE(InfoExtractor):
 class ORFRadioIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        station = mobj.group('station')
         show_date = mobj.group('date')
         show_id = mobj.group('show')
 
         data = self._download_json(
             'http://audioapi.orf.at/%s/api/json/current/broadcast/%s/%s'
-            % (station, show_id, show_date), show_id)
+            % (self._API_STATION, show_id, show_date), show_id)
 
         entries = []
         for info in data['streams']:
@@ -183,7 +182,7 @@ class ORFRadioIE(InfoExtractor):
             duration = end - start if end and start else None
             entries.append({
                 'id': loop_stream_id.replace('.mp3', ''),
-                'url': 'http://loopstream01.apa.at/?channel=%s&id=%s' % (station, loop_stream_id),
+                'url': 'http://loopstream01.apa.at/?channel=%s&id=%s' % (self._LOOP_STATION, loop_stream_id),
                 'title': title,
                 'description': clean_html(data.get('subtitle')),
                 'duration': duration,
@@ -205,6 +204,8 @@ class ORFFM4IE(ORFRadioIE):
     IE_NAME = 'orf:fm4'
     IE_DESC = 'radio FM4'
     _VALID_URL = r'https?://(?P<station>fm4)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>4\w+)'
+    _API_STATION = 'fm4'
+    _LOOP_STATION = 'fm4'
 
     _TEST = {
         'url': 'http://fm4.orf.at/player/20170107/4CC',
@@ -223,10 +224,142 @@ class ORFFM4IE(ORFRadioIE):
     }
 
 
+class ORFNOEIE(ORFRadioIE):
+    IE_NAME = 'orf:noe'
+    IE_DESC = 'Radio Niederösterreich'
+    _VALID_URL = r'https?://(?P<station>noe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'noe'
+    _LOOP_STATION = 'oe2n'
+
+    _TEST = {
+        'url': 'https://noe.orf.at/player/20200423/NGM',
+        'only_matching': True,
+    }
+
+
+class ORFWIEIE(ORFRadioIE):
+    IE_NAME = 'orf:wien'
+    IE_DESC = 'Radio Wien'
+    _VALID_URL = r'https?://(?P<station>wien)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'wie'
+    _LOOP_STATION = 'oe2w'
+
+    _TEST = {
+        'url': 'https://wien.orf.at/player/20200423/WGUM',
+        'only_matching': True,
+    }
+
+
+class ORFBGLIE(ORFRadioIE):
+    IE_NAME = 'orf:burgenland'
+    IE_DESC = 'Radio Burgenland'
+    _VALID_URL = r'https?://(?P<station>burgenland)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'bgl'
+    _LOOP_STATION = 'oe2b'
+
+    _TEST = {
+        'url': 'https://burgenland.orf.at/player/20200423/BGM',
+        'only_matching': True,
+    }
+
+
+class ORFOOEIE(ORFRadioIE):
+    IE_NAME = 'orf:oberoesterreich'
+    IE_DESC = 'Radio Oberösterreich'
+    _VALID_URL = r'https?://(?P<station>ooe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'ooe'
+    _LOOP_STATION = 'oe2o'
+
+    _TEST = {
+        'url': 'https://ooe.orf.at/player/20200423/OGMO',
+        'only_matching': True,
+    }
+
+
+class ORFSTMIE(ORFRadioIE):
+    IE_NAME = 'orf:steiermark'
+    IE_DESC = 'Radio Steiermark'
+    _VALID_URL = r'https?://(?P<station>steiermark)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'stm'
+    _LOOP_STATION = 'oe2st'
+
+    _TEST = {
+        'url': 'https://steiermark.orf.at/player/20200423/STGMS',
+        'only_matching': True,
+    }
+
+
+class ORFKTNIE(ORFRadioIE):
+    IE_NAME = 'orf:kaernten'
+    IE_DESC = 'Radio Kärnten'
+    _VALID_URL = r'https?://(?P<station>kaernten)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'ktn'
+    _LOOP_STATION = 'oe2k'
+
+    _TEST = {
+        'url': 'https://kaernten.orf.at/player/20200423/KGUMO',
+        'only_matching': True,
+    }
+
+
+class ORFSBGIE(ORFRadioIE):
+    IE_NAME = 'orf:salzburg'
+    IE_DESC = 'Radio Salzburg'
+    _VALID_URL = r'https?://(?P<station>salzburg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'sbg'
+    _LOOP_STATION = 'oe2s'
+
+    _TEST = {
+        'url': 'https://salzburg.orf.at/player/20200423/SGUM',
+        'only_matching': True,
+    }
+
+
+class ORFTIRIE(ORFRadioIE):
+    IE_NAME = 'orf:tirol'
+    IE_DESC = 'Radio Tirol'
+    _VALID_URL = r'https?://(?P<station>tirol)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'tir'
+    _LOOP_STATION = 'oe2t'
+
+    _TEST = {
+        'url': 'https://tirol.orf.at/player/20200423/TGUMO',
+        'only_matching': True,
+    }
+
+
+class ORFVBGIE(ORFRadioIE):
+    IE_NAME = 'orf:vorarlberg'
+    IE_DESC = 'Radio Vorarlberg'
+    _VALID_URL = r'https?://(?P<station>vorarlberg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'vbg'
+    _LOOP_STATION = 'oe2v'
+
+    _TEST = {
+        'url': 'https://vorarlberg.orf.at/player/20200423/VGUM',
+        'only_matching': True,
+    }
+
+
+class ORFOE3IE(ORFRadioIE):
+    IE_NAME = 'orf:oe3'
+    IE_DESC = 'Radio Österreich 3'
+    _VALID_URL = r'https?://(?P<station>oe3)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'oe3'
+    _LOOP_STATION = 'oe3'
+
+    _TEST = {
+        'url': 'https://oe3.orf.at/player/20200424/3WEK',
+        'only_matching': True,
+    }
+
+
 class ORFOE1IE(ORFRadioIE):
     IE_NAME = 'orf:oe1'
     IE_DESC = 'Radio Österreich 1'
     _VALID_URL = r'https?://(?P<station>oe1)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
+    _API_STATION = 'oe1'
+    _LOOP_STATION = 'oe1'
 
     _TEST = {
         'url': 'http://oe1.orf.at/player/20170108/456544',
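
The new station extractors above share all extraction logic with ORFRadioIE and differ only in class attributes: _API_STATION selects the audioapi.orf.at broadcast endpoint and _LOOP_STATION the loopstream01.apa.at channel. A sketch of what a further station would look like (the station below is hypothetical):

    # Sketch only: a hypothetical additional ORF station extractor. The base
    # class interpolates _API_STATION into the broadcast API URL and
    # _LOOP_STATION into the loopstream download URL.
    from youtube_dl.extractor.orf import ORFRadioIE

    class ORFExampleIE(ORFRadioIE):
        IE_NAME = 'orf:example'
        IE_DESC = 'Radio Example'
        _VALID_URL = r'https?://(?P<station>example)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
        _API_STATION = 'exa'
        _LOOP_STATION = 'oe2x'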

From ce7db64bf1a558759be63e92f34550d7ba0e4052 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 5 May 2020 11:19:19 +0100
Subject: [PATCH 035/340] [uol] Fix extraction (closes #22007)

---
 youtube_dl/extractor/uol.py | 139 ++++++++++++++++--------------------
 1 file changed, 62 insertions(+), 77 deletions(-)

diff --git a/youtube_dl/extractor/uol.py b/youtube_dl/extractor/uol.py
index 08f0c072e..628adf219 100644
--- a/youtube_dl/extractor/uol.py
+++ b/youtube_dl/extractor/uol.py
@@ -2,12 +2,17 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+    compat_urllib_parse_urlencode,
+)
 from ..utils import (
     clean_html,
     int_or_none,
     parse_duration,
+    parse_iso8601,
+    qualities,
     update_url_query,
-    str_or_none,
 )
 
 
@@ -16,21 +21,25 @@ class UOLIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.+?\.)?uol\.com\.br/.*?(?:(?:mediaId|v)=|view/(?:[a-z0-9]+/)?|video(?:=|/(?:\d{4}/\d{2}/\d{2}/)?))(?P<id>\d+|[\w-]+-[A-Z0-9]+)'
     _TESTS = [{
         'url': 'http://player.mais.uol.com.br/player_video_v3.swf?mediaId=15951931',
-        'md5': '25291da27dc45e0afb5718a8603d3816',
+        'md5': '4f1e26683979715ff64e4e29099cf020',
         'info_dict': {
             'id': '15951931',
             'ext': 'mp4',
             'title': 'Miss simpatia é encontrada morta',
             'description': 'md5:3f8c11a0c0556d66daf7e5b45ef823b2',
+            'timestamp': 1470421860,
+            'upload_date': '20160805',
         }
     }, {
         'url': 'http://tvuol.uol.com.br/video/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326',
-        'md5': 'e41a2fb7b7398a3a46b6af37b15c00c9',
+        'md5': '2850a0e8dfa0a7307e04a96c5bdc5bc2',
         'info_dict': {
             'id': '15954259',
             'ext': 'mp4',
             'title': 'Incêndio destrói uma das maiores casas noturnas de Londres',
             'description': 'Em Londres, um incêndio destruiu uma das maiores boates da cidade. Não há informações sobre vítimas.',
+            'timestamp': 1470674520,
+            'upload_date': '20160808',
         }
     }, {
         'url': 'http://mais.uol.com.br/static/uolplayer/index.html?mediaId=15951931',
@@ -55,91 +64,55 @@ class UOLIE(InfoExtractor):
         'only_matching': True,
     }]
 
-    _FORMATS = {
-        '2': {
-            'width': 640,
-            'height': 360,
-        },
-        '5': {
-            'width': 1280,
-            'height': 720,
-        },
-        '6': {
-            'width': 426,
-            'height': 240,
-        },
-        '7': {
-            'width': 1920,
-            'height': 1080,
-        },
-        '8': {
-            'width': 192,
-            'height': 144,
-        },
-        '9': {
-            'width': 568,
-            'height': 320,
-        },
-        '11': {
-            'width': 640,
-            'height': 360,
-        }
-    }
-
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        media_id = None
-
-        if video_id.isdigit():
-            media_id = video_id
-
-        if not media_id:
-            embed_page = self._download_webpage(
-                'https://jsuol.com.br/c/tv/uol/embed/?params=[embed,%s]' % video_id,
-                video_id, 'Downloading embed page', fatal=False)
-            if embed_page:
-                media_id = self._search_regex(
-                    (r'uol\.com\.br/(\d+)', r'mediaId=(\d+)'),
-                    embed_page, 'media id', default=None)
-
-        if not media_id:
-            webpage = self._download_webpage(url, video_id)
-            media_id = self._search_regex(r'mediaId=(\d+)', webpage, 'media id')
 
         video_data = self._download_json(
-            'http://mais.uol.com.br/apiuol/v3/player/getMedia/%s.json' % media_id,
-            media_id)['item']
+            # https://api.mais.uol.com.br/apiuol/v4/player/data/[MEDIA_ID]
+            'https://api.mais.uol.com.br/apiuol/v3/media/detail/' + video_id,
+            video_id)['item']
+        media_id = compat_str(video_data['mediaId'])
         title = video_data['title']
+        ver = video_data.get('revision', 2)
 
-        query = {
-            'ver': video_data.get('numRevision', 2),
-            'r': 'http://mais.uol.com.br',
-        }
-        for k in ('token', 'sign'):
-            v = video_data.get(k)
-            if v:
-                query[k] = v
-
+        uol_formats = self._download_json(
+            'https://croupier.mais.uol.com.br/v3/formats/%s/jsonp' % media_id,
+            media_id)
+        quality = qualities(['mobile', 'WEBM', '360p', '720p', '1080p'])
         formats = []
-        for f in video_data.get('formats', []):
+        for format_id, f in uol_formats.items():
+            if not isinstance(f, dict):
+                continue
             f_url = f.get('url') or f.get('secureUrl')
             if not f_url:
                 continue
+            query = {
+                'ver': ver,
+                'r': 'http://mais.uol.com.br',
+            }
+            for k in ('token', 'sign'):
+                v = f.get(k)
+                if v:
+                    query[k] = v
             f_url = update_url_query(f_url, query)
-            format_id = str_or_none(f.get('id'))
-            if format_id == '10':
-                formats.extend(self._extract_m3u8_formats(
-                    f_url, video_id, 'mp4', 'm3u8_native',
-                    m3u8_id='hls', fatal=False))
+            format_id = format_id
+            if format_id == 'HLS':
+                m3u8_formats = self._extract_m3u8_formats(
+                    f_url, media_id, 'mp4', 'm3u8_native',
+                    m3u8_id='hls', fatal=False)
+                encoded_query = compat_urllib_parse_urlencode(query)
+                for m3u8_f in m3u8_formats:
+                    m3u8_f['extra_param_to_segment_url'] = encoded_query
+                    m3u8_f['url'] = update_url_query(m3u8_f['url'], query)
+                formats.extend(m3u8_formats)
                 continue
-            fmt = {
+            formats.append({
                 'format_id': format_id,
                 'url': f_url,
-                'source_preference': 1,
-            }
-            fmt.update(self._FORMATS.get(format_id, {}))
-            formats.append(fmt)
-        self._sort_formats(formats, ('height', 'width', 'source_preference', 'tbr', 'ext'))
+                'quality': quality(format_id),
+                'preference': -1,
+            })
+        self._sort_formats(formats)
 
         tags = []
         for tag in video_data.get('tags', []):
@@ -148,12 +121,24 @@ class UOLIE(InfoExtractor):
                 continue
             tags.append(tag_description)
 
+        thumbnails = []
+        for q in ('Small', 'Medium', 'Wmedium', 'Large', 'Wlarge', 'Xlarge'):
+            q_url = video_data.get('thumb' + q)
+            if not q_url:
+                continue
+            thumbnails.append({
+                'id': q,
+                'url': q_url,
+            })
+
         return {
             'id': media_id,
             'title': title,
-            'description': clean_html(video_data.get('desMedia')),
-            'thumbnail': video_data.get('thumbnail'),
-            'duration': int_or_none(video_data.get('durationSeconds')) or parse_duration(video_data.get('duration')),
+            'description': clean_html(video_data.get('description')),
+            'thumbnails': thumbnails,
+            'duration': parse_duration(video_data.get('duration')),
             'tags': tags,
             'formats': formats,
+            'timestamp': parse_iso8601(video_data.get('publishDate'), ' '),
+            'view_count': int_or_none(video_data.get('viewsQtty')),
         }
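
The reworked extractor resolves the numeric media id via the v3 detail endpoint, pulls the available formats from the croupier endpoint and ranks them with the qualities() helper. A standalone sketch of that ranking step (format ids are sample values):

    # Sketch only: qualities() returns a callable mapping a format id to its
    # index in the preference list (higher is better, unknown ids -> -1).
    from youtube_dl.utils import qualities

    quality = qualities(['mobile', 'WEBM', '360p', '720p', '1080p'])
    for format_id in ('mobile', '720p', '1080p', 'HLS'):
        print(format_id, quality(format_id))  # 'HLS' is not ranked: -1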

From f7b42518dc4c10d6027bfe7f5e7f186d526b1a4b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 5 May 2020 20:01:25 +0700
Subject: [PATCH 036/340] [downloader/http] Finish downloading once received
 data length matches expected

Always do this when possible, i.e. when Content-Length or the expected length is known, not only in test mode.
This saves an unnecessary final loop iteration that tries to read 0 bytes.
---
 youtube_dl/downloader/http.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 3c72ea18b..970103a8d 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -299,7 +299,7 @@ class HttpFD(FileDownloader):
                     'elapsed': now - ctx.start_time,
                 })
 
-                if is_test and byte_counter == data_len:
+                if data_len is not None and byte_counter == data_len:
                     break
 
             if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:

From 6c907eb33ffd79ef9c4761c6460acb31b2eded46 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 5 May 2020 21:30:27 +0700
Subject: [PATCH 037/340] [downloader/http] Request last data block of exact
 remaining size

When possible, request the last data block with the exact size remaining to download rather than the current block size.
---
 youtube_dl/downloader/http.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 970103a8d..5046878df 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -227,7 +227,7 @@ class HttpFD(FileDownloader):
             while True:
                 try:
                     # Download and write
-                    data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
+                    data_block = ctx.data.read(block_size if data_len is None else min(block_size, data_len - byte_counter))
                 # socket.timeout is a subclass of socket.error but may not have
                 # errno set
                 except socket.timeout as e:
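
Taken together, the two downloader patches size the final read exactly and stop as soon as the expected number of bytes has arrived. A simplified, self-contained sketch of the resulting loop (not the full HttpFD logic):

    # Sketch only: read exactly data_len bytes when the length is known,
    # requesting the last block with the exact remaining size and breaking
    # without a trailing zero-byte read.
    import io

    def read_all(stream, data_len, block_size=1024):
        byte_counter = 0
        chunks = []
        while True:
            want = block_size if data_len is None else min(block_size, data_len - byte_counter)
            data_block = stream.read(want)
            if not data_block:
                break
            chunks.append(data_block)
            byte_counter += len(data_block)
            if data_len is not None and byte_counter == data_len:
                break
        return b''.join(chunks)

    assert read_all(io.BytesIO(b'x' * 2500), 2500) == b'x' * 2500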

From 30fa5c6087d2e5e7a2bfe395ffbb267d92959356 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 6 May 2020 23:20:14 +0700
Subject: [PATCH 038/340] [iprima] Improve extraction (closes #25138)

---
 youtube_dl/extractor/iprima.py | 32 ++++++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py
index 11bbeb592..53a550c11 100644
--- a/youtube_dl/extractor/iprima.py
+++ b/youtube_dl/extractor/iprima.py
@@ -16,12 +16,22 @@ class IPrimaIE(InfoExtractor):
     _GEO_BYPASS = False
 
     _TESTS = [{
-        'url': 'http://play.iprima.cz/gondici-s-r-o-33',
+        'url': 'https://prima.iprima.cz/particka/92-epizoda',
         'info_dict': {
-            'id': 'p136534',
+            'id': 'p51388',
             'ext': 'mp4',
-            'title': 'Gondíci s. r. o. (34)',
-            'description': 'md5:16577c629d006aa91f59ca8d8e7f99bd',
+            'title': 'Partička (92)',
+            'description': 'md5:859d53beae4609e6dd7796413f1b6cac',
+        },
+        'params': {
+            'skip_download': True,  # m3u8 download
+        },
+    }, {
+        'url': 'https://cnn.iprima.cz/videa/70-epizoda',
+        'info_dict': {
+            'id': 'p681554',
+            'ext': 'mp4',
+            'title': 'HLAVNÍ ZPRÁVY 3.5.2020',
         },
         'params': {
             'skip_download': True,  # m3u8 download
@@ -68,9 +78,15 @@ class IPrimaIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
 
+        title = self._og_search_title(
+            webpage, default=None) or self._search_regex(
+            r'<h1>([^<]+)', webpage, 'title')
+
         video_id = self._search_regex(
             (r'<iframe[^>]+\bsrc=["\'](?:https?:)?//(?:api\.play-backend\.iprima\.cz/prehravac/embedded|prima\.iprima\.cz/[^/]+/[^/]+)\?.*?\bid=(p\d+)',
-             r'data-product="([^"]+)">'),
+             r'data-product="([^"]+)">',
+             r'id=["\']player-(p\d+)"',
+             r'playerId\s*:\s*["\']player-(p\d+)'),
             webpage, 'real id')
 
         playerpage = self._download_webpage(
@@ -125,8 +141,8 @@ class IPrimaIE(InfoExtractor):
 
         return {
             'id': video_id,
-            'title': self._og_search_title(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage),
+            'title': title,
+            'thumbnail': self._og_search_thumbnail(webpage, default=None),
             'formats': formats,
-            'description': self._og_search_description(webpage),
+            'description': self._og_search_description(webpage, default=None),
         }

From fa3db383330501f00fb0353a3989aef272b571f2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 8 May 2020 17:42:30 +0700
Subject: [PATCH 039/340] [youtube] Improve signature cipher extraction (closes
 #25188)

---
 youtube_dl/extractor/youtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 5ea66c962..9f7483905 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1966,7 +1966,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 url = url_or_none(fmt.get('url'))
 
                 if not url:
-                    cipher = fmt.get('cipher')
+                    cipher = fmt.get('cipher') or fmt.get('signatureCipher')
                     if not cipher:
                         continue
                     url_data = compat_parse_qs(cipher)
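
Newer YouTube player responses carry the scrambled stream data in signatureCipher rather than cipher; both hold a URL-encoded query string containing the stream URL, the scrambled signature s and the parameter name sp. A sketch of the decoding on a made-up value:

    # Sketch only: parsing a fabricated signatureCipher value with the same
    # compat_parse_qs call used above.
    from youtube_dl.compat import compat_parse_qs

    fmt = {'signatureCipher': 'url=https%3A%2F%2Fexample.com%2Fvideoplayback&s=SCRAMBLED&sp=sig'}
    cipher = fmt.get('cipher') or fmt.get('signatureCipher')
    url_data = compat_parse_qs(cipher)
    print(url_data['url'][0], url_data['s'][0], url_data['sp'][0])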

From b74896dad1177a19307922d7ef0f3b0e60d8fb11 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 8 May 2020 18:07:05 +0700
Subject: [PATCH 040/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 200df7c03..fd68814db 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,26 @@
+version <unreleased>
+
+Core
+* [downloader/http] Request last data block of exact remaining size
+* [downloader/http] Finish downloading once received data length matches
+  expected
+* [extractor/common] Use compat_cookiejar_Cookie for _set_cookie to always
+  ensure cookie name and value are bytestrings on python 2 (#23256, #24776)
++ [compat] Introduce compat_cookiejar_Cookie
+* [utils] Improve cookie files support
+    + Add support for UTF-8 in cookie files
+    * Skip malformed cookie file entries instead of crashing (invalid entry
+      length, invalid expires at)
+
+Extractors
+* [youtube] Improve signature cipher extraction (#25187, #25188)
+* [iprima] Improve extraction (#25138)
+* [uol] Fix extraction (#22007)
++ [orf] Add support for more radio stations (#24938, #24968)
+* [dailymotion] Fix typo
+- [puhutv] Remove no longer available HTTP formats (#25124)
+
+
 version 2020.05.03
 
 Core

From b002bc433a0ba151b7c2f42ab6f1db9a27ed1ecd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 8 May 2020 18:10:37 +0700
Subject: [PATCH 041/340] release 2020.05.08

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          |  6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md |  4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md |  4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           |  6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      |  4 ++--
 ChangeLog                                        |  2 +-
 docs/supportedsites.md                           | 10 ++++++++++
 youtube_dl/version.py                            |  2 +-
 8 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 487de9298..4999154e6 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.05.03
+ [debug] youtube-dl version 2020.05.08
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index da4b17db9..be994f368 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index e64e39516..f05326c8c 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 11ac95173..0dbb867cd 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.05.03
+ [debug] youtube-dl version 2020.05.08
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index c75c2a073..4b31c88ed 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.03**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index fd68814db..7805c62b6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.05.08
 
 Core
 * [downloader/http] Request last data block of exact remaining size
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 843dc2dc0..35c1050e5 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -620,11 +620,21 @@
  - **Ooyala**
  - **OoyalaExternal**
  - **OraTV**
+ - **orf:burgenland**: Radio Burgenland
  - **orf:fm4**: radio FM4
  - **orf:fm4:story**: fm4.orf.at stories
  - **orf:iptv**: iptv.ORF.at
+ - **orf:kaernten**: Radio Kärnten
+ - **orf:noe**: Radio Niederösterreich
+ - **orf:oberoesterreich**: Radio Oberösterreich
  - **orf:oe1**: Radio Österreich 1
+ - **orf:oe3**: Radio Österreich 3
+ - **orf:salzburg**: Radio Salzburg
+ - **orf:steiermark**: Radio Steiermark
+ - **orf:tirol**: Radio Tirol
  - **orf:tvthek**: ORF TVthek
+ - **orf:vorarlberg**: Radio Vorarlberg
+ - **orf:wien**: Radio Wien
  - **OsnatelTV**
  - **OutsideTV**
  - **PacktPub**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index f933eb8ec..b08ee126e 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.05.03'
+__version__ = '2020.05.08'

From 9d8f3a12a66efe99a005cf02d540c21a0963ceef Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 12 May 2020 20:48:16 +0100
Subject: [PATCH 042/340] [spike] Fix Bellator mgid extraction (closes #25195)

---
 youtube_dl/extractor/spike.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/spike.py b/youtube_dl/extractor/spike.py
index 7c11ea7aa..aabff7a3c 100644
--- a/youtube_dl/extractor/spike.py
+++ b/youtube_dl/extractor/spike.py
@@ -8,15 +8,10 @@ class BellatorIE(MTVServicesInfoExtractor):
     _TESTS = [{
         'url': 'http://www.bellator.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',
         'info_dict': {
-            'id': 'b55e434e-fde1-4a98-b7cc-92003a034de4',
-            'ext': 'mp4',
-            'title': 'Douglas Lima vs. Paul Daley - Round 1',
-            'description': 'md5:805a8dd29310fd611d32baba2f767885',
-        },
-        'params': {
-            # m3u8 download
-            'skip_download': True,
+            'title': 'Michael Page vs. Evangelista Cyborg',
+            'description': 'md5:0d917fc00ffd72dd92814963fc6cbb05',
         },
+        'playlist_count': 3,
     }, {
         'url': 'http://www.bellator.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page',
         'only_matching': True,
@@ -25,6 +20,9 @@ class BellatorIE(MTVServicesInfoExtractor):
     _FEED_URL = 'http://www.bellator.com/feeds/mrss/'
     _GEO_COUNTRIES = ['US']
 
+    def _extract_mgid(self, webpage):
+        return self._extract_triforce_mgid(webpage)
+
 
 class ParamountNetworkIE(MTVServicesInfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?paramountnetwork\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'

From 327593257c64b7f37440bd1ccea0e6935c30e68a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 14 May 2020 05:11:42 +0700
Subject: [PATCH 043/340] [bbccouk] PEP8

---
 youtube_dl/extractor/bbc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
index 901c5a54f..002c39c39 100644
--- a/youtube_dl/extractor/bbc.py
+++ b/youtube_dl/extractor/bbc.py
@@ -528,7 +528,7 @@ class BBCCoUkIE(InfoExtractor):
 
             def get_programme_id(item):
                 def get_from_attributes(item):
-                    for p in('identifier', 'group'):
+                    for p in ('identifier', 'group'):
                         value = item.get(p)
                         if value and re.match(r'^[pb][\da-z]{7}$', value):
                             return value

From adc13b0748784db87faef580929b1adadaff732e Mon Sep 17 00:00:00 2001
From: comsomisha <shmelev1996@mail.ru>
Date: Thu, 14 May 2020 01:51:40 +0300
Subject: [PATCH 044/340] [mailru] Fix extraction (closes #24530) (#25239)

---
 youtube_dl/extractor/mailru.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/youtube_dl/extractor/mailru.py b/youtube_dl/extractor/mailru.py
index 50234798b..65cc474db 100644
--- a/youtube_dl/extractor/mailru.py
+++ b/youtube_dl/extractor/mailru.py
@@ -128,6 +128,12 @@ class MailRuIE(InfoExtractor):
                 'http://api.video.mail.ru/videos/%s.json?new=1' % video_id,
                 video_id, 'Downloading video JSON')
 
+        headers = {}
+
+        video_key = self._get_cookies('https://my.mail.ru').get('video_key')
+        if video_key:
+            headers['Cookie'] = 'video_key=%s' % video_key.value
+
         formats = []
         for f in video_data['videos']:
             video_url = f.get('url')
@@ -140,6 +146,7 @@ class MailRuIE(InfoExtractor):
                 'url': video_url,
                 'format_id': format_id,
                 'height': height,
+                'http_headers': headers,
             })
         self._sort_formats(formats)
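
The fix forwards the video_key cookie of a logged-in mail.ru session to the CDN by attaching it to every format via http_headers, which the downloader then sends with each request for that format. A sketch of the resulting format entry (values invented):

    # Sketch only: a format dict carrying per-request headers, as produced by
    # the change above. URL and cookie value are made up.
    formats = [{
        'url': 'https://cdn.example.com/video/1080p.mp4',
        'format_id': '1080p',
        'height': 1080,
        'http_headers': {'Cookie': 'video_key=0123456789abcdef'},
    }]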
 

From fae11394f0c51d02320cf4e173877cfc4ed5ddce Mon Sep 17 00:00:00 2001
From: TotalCaesar659 <14265316+TotalCaesar659@users.noreply.github.com>
Date: Thu, 14 May 2020 01:53:17 +0300
Subject: [PATCH 045/340] [README.md] flake8 HTTPS URL (#25230)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 12dc00b3e..45326c69e 100644
--- a/README.md
+++ b/README.md
@@ -1032,7 +1032,7 @@ After you have ensured this site is distributing its content legally, you can fo
 5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
-8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](http://flake8.pycqa.org/en/latest/index.html#quickstart):
+8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
 
         $ flake8 youtube_dl/extractor/yourextractor.py
 

From 384bf91f8802805283f993714ace137e831ba45e Mon Sep 17 00:00:00 2001
From: Juan Francisco Cantero Hurtado <iam@juanfra.info>
Date: Thu, 14 May 2020 00:54:42 +0200
Subject: [PATCH 046/340] [youtube] Add support for yewtu.be (#25226)

---
 youtube_dl/extractor/youtube.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 9f7483905..2cf79e74d 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -388,6 +388,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                             (?:www\.)?invidious\.drycat\.fr/|
                             (?:www\.)?tube\.poal\.co/|
                             (?:www\.)?vid\.wxzm\.sx/|
+                            (?:www\.)?yewtu\.be/|
                             (?:www\.)?yt\.elukerio\.org/|
                             (?:www\.)?yt\.lelux\.fi/|
                             (?:www\.)?kgg2m7yk5aybusll\.onion/|

From b334732709a283a3a284e8835f4473c2a2be2827 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 15 May 2020 14:12:31 +0100
Subject: [PATCH 047/340] [soundcloud] Reduce API playlist page limit (closes
 #25274)

---
 youtube_dl/extractor/soundcloud.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index 422ce1626..d37c52543 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -559,7 +559,7 @@ class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
 class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
     def _extract_playlist(self, base_url, playlist_id, playlist_title):
         COMMON_QUERY = {
-            'limit': 2000000000,
+            'limit': 80000,
             'linked_partitioning': '1',
         }
 

From 52c50a10af5de16165621f8929b64dc726774276 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 15 May 2020 15:57:06 +0100
Subject: [PATCH 048/340] [vimeo] Improve format extraction and sorting (closes
 #25285)

---
 youtube_dl/extractor/vimeo.py | 43 ++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 8cd611e1e..421795b94 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -140,28 +140,28 @@ class VimeoBaseInfoExtractor(InfoExtractor):
             })
 
         # TODO: fix handling of 308 status code returned for live archive manifest requests
+        sep_pattern = r'/sep/video/'
         for files_type in ('hls', 'dash'):
             for cdn_name, cdn_data in config_files.get(files_type, {}).get('cdns', {}).items():
                 manifest_url = cdn_data.get('url')
                 if not manifest_url:
                     continue
                 format_id = '%s-%s' % (files_type, cdn_name)
-                if files_type == 'hls':
-                    formats.extend(self._extract_m3u8_formats(
-                        manifest_url, video_id, 'mp4',
-                        'm3u8' if is_live else 'm3u8_native', m3u8_id=format_id,
-                        note='Downloading %s m3u8 information' % cdn_name,
-                        fatal=False))
-                elif files_type == 'dash':
-                    mpd_pattern = r'/%s/(?:sep/)?video/' % video_id
-                    mpd_manifest_urls = []
-                    if re.search(mpd_pattern, manifest_url):
-                        for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
-                            mpd_manifest_urls.append((format_id + suffix, re.sub(
-                                mpd_pattern, '/%s/%s/' % (video_id, repl), manifest_url)))
-                    else:
-                        mpd_manifest_urls = [(format_id, manifest_url)]
-                    for f_id, m_url in mpd_manifest_urls:
+                sep_manifest_urls = []
+                if re.search(sep_pattern, manifest_url):
+                    for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
+                        sep_manifest_urls.append((format_id + suffix, re.sub(
+                            sep_pattern, '/%s/' % repl, manifest_url)))
+                else:
+                    sep_manifest_urls = [(format_id, manifest_url)]
+                for f_id, m_url in sep_manifest_urls:
+                    if files_type == 'hls':
+                        formats.extend(self._extract_m3u8_formats(
+                            m_url, video_id, 'mp4',
+                            'm3u8' if is_live else 'm3u8_native', m3u8_id=f_id,
+                            note='Downloading %s m3u8 information' % cdn_name,
+                            fatal=False))
+                    elif files_type == 'dash':
                         if 'json=1' in m_url:
                             real_m_url = (self._download_json(m_url, video_id, fatal=False) or {}).get('url')
                             if real_m_url:
@@ -170,11 +170,6 @@ class VimeoBaseInfoExtractor(InfoExtractor):
                             m_url.replace('/master.json', '/master.mpd'), video_id, f_id,
                             'Downloading %s MPD information' % cdn_name,
                             fatal=False)
-                        for f in mpd_formats:
-                            if f.get('vcodec') == 'none':
-                                f['preference'] = -50
-                            elif f.get('acodec') == 'none':
-                                f['preference'] = -40
                         formats.extend(mpd_formats)
 
         live_archive = live_event.get('archive') or {}
@@ -186,6 +181,12 @@ class VimeoBaseInfoExtractor(InfoExtractor):
                 'preference': 1,
             })
 
+        for f in formats:
+            if f.get('vcodec') == 'none':
+                f['preference'] = -50
+            elif f.get('acodec') == 'none':
+                f['preference'] = -40
+
         subtitles = {}
         text_tracks = config['request'].get('text_tracks')
         if text_tracks:
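
The refactoring moves the /sep/video/ handling in front of the HLS/DASH branch, so both manifest types now get a muxed and a separate-stream variant per CDN. A sketch of the URL rewriting on an invented manifest URL:

    # Sketch only: deriving the muxed ('video') and separate-stream
    # ('sep/video') manifest URLs from one CDN manifest URL, mirroring the
    # loop above. The input URL is fabricated.
    import re

    sep_pattern = r'/sep/video/'
    format_id = 'hls-fastly_skyfire'
    manifest_url = 'https://example.vimeocdn.com/exp=1/123/sep/video/456/master.m3u8'

    sep_manifest_urls = []
    if re.search(sep_pattern, manifest_url):
        for suffix, repl in (('', 'video'), ('_sep', 'sep/video')):
            sep_manifest_urls.append((format_id + suffix, re.sub(
                sep_pattern, '/%s/' % repl, manifest_url)))
    else:
        sep_manifest_urls = [(format_id, manifest_url)]

    for f_id, m_url in sep_manifest_urls:
        print(f_id, m_url)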

From bf097a5077036d25e5c2ae2ea2d3b8c55bfdc83c Mon Sep 17 00:00:00 2001
From: Dave Loyall <dave@the-good-guys.net>
Date: Tue, 19 May 2020 14:11:05 -0500
Subject: [PATCH 049/340] [redtube] Improve title extraction (#25208)

---
 youtube_dl/extractor/redtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py
index b1bde1e81..deb3ad52c 100644
--- a/youtube_dl/extractor/redtube.py
+++ b/youtube_dl/extractor/redtube.py
@@ -57,7 +57,7 @@ class RedTubeIE(InfoExtractor):
 
         if not info.get('title'):
             info['title'] = self._html_search_regex(
-                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
+                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
                  r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
                 webpage, 'title', group='title',
                 default=None) or self._og_search_title(webpage)

From 9a269547f2268822724018330c8a939d3bdc2db4 Mon Sep 17 00:00:00 2001
From: tlsssl <63866177+tlsssl@users.noreply.github.com>
Date: Tue, 19 May 2020 19:13:06 +0000
Subject: [PATCH 050/340] [indavideo] Switch to HTTPS for API request (#25191)

---
 youtube_dl/extractor/indavideo.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/indavideo.py b/youtube_dl/extractor/indavideo.py
index 2b5b2b5b0..4c16243ec 100644
--- a/youtube_dl/extractor/indavideo.py
+++ b/youtube_dl/extractor/indavideo.py
@@ -58,7 +58,7 @@ class IndavideoEmbedIE(InfoExtractor):
         video_id = self._match_id(url)
 
         video = self._download_json(
-            'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id,
+            'https://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id,
             video_id)['data']
 
         title = video['title']

From 9cd5f54e31bcfde1f0491d2c7c3e2b467386f3d6 Mon Sep 17 00:00:00 2001
From: Rob <ankenyr@gmail.com>
Date: Tue, 19 May 2020 13:21:52 -0700
Subject: [PATCH 051/340] [utils] Fix file permissions in write_json_file
 (closes #12471) (#25122)

---
 youtube_dl/utils.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 112279ed7..d1eca3760 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -1837,6 +1837,12 @@ def write_json_file(obj, fn):
                 os.unlink(fn)
             except OSError:
                 pass
+        try:
+            mask = os.umask(0)
+            os.umask(mask)
+            os.chmod(tf.name, 0o666 & ~mask)
+        except OSError:
+            pass
         os.rename(tf.name, fn)
     except Exception:
         try:
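
NamedTemporaryFile creates its file with mode 0600, so after the rename the written JSON (e.g. --write-info-json output) was unreadable for other users. The fix reapplies the permissions a regular open() would have produced. A standalone sketch of the umask round-trip:

    # Sketch only: os.umask(0) returns the previous umask, which is restored
    # immediately and then used to compute the default file mode 0o666 & ~mask.
    import os
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tf:
        path = tf.name                      # created with mode 0o600
    mask = os.umask(0)                      # read the current umask...
    os.umask(mask)                          # ...and restore it unchanged
    os.chmod(path, 0o666 & ~mask)
    print(oct(os.stat(path).st_mode & 0o777))
    os.unlink(path)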

From cd13343ad8c5f6e1cbd47ae0ae1eed00d27ff69a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 20 May 2020 03:39:41 +0700
Subject: [PATCH 052/340] [redtube] Improve formats extraction and extract m3u8
 formats (closes #25311, closes #25321)

---
 youtube_dl/extractor/redtube.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py
index deb3ad52c..2d2f6a98c 100644
--- a/youtube_dl/extractor/redtube.py
+++ b/youtube_dl/extractor/redtube.py
@@ -4,6 +4,7 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    determine_ext,
     ExtractorError,
     int_or_none,
     merge_dicts,
@@ -77,7 +78,7 @@ class RedTubeIE(InfoExtractor):
                     })
         medias = self._parse_json(
             self._search_regex(
-                r'mediaDefinition\s*:\s*(\[.+?\])', webpage,
+                r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage,
                 'media definitions', default='{}'),
             video_id, fatal=False)
         if medias and isinstance(medias, list):
@@ -85,6 +86,12 @@ class RedTubeIE(InfoExtractor):
                 format_url = url_or_none(media.get('videoUrl'))
                 if not format_url:
                     continue
+                if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':
+                    formats.extend(self._extract_m3u8_formats(
+                        format_url, video_id, 'mp4',
+                        entry_protocol='m3u8_native', m3u8_id='hls',
+                        fatal=False))
+                    continue
                 format_id = media.get('quality')
                 formats.append({
                     'url': format_url,

From a54c5f83c01cb243a38c0dbc5981f97bd9224c46 Mon Sep 17 00:00:00 2001
From: Michael Klein <github@a98shuttle.de>
Date: Tue, 19 May 2020 23:08:08 +0200
Subject: [PATCH 053/340] [ard] Improve _VALID_URL (closes #25134) (#25198)

---
 youtube_dl/extractor/ard.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
index 2f47e21c3..e23b71466 100644
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -249,7 +249,7 @@ class ARDMediathekIE(ARDMediathekBaseIE):
 
 
 class ARDIE(InfoExtractor):
-    _VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
+    _VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos(?:extern)?/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
     _TESTS = [{
         # available till 14.02.2019
         'url': 'http://www.daserste.de/information/talk/maischberger/videos/das-groko-drama-zerlegen-sich-die-volksparteien-video-102.html',
@@ -263,6 +263,9 @@ class ARDIE(InfoExtractor):
             'upload_date': '20180214',
             'thumbnail': r're:^https?://.*\.jpg$',
         },
+    }, {
+        'url': 'https://www.daserste.de/information/reportage-dokumentation/erlebnis-erde/videosextern/woelfe-und-herdenschutzhunde-ungleiche-brueder-102.html',
+        'only_matching': True,
     }, {
         'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
         'only_matching': True,

From 8f841fafcd1d34112b14300cf76c78c270deacea Mon Sep 17 00:00:00 2001
From: JordanWeatherby <47519158+JordanWeatherby@users.noreply.github.com>
Date: Wed, 20 May 2020 22:30:50 +0100
Subject: [PATCH 054/340] [giantbomb] Extend _VALID_URL (#25222)

---
 youtube_dl/extractor/giantbomb.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/giantbomb.py b/youtube_dl/extractor/giantbomb.py
index 6a1b1e96e..c6477958d 100644
--- a/youtube_dl/extractor/giantbomb.py
+++ b/youtube_dl/extractor/giantbomb.py
@@ -13,10 +13,10 @@ from ..utils import (
 
 
 class GiantBombIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/videos/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?giantbomb\.com/(?:videos|shows)/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
+    _TESTS = [{
         'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
-        'md5': 'c8ea694254a59246a42831155dec57ac',
+        'md5': '132f5a803e7e0ab0e274d84bda1e77ae',
         'info_dict': {
             'id': '2300-9782',
             'display_id': 'quick-look-destiny-the-dark-below',
@@ -26,7 +26,10 @@ class GiantBombIE(InfoExtractor):
             'duration': 2399,
             'thumbnail': r're:^https?://.*\.jpg$',
         }
-    }
+    }, {
+        'url': 'https://www.giantbomb.com/shows/ben-stranding/2970-20212',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)

From 2791e80b60e1ca1c36b83ca93be35e7e5418a1c3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 23 May 2020 12:26:21 +0700
Subject: [PATCH 055/340] [postprocessor/ffmpeg] Embed series metadata with
 --add-metadata

---
 youtube_dl/postprocessor/ffmpeg.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py
index fd3f921a8..5f7298345 100644
--- a/youtube_dl/postprocessor/ffmpeg.py
+++ b/youtube_dl/postprocessor/ffmpeg.py
@@ -447,6 +447,13 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
                         metadata[meta_f] = info[info_f]
                     break
 
+        # See [1-4] for some info on media metadata/metadata supported
+        # by ffmpeg.
+        # 1. https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
+        # 2. https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
+        # 3. https://kodi.wiki/view/Video_file_tagging
+        # 4. http://atomicparsley.sourceforge.net/mpeg-4files.html
+
         add('title', ('track', 'title'))
         add('date', 'upload_date')
         add(('description', 'comment'), 'description')
@@ -457,6 +464,10 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
         add('album')
         add('album_artist')
         add('disc', 'disc_number')
+        add('show', 'series')
+        add('season_number')
+        add('episode_id', ('episode', 'episode_id'))
+        add('episode_sort', 'episode_number')
 
         if not metadata:
             self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
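
The new add() calls map youtube-dl info fields onto the corresponding ffmpeg/MP4 tag names (show, season_number, episode_id, episode_sort). A rough sketch of how such a mapping ends up as -metadata arguments (sample values invented, fallback handling simplified):

    # Sketch only: turning series-related info fields into ffmpeg
    # "-metadata key=value" options, loosely mirroring the add() helper.
    info = {
        'series': 'Some Show',
        'season_number': 2,
        'episode': 'Pilot',
        'episode_number': 1,
    }
    options = []
    for meta_key, info_key in (
            ('show', 'series'),
            ('season_number', 'season_number'),
            ('episode_id', 'episode'),
            ('episode_sort', 'episode_number')):
        if info.get(info_key) is not None:
            options.extend(['-metadata', '%s=%s' % (meta_key, info[info_key])])
    print(options)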

From 1db5ab6b34b07b0d186f9e3d22436795dc0747a2 Mon Sep 17 00:00:00 2001
From: "striker.sh" <19488257+strikersh@users.noreply.github.com>
Date: Tue, 26 May 2020 20:26:45 +0200
Subject: [PATCH 056/340] [youtube] Add support for more invidious instances
 (#25417)

---
 youtube_dl/extractor/youtube.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 2cf79e74d..fec17987b 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -391,6 +391,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                             (?:www\.)?yewtu\.be/|
                             (?:www\.)?yt\.elukerio\.org/|
                             (?:www\.)?yt\.lelux\.fi/|
+                            (?:www\.)?invidious\.ggc-project\.de/|
+                            (?:www\.)?yt\.maisputain\.ovh/|
+                            (?:www\.)?invidious\.13ad\.de/|
+                            (?:www\.)?invidious\.toot\.koeln/|
+                            (?:www\.)?invidious\.fdn\.fr/|
+                            (?:www\.)?watch\.nettohikari\.com/|
                             (?:www\.)?kgg2m7yk5aybusll\.onion/|
                             (?:www\.)?qklhadlycap4cnod\.onion/|
                             (?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion/|
@@ -398,6 +404,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                             (?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion/|
                             (?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion/|
                             (?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p/|
+                            (?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion/|
                             youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                          (?:                                                  # the various things that can precede the ID:

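Each new instance is just another alternative in the verbose _VALID_URL, so watch URLs on those hosts resolve to the same video id. A minimal sketch limited to the hosts added here (the watch\?v= tail and the 11-character id class are assumptions of this sketch; the real pattern accepts many more URL shapes):

    import re

    # Only the hosts added in this patch; the watch\?v= tail and the
    # 11-character id class are assumptions of this sketch.
    VALID_URL = re.compile(r'''(?x)
        https?://
        (?:
            (?:www\.)?invidious\.ggc-project\.de/|
            (?:www\.)?yt\.maisputain\.ovh/|
            (?:www\.)?invidious\.13ad\.de/|
            (?:www\.)?invidious\.toot\.koeln/|
            (?:www\.)?invidious\.fdn\.fr/|
            (?:www\.)?watch\.nettohikari\.com/
        )
        watch\?v=(?P<id>[0-9A-Za-z_-]{11})
    ''')

    for url in ('https://invidious.fdn.fr/watch?v=BaW_jenozKc',
                'https://watch.nettohikari.com/watch?v=BaW_jenozKc'):
        print(VALID_URL.match(url).group('id'))  # BaW_jenozKc
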
From fe515e5c75bf8be6024893a78ce6c24364bbc1dd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 29 May 2020 02:01:51 +0700
Subject: [PATCH 057/340] [ard:beta] Extend _VALID_URL (closes #25405)

---
 youtube_dl/extractor/ard.py | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
index e23b71466..5b7b2dd6d 100644
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -313,9 +313,9 @@ class ARDIE(InfoExtractor):
 
 
 class ARDBetaMediathekIE(ARDMediathekBaseIE):
-    _VALID_URL = r'https://(?:beta|www)\.ardmediathek\.de/(?P<client>[^/]+)/(?:player|live)/(?P<video_id>[a-zA-Z0-9]+)(?:/(?P<display_id>[^/?#]+))?'
+    _VALID_URL = r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?P<client>[^/]+)/(?:player|live|video)/(?P<display_id>(?:[^/]+/)*)(?P<video_id>[a-zA-Z0-9]+)'
     _TESTS = [{
-        'url': 'https://beta.ardmediathek.de/ard/player/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE/die-robuste-roswita',
+        'url': 'https://ardmediathek.de/ard/video/die-robuste-roswita/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
         'md5': 'dfdc87d2e7e09d073d5a80770a9ce88f',
         'info_dict': {
             'display_id': 'die-robuste-roswita',
@@ -328,6 +328,15 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
             'upload_date': '20191222',
             'ext': 'mp4',
         },
+    }, {
+        'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
+        'only_matching': True,
+    }, {
+        'url': 'https://ardmediathek.de/ard/video/saartalk/saartalk-gesellschaftsgift-haltung-gegen-hass/sr-fernsehen/Y3JpZDovL3NyLW9ubGluZS5kZS9TVF84MTY4MA/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.ardmediathek.de/ard/video/trailer/private-eyes-s01-e01/one/Y3JpZDovL3dkci5kZS9CZWl0cmFnLTE1MTgwYzczLWNiMTEtNGNkMS1iMjUyLTg5MGYzOWQxZmQ1YQ/',
+        'only_matching': True,
     }, {
         'url': 'https://www.ardmediathek.de/ard/player/Y3JpZDovL3N3ci5kZS9hZXgvbzEwNzE5MTU/',
         'only_matching': True,
@@ -339,7 +348,11 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
-        display_id = mobj.group('display_id') or video_id
+        display_id = mobj.group('display_id')
+        if display_id:
+            display_id = display_id.rstrip('/')
+        if not display_id:
+            display_id = video_id
 
         player_page = self._download_json(
             'https://api.ardmediathek.de/public-gateway',

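With the reworked pattern, the path segments before the id are captured as display_id (including a trailing slash) and the video id itself serves as the fallback. A self-contained sketch of that match and fallback, using the first test URL above:

    import re

    # The new pattern plus the display_id fallback from the hunk above.
    VALID_URL = (
        r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?P<client>[^/]+)/'
        r'(?:player|live|video)/(?P<display_id>(?:[^/]+/)*)(?P<video_id>[a-zA-Z0-9]+)')

    def ids(url):
        mobj = re.match(VALID_URL, url)
        video_id = mobj.group('video_id')
        display_id = mobj.group('display_id')
        if display_id:
            display_id = display_id.rstrip('/')
        return video_id, display_id or video_id

    video_id, display_id = ids(
        'https://ardmediathek.de/ard/video/die-robuste-roswita/'
        'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE')
    print(display_id)  # die-robuste-roswita
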
From efd72b05d212f914315f6520cc831a0fbea26f6c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 29 May 2020 03:28:44 +0700
Subject: [PATCH 058/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 7805c62b6..e174aad58 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,25 @@
+version <unreleased>
+
+Core
+* [postprocessor/ffmpeg] Embed series metadata with --add-metadata
+* [utils] Fix file permissions in write_json_file (#12471, #25122)
+
+Extractors
+* [ard:beta] Extend URL regular expression (#25405)
++ [youtube] Add support for more invidious instances (#25417)
+* [giantbomb] Extend URL regular expression (#25222)
+* [ard] Improve URL regular expression (#25134, #25198)
+* [redtube] Improve formats extraction and extract m3u8 formats (#25311,
+  #25321)
+* [indavideo] Switch to HTTPS for API request (#25191)
+* [redtube] Improve title extraction (#25208)
+* [vimeo] Improve format extraction and sorting (#25285)
+* [soundcloud] Reduce API playlist page limit (#25274)
++ [youtube] Add support for yewtu.be (#25226)
+* [mailru] Fix extraction (#24530, #25239)
+* [bellator] Fix mgid extraction (#25195)
+
+
 version 2020.05.08
 
 Core

From 228c1d685bb6d35b2c1a73e1adbc085c76da6b75 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 29 May 2020 03:33:13 +0700
Subject: [PATCH 059/340] release 2020.05.29

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 CONTRIBUTING.md                                  | 2 +-
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 8 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 4999154e6..09bf763cd 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.05.08
+ [debug] youtube-dl version 2020.05.29
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index be994f368..dc9b67cc8 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index f05326c8c..129ca0a02 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 0dbb867cd..40e53bcae 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.05.08
+ [debug] youtube-dl version 2020.05.29
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 4b31c88ed..619a45f19 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.08. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.08**
+- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ac759ddc4..58ab3a4b8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -153,7 +153,7 @@ After you have ensured this site is distributing its content legally, you can fo
 5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/ytdl-org/youtube-dl/blob/7f41a598b3fba1bcab2817de64a08941200aa3c8/youtube_dl/extractor/common.py#L94-L303). Add tests and code for as many as you want.
-8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](http://flake8.pycqa.org/en/latest/index.html#quickstart):
+8. Make sure your code follows [youtube-dl coding conventions](#youtube-dl-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
 
         $ flake8 youtube_dl/extractor/yourextractor.py
 
diff --git a/ChangeLog b/ChangeLog
index e174aad58..c13035c89 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.05.29
 
 Core
 * [postprocessor/ffmpeg] Embed series metadata with --add-metadata
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index b08ee126e..966fb3aa9 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.05.08'
+__version__ = '2020.05.29'

From bef4688c72f9bdb37f261cc2699d6915a5593edc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 31 May 2020 11:10:31 +0700
Subject: [PATCH 060/340] [jwplatform] Improve embeds extraction (closes
 #25467)

---
 youtube_dl/extractor/jwplatform.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/jwplatform.py b/youtube_dl/extractor/jwplatform.py
index 2aabd98b5..dfa07e423 100644
--- a/youtube_dl/extractor/jwplatform.py
+++ b/youtube_dl/extractor/jwplatform.py
@@ -32,7 +32,7 @@ class JWPlatformIE(InfoExtractor):
     @staticmethod
     def _extract_urls(webpage):
         return re.findall(
-            r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//content\.jwplatform\.com/players/[a-zA-Z0-9]{8})',
+            r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//(?:content\.jwplatform|cdn\.jwplayer)\.com/players/[a-zA-Z0-9]{8})',
             webpage)
 
     def _real_extract(self, url):

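A quick check of the widened pattern against an invented page snippet shows that both content.jwplatform.com and cdn.jwplayer.com players are now picked up:

    import re

    # The extended pattern from above, run over an invented HTML snippet.
    JWPLAYER_EMBED_RE = r'<(?:script|iframe)[^>]+?src=["\']((?:https?:)?//(?:content\.jwplatform|cdn\.jwplayer)\.com/players/[a-zA-Z0-9]{8})'

    webpage = '''
    <iframe src="https://cdn.jwplayer.com/players/abcDEF12-xyz98765.html"></iframe>
    <script src="//content.jwplatform.com/players/ABCdef12-qwe45678.js"></script>
    '''

    print(re.findall(JWPLAYER_EMBED_RE, webpage))
    # ['https://cdn.jwplayer.com/players/abcDEF12',
    #  '//content.jwplatform.com/players/ABCdef12']
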
From 7016e24ebea09c717f413273cda16721c4098325 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 1 Jun 2020 20:31:51 +0700
Subject: [PATCH 061/340] [periscope] Fix untitled broadcasts (#25482)

---
 youtube_dl/extractor/periscope.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/periscope.py b/youtube_dl/extractor/periscope.py
index c02e34aba..b15906390 100644
--- a/youtube_dl/extractor/periscope.py
+++ b/youtube_dl/extractor/periscope.py
@@ -18,7 +18,7 @@ class PeriscopeBaseIE(InfoExtractor):
             item_id, query=query)
 
     def _parse_broadcast_data(self, broadcast, video_id):
-        title = broadcast['status']
+        title = broadcast.get('status') or 'Periscope Broadcast'
         uploader = broadcast.get('user_display_name') or broadcast.get('username')
         title = '%s - %s' % (uploader, title) if uploader else title
         is_live = broadcast.get('state').lower() == 'running'

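The `or` fallback keeps extraction working for broadcasts whose 'status' field is missing or empty, while still prefixing the uploader when one is known. A tiny standalone sketch (the sample broadcast dicts are invented):

    # Small sketch of the fallback used above: an empty or missing 'status'
    # no longer raises, and the uploader is still prepended when known.
    def broadcast_title(broadcast):
        title = broadcast.get('status') or 'Periscope Broadcast'
        uploader = broadcast.get('user_display_name') or broadcast.get('username')
        return '%s - %s' % (uploader, title) if uploader else title

    print(broadcast_title({'username': 'someone'}))
    # someone - Periscope Broadcast
    print(broadcast_title({'status': 'Hello', 'user_display_name': 'A'}))
    # A - Hello
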
From 7b0b53ea69ad32d0bf6987633e56f68da3913caa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 1 Jun 2020 20:32:57 +0700
Subject: [PATCH 062/340] [twitter:broadcast] Add untitled periscope broadcast
 test

---
 youtube_dl/extractor/twitter.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/youtube_dl/extractor/twitter.py b/youtube_dl/extractor/twitter.py
index 01468981c..4284487db 100644
--- a/youtube_dl/extractor/twitter.py
+++ b/youtube_dl/extractor/twitter.py
@@ -578,6 +578,18 @@ class TwitterBroadcastIE(TwitterBaseIE, PeriscopeBaseIE):
     IE_NAME = 'twitter:broadcast'
     _VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/broadcasts/(?P<id>[0-9a-zA-Z]{13})'
 
+    _TEST = {
+        # untitled Periscope video
+        'url': 'https://twitter.com/i/broadcasts/1yNGaQLWpejGj',
+        'info_dict': {
+            'id': '1yNGaQLWpejGj',
+            'ext': 'mp4',
+            'title': 'Andrea May Sahouri - Periscope Broadcast',
+            'uploader': 'Andrea May Sahouri',
+            'uploader_id': '1PXEdBZWpGwKe',
+        },
+    }
+
     def _real_extract(self, url):
         broadcast_id = self._match_id(url)
         broadcast = self._call_api(

From d5147b65ac3d81b5acb4aa2c4aca008ffd3e7c06 Mon Sep 17 00:00:00 2001
From: Matej Dujava <mdujava@gmail.com>
Date: Mon, 1 Jun 2020 16:11:31 +0200
Subject: [PATCH 063/340] [malltv] Add support for sk.mall.tv (#25445)

---
 youtube_dl/extractor/malltv.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/malltv.py b/youtube_dl/extractor/malltv.py
index e13c2e11a..6f4fd927f 100644
--- a/youtube_dl/extractor/malltv.py
+++ b/youtube_dl/extractor/malltv.py
@@ -8,7 +8,7 @@ from ..utils import merge_dicts
 
 
 class MallTVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?mall\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:(?:www|sk)\.)?mall\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://www.mall.tv/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
         'md5': '1c4a37f080e1f3023103a7b43458e518',
@@ -26,6 +26,9 @@ class MallTVIE(InfoExtractor):
     }, {
         'url': 'https://www.mall.tv/kdo-to-plati/18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
         'only_matching': True,
+    }, {
+        'url': 'https://sk.mall.tv/gejmhaus/reklamacia-nehreje-vyrobnik-tepla-alebo-spekacka',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From b4eb0bc7bd2524a63e3a6441fe82a6cfd8ebc365 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 5 Jun 2020 23:33:14 +0700
Subject: [PATCH 064/340] [brightcove] Fix subtitles extraction (closes #25540)

---
 youtube_dl/extractor/brightcove.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 85001b3ad..462815317 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -25,9 +25,11 @@ from ..utils import (
     int_or_none,
     parse_iso8601,
     smuggle_url,
+    str_or_none,
     unescapeHTML,
     unsmuggle_url,
     update_url_query,
+    url_or_none,
     clean_html,
     mimetype2ext,
     UnsupportedError,
@@ -553,10 +555,16 @@ class BrightcoveNewIE(AdobePassIE):
 
         subtitles = {}
         for text_track in json_data.get('text_tracks', []):
-            if text_track.get('src'):
-                subtitles.setdefault(text_track.get('srclang'), []).append({
-                    'url': text_track['src'],
-                })
+            if text_track.get('kind') != 'captions':
+                continue
+            text_track_url = url_or_none(text_track.get('src'))
+            if not text_track_url:
+                continue
+            lang = (str_or_none(text_track.get('srclang'))
+                    or str_or_none(text_track.get('label')) or 'en').lower()
+            subtitles.setdefault(lang, []).append({
+                'url': text_track_url,
+            })
 
         is_live = False
         duration = float_or_none(json_data.get('duration'), 1000)

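Only tracks of kind 'captions' with a usable src are kept now, and the language key falls back from srclang to label to 'en'. A standalone sketch of that filtering, with simplified inline checks standing in for url_or_none/str_or_none and invented sample tracks:

    # Standalone sketch of the subtitle filtering above; url_or_none and
    # str_or_none are replaced with simplified inline checks.
    def extract_subtitles(text_tracks):
        subtitles = {}
        for text_track in text_tracks or []:
            if text_track.get('kind') != 'captions':
                continue
            src = text_track.get('src')
            if not isinstance(src, str) or not src.startswith(('http://', 'https://', '//')):
                continue
            lang = (text_track.get('srclang') or text_track.get('label') or 'en').lower()
            subtitles.setdefault(lang, []).append({'url': src})
        return subtitles

    print(extract_subtitles([
        {'kind': 'captions', 'src': 'https://example.com/en.vtt', 'srclang': 'EN'},
        {'kind': 'metadata', 'src': 'https://example.com/meta.vtt'},
        {'kind': 'captions', 'src': None, 'label': 'Deutsch'},
    ]))
    # {'en': [{'url': 'https://example.com/en.vtt'}]}
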
From c8b232cc48858713d9f5c88300ffcbd022d740b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 5 Jun 2020 23:35:57 +0700
Subject: [PATCH 065/340] [brightcove] Sort imports

---
 youtube_dl/extractor/brightcove.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 462815317..5c22a730d 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -5,34 +5,34 @@ import base64
 import re
 import struct
 
-from .common import InfoExtractor
 from .adobepass import AdobePassIE
+from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
+    compat_HTTPError,
     compat_parse_qs,
     compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_xml_parse_error,
-    compat_HTTPError,
 )
 from ..utils import (
-    ExtractorError,
+    clean_html,
     extract_attributes,
+    ExtractorError,
     find_xpath_attr,
     fix_xml_ampersands,
     float_or_none,
-    js_to_json,
     int_or_none,
+    js_to_json,
+    mimetype2ext,
     parse_iso8601,
     smuggle_url,
     str_or_none,
     unescapeHTML,
     unsmuggle_url,
+    UnsupportedError,
     update_url_query,
     url_or_none,
-    clean_html,
-    mimetype2ext,
-    UnsupportedError,
 )
 
 

From a0455d0ffd93b069b8ab1aa95b7fa7d0bc526302 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 00:12:47 +0700
Subject: [PATCH 066/340] [twitch] Pass v5 accept header and fix thumbnails
 extraction (closes #25531)

---
 youtube_dl/extractor/twitch.py | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 78ee0115c..45b8a7236 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -21,6 +21,7 @@ from ..utils import (
     orderedSet,
     parse_duration,
     parse_iso8601,
+    qualities,
     try_get,
     unified_timestamp,
     update_url_query,
@@ -50,7 +51,10 @@ class TwitchBaseIE(InfoExtractor):
 
     def _call_api(self, path, item_id, *args, **kwargs):
         headers = kwargs.get('headers', {}).copy()
-        headers['Client-ID'] = self._CLIENT_ID
+        headers.update({
+            'Accept': 'application/vnd.twitchtv.v5+json; charset=UTF-8',
+            'Client-ID': self._CLIENT_ID,
+        })
         kwargs['headers'] = headers
         response = self._download_json(
             '%s/%s' % (self._API_BASE, path), item_id,
@@ -186,12 +190,27 @@ class TwitchItemBaseIE(TwitchBaseIE):
             is_live = False
         else:
             is_live = None
+        _QUALITIES = ('small', 'medium', 'large')
+        quality_key = qualities(_QUALITIES)
+        thumbnails = []
+        preview = info.get('preview')
+        if isinstance(preview, dict):
+            for thumbnail_id, thumbnail_url in preview.items():
+                thumbnail_url = url_or_none(thumbnail_url)
+                if not thumbnail_url:
+                    continue
+                if thumbnail_id not in _QUALITIES:
+                    continue
+                thumbnails.append({
+                    'url': thumbnail_url,
+                    'preference': quality_key(thumbnail_id),
+                })
         return {
             'id': info['_id'],
             'title': info.get('title') or 'Untitled Broadcast',
             'description': info.get('description'),
             'duration': int_or_none(info.get('length')),
-            'thumbnail': info.get('preview'),
+            'thumbnails': thumbnails,
             'uploader': info.get('channel', {}).get('display_name'),
             'uploader_id': info.get('channel', {}).get('name'),
             'timestamp': parse_iso8601(info.get('recorded_at')),

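The preview object maps size names to URLs; qualities() turns the ordered _QUALITIES tuple into a preference score so 'large' sorts above 'small'. A sketch with a minimal stand-in for youtube_dl.utils.qualities() and an invented preview dict:

    # Minimal stand-in for qualities(): later entries in the tuple get a
    # higher preference value. The preview dict below is invented.
    _QUALITIES = ('small', 'medium', 'large')

    def quality_key(qid):
        return _QUALITIES.index(qid) if qid in _QUALITIES else -1

    def extract_thumbnails(preview):
        thumbnails = []
        if isinstance(preview, dict):
            for thumbnail_id, thumbnail_url in preview.items():
                if thumbnail_id not in _QUALITIES or not thumbnail_url:
                    continue
                thumbnails.append({
                    'url': thumbnail_url,
                    'preference': quality_key(thumbnail_id),
                })
        return thumbnails

    print(extract_thumbnails({
        'small': 'https://example.com/s.jpg',
        'large': 'https://example.com/l.jpg',
        'template': 'https://example.com/{width}x{height}.jpg',
    }))
    # [{'url': 'https://example.com/s.jpg', 'preference': 0},
    #  {'url': 'https://example.com/l.jpg', 'preference': 2}]
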
From ce3735df0270ef4dfd86a527c4d0edff822dd920 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 00:55:29 +0700
Subject: [PATCH 067/340] [twitch:stream] Fix extraction (closes #25528)

---
 youtube_dl/extractor/twitch.py | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 45b8a7236..4cd5f0db4 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -22,6 +22,7 @@ from ..utils import (
     parse_duration,
     parse_iso8601,
     qualities,
+    str_or_none,
     try_get,
     unified_timestamp,
     update_url_query,
@@ -591,10 +592,18 @@ class TwitchStreamIE(TwitchBaseIE):
                 else super(TwitchStreamIE, cls).suitable(url))
 
     def _real_extract(self, url):
-        channel_id = self._match_id(url)
+        channel_name = self._match_id(url)
+
+        access_token = self._call_api(
+            'api/channels/%s/access_token' % channel_name, channel_name,
+            'Downloading access token JSON')
+
+        token = access_token['token']
+        channel_id = compat_str(self._parse_json(
+            token, channel_name)['channel_id'])
 
         stream = self._call_api(
-            'kraken/streams/%s?stream_type=all' % channel_id.lower(),
+            'kraken/streams/%s?stream_type=all' % channel_id,
             channel_id, 'Downloading stream JSON').get('stream')
 
         if not stream:
@@ -604,11 +613,9 @@ class TwitchStreamIE(TwitchBaseIE):
         # (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
         # an invalid m3u8 URL. Working around by use of original channel name from stream
         # JSON and fallback to lowercase if it's not available.
-        channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
-
-        access_token = self._call_api(
-            'api/channels/%s/access_token' % channel_id, channel_id,
-            'Downloading channel access token')
+        channel_name = try_get(
+            stream, lambda x: x['channel']['name'],
+            compat_str) or channel_name.lower()
 
         query = {
             'allow_source': 'true',
@@ -619,11 +626,11 @@ class TwitchStreamIE(TwitchBaseIE):
             'playlist_include_framerate': 'true',
             'segment_preference': '4',
             'sig': access_token['sig'].encode('utf-8'),
-            'token': access_token['token'].encode('utf-8'),
+            'token': token.encode('utf-8'),
         }
         formats = self._extract_m3u8_formats(
             '%s/api/channel/hls/%s.m3u8?%s'
-            % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
+            % (self._USHER_BASE, channel_name, compat_urllib_parse_urlencode(query)),
             channel_id, 'mp4')
         self._prefer_source(formats)
 
@@ -646,8 +653,8 @@ class TwitchStreamIE(TwitchBaseIE):
             })
 
         return {
-            'id': compat_str(stream['_id']),
-            'display_id': channel_id,
+            'id': str_or_none(stream.get('_id')) or channel_id,
+            'display_id': channel_name,
             'title': title,
             'description': description,
             'thumbnails': thumbnails,

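The access-token request now happens first because its token payload carries the numeric channel_id needed for the kraken/streams call, while the human-readable channel name still goes into the usher m3u8 URL. A sketch of that flow, where the token payload, signature and base URL are invented placeholders rather than real Twitch responses:

    import json
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2

    # Invented placeholders; real values come from Twitch's API.
    USHER_BASE = 'https://usher.example'
    access_token = {'token': json.dumps({'channel_id': 12345}), 'sig': 'deadbeef'}
    channel_name = 'SomeChannel'

    # The numeric channel id drives the kraken/streams request ...
    channel_id = str(json.loads(access_token['token'])['channel_id'])

    # ... while the channel name (lowercased as a fallback) is used in the
    # usher playlist URL together with the raw token and signature.
    query = urlencode({
        'allow_source': 'true',
        'sig': access_token['sig'],
        'token': access_token['token'],
    })
    print(channel_id)
    print('%s/api/channel/hls/%s.m3u8?%s' % (USHER_BASE, channel_name.lower(), query))
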
From b37e47a3f980c2470882ec83dda43c8166ddb3cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 00:57:40 +0700
Subject: [PATCH 068/340] [twitch:stream] Expect 400 and 410 HTTP errors from
 API

---
 youtube_dl/extractor/twitch.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 4cd5f0db4..e211cd4c8 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -56,7 +56,10 @@ class TwitchBaseIE(InfoExtractor):
             'Accept': 'application/vnd.twitchtv.v5+json; charset=UTF-8',
             'Client-ID': self._CLIENT_ID,
         })
-        kwargs['headers'] = headers
+        kwargs.update({
+            'headers': headers,
+            'expected_status': (400, 410),
+        })
         response = self._download_json(
             '%s/%s' % (self._API_BASE, path), item_id,
             *args, **compat_kwargs(kwargs))

From 0b1eaec3bc7f974fba4dae742516153e2f9b7562 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 01:35:09 +0700
Subject: [PATCH 069/340] [tele5] Prefer jwplatform over nexx (closes #25533)

---
 youtube_dl/extractor/tele5.py | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/youtube_dl/extractor/tele5.py b/youtube_dl/extractor/tele5.py
index 364556a1f..c209eb04f 100644
--- a/youtube_dl/extractor/tele5.py
+++ b/youtube_dl/extractor/tele5.py
@@ -6,14 +6,8 @@ import re
 from .common import InfoExtractor
 from .jwplatform import JWPlatformIE
 from .nexx import NexxIE
-from ..compat import (
-    compat_str,
-    compat_urlparse,
-)
-from ..utils import (
-    NO_DEFAULT,
-    try_get,
-)
+from ..compat import compat_urlparse
+from ..utils import NO_DEFAULT
 
 
 class Tele5IE(InfoExtractor):
@@ -30,6 +24,21 @@ class Tele5IE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+    }, {
+        # jwplatform, nexx unavailable
+        'url': 'https://www.tele5.de/filme/ghoul-das-geheimnis-des-friedhofmonsters/',
+        'info_dict': {
+            'id': 'WJuiOlUp',
+            'ext': 'mp4',
+            'upload_date': '20200603',
+            'timestamp': 1591214400,
+            'title': 'Ghoul - Das Geheimnis des Friedhofmonsters',
+            'description': 'md5:42002af1d887ff3d5b2b3ca1f8137d97',
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'add_ie': [JWPlatformIE.ie_key()],
     }, {
         'url': 'https://www.tele5.de/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191',
         'only_matching': True,
@@ -88,15 +97,6 @@ class Tele5IE(InfoExtractor):
             if not jwplatform_id:
                 jwplatform_id = extract_id(JWPLATFORM_ID_RE, 'jwplatform id')
 
-            media = self._download_json(
-                'https://cdn.jwplayer.com/v2/media/' + jwplatform_id,
-                display_id)
-            nexx_id = try_get(
-                media, lambda x: x['playlist'][0]['nexx_id'], compat_str)
-
-            if nexx_id:
-                return nexx_result(nexx_id)
-
         return self.url_result(
             'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
             video_id=jwplatform_id)

From b77888228d605a73f85d367845cf50609b855b62 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 01:44:36 +0700
Subject: [PATCH 070/340] [jwplatform] Add support for geo restriction bypass

---
 youtube_dl/extractor/jwplatform.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/youtube_dl/extractor/jwplatform.py b/youtube_dl/extractor/jwplatform.py
index dfa07e423..c34b5f5e6 100644
--- a/youtube_dl/extractor/jwplatform.py
+++ b/youtube_dl/extractor/jwplatform.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..utils import unsmuggle_url
 
 
 class JWPlatformIE(InfoExtractor):
@@ -36,6 +37,10 @@ class JWPlatformIE(InfoExtractor):
             webpage)
 
     def _real_extract(self, url):
+        url, smuggled_data = unsmuggle_url(url, {})
+        self._initialize_geo_bypass({
+            'countries': smuggled_data.get('geo_countries'),
+        })
         video_id = self._match_id(url)
         json_data = self._download_json('https://cdn.jwplayer.com/v2/media/' + video_id, video_id)
         return self._parse_jwplayer_data(json_data, video_id)

From a5b6102ea893d6943f9ffa9fc0677229c56c99ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 01:45:05 +0700
Subject: [PATCH 071/340] [tele5] Bypass geo restriction

---
 youtube_dl/extractor/tele5.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/tele5.py b/youtube_dl/extractor/tele5.py
index c209eb04f..3e1a7a9e6 100644
--- a/youtube_dl/extractor/tele5.py
+++ b/youtube_dl/extractor/tele5.py
@@ -7,11 +7,15 @@ from .common import InfoExtractor
 from .jwplatform import JWPlatformIE
 from .nexx import NexxIE
 from ..compat import compat_urlparse
-from ..utils import NO_DEFAULT
+from ..utils import (
+    NO_DEFAULT,
+    smuggle_url,
+)
 
 
 class Tele5IE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _GEO_COUNTRIES = ['DE']
     _TESTS = [{
         'url': 'https://www.tele5.de/mediathek/filme-online/videos?vid=1549416',
         'info_dict': {
@@ -98,5 +102,7 @@ class Tele5IE(InfoExtractor):
                 jwplatform_id = extract_id(JWPLATFORM_ID_RE, 'jwplatform id')
 
         return self.url_result(
-            'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
-            video_id=jwplatform_id)
+            smuggle_url(
+                'jwplatform:%s' % jwplatform_id,
+                {'geo_countries': self._GEO_COUNTRIES}),
+            ie=JWPlatformIE.ie_key(), video_id=jwplatform_id)

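Together with the previous patch, the geo countries now travel inside the jwplatform: URL: Tele5 smuggles its _GEO_COUNTRIES in, and JWPlatformIE unsmuggles them before calling _initialize_geo_bypass. A simplified stand-in for the two helpers (the fragment-based encoding below is only illustrative, not youtube-dl's exact wire format):

    import json

    # Simplified stand-ins for smuggle_url()/unsmuggle_url(): extra data
    # rides along in the URL fragment as JSON.
    def smuggle_url(url, data):
        return url + '#__smuggle=' + json.dumps(data)

    def unsmuggle_url(url, default=None):
        if '#__smuggle=' not in url:
            return url, default
        base, _, payload = url.partition('#__smuggle=')
        return base, json.loads(payload)

    smuggled = smuggle_url('jwplatform:WJuiOlUp', {'geo_countries': ['DE']})
    url, data = unsmuggle_url(smuggled, {})
    print(url, data.get('geo_countries'))  # jwplatform:WJuiOlUp ['DE']
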
From 607d204551aa0def292383c2870fba2afca096da Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 01:49:27 +0700
Subject: [PATCH 072/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index c13035c89..03b05ca28 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+version <unreleased>
+
+Extractors
+* [tele5] Bypass geo restriction
++ [jwplatform] Add support for geo restriction bypass
+* [tele5] Prefer jwplatform over nexx (#25533)
+* [twitch:stream] Expect 400 and 410 HTTP errors from API
+* [twitch:stream] Fix extraction (#25528)
+* [twitch] Fix thumbnails extraction (#25531)
++ [twitch] Pass v5 Accept HTTP header (#25531)
+* [brightcove] Fix subtitles extraction (#25540)
++ [malltv] Add support for sk.mall.tv (#25445)
+* [periscope] Fix untitled broadcasts (#25482)
+* [jwplatform] Improve embeds extraction (#25467)
+
+
 version 2020.05.29
 
 Core

From e1723c4bac4e465991789b5a29beb946d872f508 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 01:51:39 +0700
Subject: [PATCH 073/340] release 2020.06.06

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 09bf763cd..3fe1b1a33 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.05.29
+ [debug] youtube-dl version 2020.06.06
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index dc9b67cc8..e9f4b880c 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 129ca0a02..bbd34ecab 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 40e53bcae..4299474fa 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.05.29
+ [debug] youtube-dl version 2020.06.06
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 619a45f19..c9ccc7010 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.05.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.05.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 03b05ca28..f439f29e0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.06.06
 
 Extractors
 * [tele5] Bypass geo restriction
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 966fb3aa9..30f31f888 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.05.29'
+__version__ = '2020.06.06'

From 562de77f41d0c08df9dbb08cfa86ba6c7d239c5a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 02:14:35 +0700
Subject: [PATCH 074/340] [kaltura] Add support for multiple embeds on a
 webpage (closes #25523)

---
 youtube_dl/extractor/generic.py | 18 +++++++++++++++---
 youtube_dl/extractor/kaltura.py | 19 +++++++++++++------
 2 files changed, 28 insertions(+), 9 deletions(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index ce8252f6a..355067a50 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -1708,6 +1708,15 @@ class GenericIE(InfoExtractor):
             },
             'add_ie': ['Kaltura'],
         },
+        {
+            # multiple kaltura embeds, nsfw
+            'url': 'https://www.quartier-rouge.be/prive/femmes/kamila-avec-video-jaime-sadomie.html',
+            'info_dict': {
+                'id': 'kamila-avec-video-jaime-sadomie',
+                'title': "Kamila avec vídeo “J'aime sadomie”",
+            },
+            'playlist_count': 8,
+        },
         {
             # Non-standard Vimeo embed
             'url': 'https://openclassrooms.com/courses/understanding-the-web',
@@ -2844,9 +2853,12 @@ class GenericIE(InfoExtractor):
             return self.url_result(mobj.group('url'), 'Zapiks')
 
         # Look for Kaltura embeds
-        kaltura_url = KalturaIE._extract_url(webpage)
-        if kaltura_url:
-            return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())
+        kaltura_urls = KalturaIE._extract_urls(webpage)
+        if kaltura_urls:
+            return self.playlist_from_matches(
+                kaltura_urls, video_id, video_title,
+                getter=lambda x: smuggle_url(x, {'source_url': url}),
+                ie=KalturaIE.ie_key())
 
         # Look for EaglePlatform embeds
         eagleplatform_url = EaglePlatformIE._extract_url(webpage)
diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py
index 2d38b758b..49d13460d 100644
--- a/youtube_dl/extractor/kaltura.py
+++ b/youtube_dl/extractor/kaltura.py
@@ -113,9 +113,14 @@ class KalturaIE(InfoExtractor):
 
     @staticmethod
     def _extract_url(webpage):
+        urls = KalturaIE._extract_urls(webpage)
+        return urls[0] if urls else None
+
+    @staticmethod
+    def _extract_urls(webpage):
         # Embed codes: https://knowledge.kaltura.com/embedding-kaltura-media-players-your-site
-        mobj = (
-            re.search(
+        finditer = (
+            re.finditer(
                 r"""(?xs)
                     kWidget\.(?:thumb)?[Ee]mbed\(
                     \{.*?
@@ -124,7 +129,7 @@ class KalturaIE(InfoExtractor):
                         (?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
                         (?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
                 """, webpage)
-            or re.search(
+            or re.finditer(
                 r'''(?xs)
                     (?P<q1>["'])
                         (?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
@@ -138,7 +143,7 @@ class KalturaIE(InfoExtractor):
                     )
                     (?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
                 ''', webpage)
-            or re.search(
+            or re.finditer(
                 r'''(?xs)
                     <(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
                       (?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
@@ -148,7 +153,8 @@ class KalturaIE(InfoExtractor):
                     (?P=q1)
                 ''', webpage)
         )
-        if mobj:
+        urls = []
+        for mobj in finditer:
             embed_info = mobj.groupdict()
             for k, v in embed_info.items():
                 if v:
@@ -160,7 +166,8 @@ class KalturaIE(InfoExtractor):
                 webpage)
             if service_mobj:
                 url = smuggle_url(url, {'service_url': service_mobj.group('id')})
-            return url
+            urls.append(url)
+        return urls
 
     def _kaltura_api_call(self, video_id, actions, service_url=None, *args, **kwargs):
         params = actions[0]

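Splitting out _extract_urls() keeps _extract_url() working for callers that only need the first embed, while the generic extractor turns the full list into a playlist. One detail worth noting: re.finditer() returns an iterator object that is always truthy, so chaining the three calls with `or` as in the hunk above only ever consults the first pattern; the sketch below (with deliberately simplified, invented patterns) wraps each call in list() so the fallback behaves as intended:

    import re

    # Deliberately simplified, invented patterns; the real ones are in the
    # hunk above. list() makes the fallback work, since a bare finditer
    # object is always truthy even when it yields no matches.
    PATTERNS = (
        r'kWidget\.embed\(\s*\{[^}]*entry_id\s*:\s*["\'](?P<id>[^"\']+)',
        r'<iframe[^>]+src=["\'][^"\']*kaltura\.com[^"\']*entry_id[/=](?P<id>\w+)',
    )

    def extract_urls(webpage):
        for pattern in PATTERNS:
            matches = list(re.finditer(pattern, webpage))
            if matches:
                return ['kaltura:%s' % m.group('id') for m in matches]
        return []

    def extract_url(webpage):
        urls = extract_urls(webpage)
        return urls[0] if urls else None

    page = '<iframe src="https://cdnapisec.kaltura.com/p/123/embed?entry_id=0_abc"></iframe>'
    print(extract_urls(page))  # ['kaltura:0_abc']
    print(extract_url(page))   # kaltura:0_abc
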
From 84213ea8d41d5fe1608333a16ac578dccdf9a915 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 6 Jun 2020 04:16:31 +0700
Subject: [PATCH 075/340] [youtube] Extract chapters from JSON (closes #24819)

---
 test/test_youtube_chapters.py   |  2 +-
 youtube_dl/extractor/youtube.py | 63 +++++++++++++++++++++++++++++++--
 2 files changed, 62 insertions(+), 3 deletions(-)

diff --git a/test/test_youtube_chapters.py b/test/test_youtube_chapters.py
index 324ca8525..e69c57377 100644
--- a/test/test_youtube_chapters.py
+++ b/test/test_youtube_chapters.py
@@ -267,7 +267,7 @@ class TestYoutubeChapters(unittest.TestCase):
         for description, duration, expected_chapters in self._TEST_CASES:
             ie = YoutubeIE()
             expect_value(
-                self, ie._extract_chapters(description, duration),
+                self, ie._extract_chapters_from_description(description, duration),
                 expected_chapters, None)
 
 
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index fec17987b..54ec76db5 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1652,8 +1652,63 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         video_id = mobj.group(2)
         return video_id
 
+    def _extract_chapters_from_json(self, webpage, video_id, duration):
+        if not webpage:
+            return
+        player = self._parse_json(
+            self._search_regex(
+                r'RELATED_PLAYER_ARGS["\']\s*:\s*({.+})\s*,?\s*\n', webpage,
+                'player args', default='{}'),
+            video_id, fatal=False)
+        if not player or not isinstance(player, dict):
+            return
+        watch_next_response = player.get('watch_next_response')
+        if not isinstance(watch_next_response, compat_str):
+            return
+        response = self._parse_json(watch_next_response, video_id, fatal=False)
+        if not response or not isinstance(response, dict):
+            return
+        chapters_list = try_get(
+            response,
+            lambda x: x['playerOverlays']
+                       ['playerOverlayRenderer']
+                       ['decoratedPlayerBarRenderer']
+                       ['decoratedPlayerBarRenderer']
+                       ['playerBar']
+                       ['chapteredPlayerBarRenderer']
+                       ['chapters'],
+            list)
+        if not chapters_list:
+            return
+
+        def chapter_time(chapter):
+            return float_or_none(
+                try_get(
+                    chapter,
+                    lambda x: x['chapterRenderer']['timeRangeStartMillis'],
+                    int),
+                scale=1000)
+        chapters = []
+        for next_num, chapter in enumerate(chapters_list, start=1):
+            start_time = chapter_time(chapter)
+            if start_time is None:
+                continue
+            end_time = (chapter_time(chapters_list[next_num])
+                        if next_num < len(chapters_list) else duration)
+            if end_time is None:
+                continue
+            title = try_get(
+                chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
+                compat_str)
+            chapters.append({
+                'start_time': start_time,
+                'end_time': end_time,
+                'title': title,
+            })
+        return chapters
+
     @staticmethod
-    def _extract_chapters(description, duration):
+    def _extract_chapters_from_description(description, duration):
         if not description:
             return None
         chapter_lines = re.findall(
@@ -1687,6 +1742,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             })
         return chapters
 
+    def _extract_chapters(self, webpage, description, video_id, duration):
+        return (self._extract_chapters_from_json(webpage, video_id, duration)
+                or self._extract_chapters_from_description(description, duration))
+
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
 
@@ -2324,7 +2383,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     errnote='Unable to download video annotations', fatal=False,
                     data=urlencode_postdata({xsrf_field_name: xsrf_token}))
 
-        chapters = self._extract_chapters(description_original, video_duration)
+        chapters = self._extract_chapters(video_webpage, description_original, video_id, video_duration)
 
         # Look for the DASH manifest
         if self._downloader.params.get('youtube_include_dash_manifest', True):

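Each chapter's start time comes from timeRangeStartMillis, and its end time from the next chapter's start (or the overall duration for the last one). A standalone sketch of that conversion with an invented chapters list shaped like the JSON walked above:

    # Standalone sketch of the chapter conversion; the chapters_list layout
    # mirrors chapterRenderer/timeRangeStartMillis/title.simpleText, with
    # invented sample values.
    def chapters_from_json(chapters_list, duration):
        def chapter_time(chapter):
            millis = chapter.get('chapterRenderer', {}).get('timeRangeStartMillis')
            return millis / 1000.0 if isinstance(millis, int) else None

        chapters = []
        for next_num, chapter in enumerate(chapters_list, start=1):
            start_time = chapter_time(chapter)
            if start_time is None:
                continue
            end_time = (chapter_time(chapters_list[next_num])
                        if next_num < len(chapters_list) else duration)
            if end_time is None:
                continue
            title = chapter.get('chapterRenderer', {}).get('title', {}).get('simpleText')
            chapters.append({'start_time': start_time, 'end_time': end_time, 'title': title})
        return chapters

    sample = [
        {'chapterRenderer': {'timeRangeStartMillis': 0, 'title': {'simpleText': 'Intro'}}},
        {'chapterRenderer': {'timeRangeStartMillis': 90000, 'title': {'simpleText': 'Main part'}}},
    ]
    print(chapters_from_json(sample, duration=300))
    # [{'start_time': 0.0, 'end_time': 90.0, 'title': 'Intro'},
    #  {'start_time': 90.0, 'end_time': 300, 'title': 'Main part'}]
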
From 48bd042ce76f93b22196ce0a73c41a81a406d856 Mon Sep 17 00:00:00 2001
From: Philipp Hagemeister <phihag@phihag.de>
Date: Sun, 14 Jun 2020 13:17:51 +0200
Subject: [PATCH 076/340] [facebook] Support single-video ID links

I stumbled upon this at https://www.facebook.com/bwfbadminton/posts/10157127020046316 . No idea how prevalent it is yet.
---
 youtube_dl/extractor/facebook.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index ce64e2683..610d66745 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -466,15 +466,18 @@ class FacebookIE(InfoExtractor):
             return info_dict
 
         if '/posts/' in url:
-            entries = [
-                self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
-                for vid in self._parse_json(
-                    self._search_regex(
-                        r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])',
-                        webpage, 'video ids', group='ids'),
-                    video_id)]
+            video_id_json = self._search_regex(
+                r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])', webpage, 'video ids', group='ids',
+                default='')
+            if video_id_json:
+                entries = [
+                    self.url_result('facebook:%s' % vid, FacebookIE.ie_key())
+                    for vid in self._parse_json(video_id_json, video_id)]
+                return self.playlist_result(entries, video_id)
 
-            return self.playlist_result(entries, video_id)
+            # Single Video?
+            video_id = self._search_regex(r'video_id:\s*"([0-9]+)"', webpage, 'single video id')
+            return self.url_result('facebook:%s' % video_id, FacebookIE.ie_key())
         else:
             _, info_dict = self._extract_from_url(
                 self._VIDEO_PAGE_TEMPLATE % video_id,
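
In short, the /posts/ branch now tries the JSON "video_ids" array first and only then falls back to a single video_id. A toy demonstration of that order (both page snippets are invented):

    import json
    import re


    def extract_ids(webpage):
        m = re.search(r'(["\'])video_ids\1\s*:\s*(?P<ids>\[.+?\])', webpage)
        if m:
            return json.loads(m.group('ids'))
        m = re.search(r'video_id:\s*"([0-9]+)"', webpage)
        return [m.group(1)] if m else []


    print(extract_ids('... "video_ids": ["123", "456"] ...'))  # ['123', '456']
    print(extract_ids('... video_id:"789" ...'))               # ['789']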

From d84b21b427d080a031ca1f9c75beb1dc8024b900 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 01:59:46 +0700
Subject: [PATCH 077/340] [youtube] Fix playlist and feed extraction (closes
 #25675)

---
 youtube_dl/extractor/youtube.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 54ec76db5..e01c27438 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -70,6 +70,11 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
     _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
 
+    _YOUTUBE_CLIENT_HEADERS = {
+        'x-youtube-client-name': '1',
+        'x-youtube-client-version': '1.20200609.04.02',
+    }
+
     def _set_language(self):
         self._set_cookie(
             '.youtube.com', 'PREF', 'f1=50000000&hl=en',
@@ -301,7 +306,8 @@ class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
                         'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                         'Downloading page #%s%s'
                         % (page_num, ' (retry #%d)' % count if count else ''),
-                        transform_source=uppercase_escape)
+                        transform_source=uppercase_escape,
+                        headers=self._YOUTUBE_CLIENT_HEADERS)
                     break
                 except ExtractorError as e:
                     if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
@@ -3250,7 +3256,8 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
             more = self._download_json(
                 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
                 'Downloading page #%s' % page_num,
-                transform_source=uppercase_escape)
+                transform_source=uppercase_escape,
+                headers=self._YOUTUBE_CLIENT_HEADERS)
             content_html = more['content_html']
             more_widget_html = more['load_more_widget_html']
 

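For reference, the two added headers just identify the request as coming from the desktop web client. Outside the extractor the equivalent request setup looks roughly like this (the continuation path is a placeholder, not a known-good endpoint):

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    YOUTUBE_CLIENT_HEADERS = {
        'x-youtube-client-name': '1',
        'x-youtube-client-version': '1.20200609.04.02',
    }

    req = Request('https://www.youtube.com/example_continuation',
                  headers=YOUTUBE_CLIENT_HEADERS)
    # urllib's urlopen(req) would then fetch the JSON; whether YouTube still
    # serves the old-layout response depends on the current site behaviour.
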
From b477fc13144be90ba7bbec3386f2e48fa3e7d604 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 02:29:04 +0700
Subject: [PATCH 078/340] [youtube] Fix thumbnails extraction and remove
 uploader id extraction warning (closes #25676)

---
 youtube_dl/extractor/youtube.py | 40 ++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 13 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index e01c27438..1ab429162 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2225,8 +2225,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         if mobj is not None:
             video_uploader_id = mobj.group('uploader_id')
             video_uploader_url = mobj.group('uploader_url')
-        else:
-            self._downloader.report_warning('unable to extract uploader nickname')
 
         channel_id = (
             str_or_none(video_details.get('channelId'))
@@ -2237,17 +2235,33 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 video_webpage, 'channel id', default=None, group='id'))
         channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
 
-        # thumbnail image
-        # We try first to get a high quality image:
-        m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
-                            video_webpage, re.DOTALL)
-        if m_thumb is not None:
-            video_thumbnail = m_thumb.group(1)
-        elif 'thumbnail_url' not in video_info:
-            self._downloader.report_warning('unable to extract video thumbnail')
+        thumbnails = []
+        thumbnails_list = try_get(
+            video_details, lambda x: x['thumbnail']['thumbnails'], list) or []
+        for t in thumbnails_list:
+            if not isinstance(t, dict):
+                continue
+            thumbnail_url = url_or_none(t.get('url'))
+            if not thumbnail_url:
+                continue
+            thumbnails.append({
+                'url': thumbnail_url,
+                'width': int_or_none(t.get('width')),
+                'height': int_or_none(t.get('height')),
+            })
+
+        if not thumbnails:
             video_thumbnail = None
-        else:   # don't panic if we can't find it
-            video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
+            # We try first to get a high quality image:
+            m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
+                                video_webpage, re.DOTALL)
+            if m_thumb is not None:
+                video_thumbnail = m_thumb.group(1)
+            thumbnail_url = try_get(video_info, lambda x: x['thumbnail_url'][0], compat_str)
+            if thumbnail_url:
+                video_thumbnail = compat_urllib_parse_unquote_plus(thumbnail_url)
+            if video_thumbnail:
+                thumbnails.append({'url': video_thumbnail})
 
         # upload date
         upload_date = self._html_search_meta(
@@ -2480,7 +2494,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'creator': video_creator or artist,
             'title': video_title,
             'alt_title': video_alt_title or track,
-            'thumbnail': video_thumbnail,
+            'thumbnails': thumbnails,
             'description': video_description,
             'categories': video_categories,
             'tags': video_tags,
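
The net effect: the info dict now exposes a 'thumbnails' list built from videoDetails instead of a single 'thumbnail' URL, with the old page-based lookup kept only as a fallback. A standalone rendition of that loop over a hand-made sample (data is illustrative; int_or_none here is a simplified stand-in for youtube_dl.utils.int_or_none):

    def int_or_none(v):
        try:
            return int(v)
        except (TypeError, ValueError):
            return None


    video_details = {
        'thumbnail': {
            'thumbnails': [
                {'url': 'https://i.ytimg.com/vi/xxx/hqdefault.jpg',
                 'width': 480, 'height': 360},
                {'width': 1280},  # no URL, so it gets skipped
            ],
        },
    }

    thumbnails = []
    for t in video_details.get('thumbnail', {}).get('thumbnails', []):
        if not isinstance(t, dict) or not t.get('url'):
            continue
        thumbnails.append({
            'url': t['url'],
            'width': int_or_none(t.get('width')),
            'height': int_or_none(t.get('height')),
        })

    print(thumbnails)
    # [{'url': 'https://i.ytimg.com/vi/xxx/hqdefault.jpg', 'width': 480, 'height': 360}]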

From 37357d21a97dc7a4ab6fa75b862f303c7eb36eb8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 02:37:19 +0700
Subject: [PATCH 079/340] [youtube] Fix upload date extraction

---
 youtube_dl/extractor/youtube.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 1ab429162..dcd2d966b 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1898,6 +1898,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         video_details = try_get(
             player_response, lambda x: x['videoDetails'], dict) or {}
 
+        microformat = try_get(
+            player_response, lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {}
+
         video_title = video_info.get('title', [None])[0] or video_details.get('title')
         if not video_title:
             self._downloader.report_warning('Unable to extract video title')
@@ -2271,6 +2274,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 [r'(?s)id="eow-date.*?>(.*?)</span>',
                  r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'],
                 video_webpage, 'upload date', default=None)
+        if not upload_date:
+            upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
         upload_date = unified_strdate(upload_date)
 
         video_license = self._html_search_regex(
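
That is, when the watch page exposes no visible date, publishDate/uploadDate from the player response's microformat block now fills the gap. A minimal sketch with a made-up microformat dict:

    microformat = {'publishDate': '2020-06-15'}  # illustrative stand-in

    upload_date = None  # i.e. nothing was found in the page HTML
    if not upload_date:
        upload_date = microformat.get('publishDate') or microformat.get('uploadDate')
    print(upload_date)  # 2020-06-15 (unified_strdate() then normalizes it to 20200615)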

From 7b16239a490a0d8784375895b620598bfccf0ede Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 02:38:45 +0700
Subject: [PATCH 080/340] [youtube] Improve view count extraction

---
 youtube_dl/extractor/youtube.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index dcd2d966b..aaa4023b4 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1978,6 +1978,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             view_count = extract_view_count(video_info)
         if view_count is None and video_details:
             view_count = int_or_none(video_details.get('viewCount'))
+        if view_count is None and microformat:
+            view_count = int_or_none(microformat.get('viewCount'))
 
         if is_live is None:
             is_live = bool_or_none(video_details.get('isLive'))

From a6211d237b4e7051ca018cc09440502561fedaa7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 02:43:09 +0700
Subject: [PATCH 081/340] [youtube] Fix uploader id and uploader URL extraction

---
 youtube_dl/extractor/youtube.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index aaa4023b4..ce2212a7c 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2230,6 +2230,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         if mobj is not None:
             video_uploader_id = mobj.group('uploader_id')
             video_uploader_url = mobj.group('uploader_url')
+        else:
+            owner_profile_url = url_or_none(microformat.get('ownerProfileUrl'))
+            if owner_profile_url:
+                video_uploader_id = self._search_regex(
+                    r'(?:user|channel)/([^/]+)', owner_profile_url, 'uploader id',
+                    default=None)
+                video_uploader_url = owner_profile_url
 
         channel_id = (
             str_or_none(video_details.get('channelId'))
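
The fallback derives the uploader id from the microformat's ownerProfileUrl when the classic channel link is absent from the page. A quick illustration (the sample URL is made up):

    import re

    owner_profile_url = 'http://www.youtube.com/user/ExampleUploader'
    m = re.search(r'(?:user|channel)/([^/]+)', owner_profile_url)
    uploader_id = m.group(1) if m else None
    print(uploader_id)  # ExampleUploader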

From 7adc7ca54731997ca5ad4f0cd26217c7f480b88f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 02:52:09 +0700
Subject: [PATCH 082/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index f439f29e0..d925990fe 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+version <unreleased>
+
+Extractors
+* [youtube] Fix uploader id and uploader URL extraction
+* [youtube] Improve view count extraction
+* [youtube] Fix upload date extraction (#25677)
+* [youtube] Fix thumbnails extraction (#25676)
+* [youtube] Fix playlist and feed extraction (#25675)
++ [facebook] Add support for single-video ID links
++ [youtube] Extract chapters from JSON (#24819)
++ [kaltura] Add support for multiple embeds on a webpage (#25523)
+
+
 version 2020.06.06
 
 Extractors

From ed604ce7bcf5bfb02cf6d134eebae705f7c0f381 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 02:53:33 +0700
Subject: [PATCH 083/340] release 2020.06.16

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 3fe1b1a33..261dbfeb0 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.06.06
+ [debug] youtube-dl version 2020.06.16
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index e9f4b880c..7fbd35d28 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index bbd34ecab..463a6cd4b 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 4299474fa..b1f9ccb21 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.06.06
+ [debug] youtube-dl version 2020.06.16
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index c9ccc7010..6caddd6a1 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index d925990fe..234fcc50e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.06.16
 
 Extractors
 * [youtube] Fix uploader id and uploader URL extraction
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 30f31f888..56ed71c2a 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.06.06'
+__version__ = '2020.06.16'

From dbeafce5d58b0d40b7af35e5276f77855ced81a4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 03:13:39 +0700
Subject: [PATCH 084/340] [youtube] Fix categories and improve tags extraction

---
 youtube_dl/extractor/youtube.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index ce2212a7c..53dccdf0b 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2356,17 +2356,21 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         m_cat_container = self._search_regex(
             r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
             video_webpage, 'categories', default=None)
+        category = None
         if m_cat_container:
             category = self._html_search_regex(
                 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
                 default=None)
-            video_categories = None if category is None else [category]
-        else:
-            video_categories = None
+        if not category:
+            category = try_get(
+                microformat, lambda x: x['category'], compat_str)
+        video_categories = None if category is None else [category]
 
         video_tags = [
             unescapeHTML(m.group('content'))
             for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
+        if not video_tags:
+            video_tags = try_get(video_details, lambda x: x['keywords'], list)
 
         def _extract_count(count_name):
             return str_to_int(self._search_regex(

From ee0b726cd74ac6d4f6b187b1264baa0fc14c8800 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 06:17:53 +0700
Subject: [PATCH 085/340] [youtube] Force old layout (closes #25682, closes
 #25683, closes #25680, closes #25686)

---
 youtube_dl/extractor/youtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 53dccdf0b..1bc79e014 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -77,7 +77,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
     def _set_language(self):
         self._set_cookie(
-            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
+            '.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',
             # YouTube sets the expire time to about two months
             expire_time=time.time() + 2 * 30 * 24 * 3600)
 

From 1c748722f9bbe08ed362985a723cc1fd39b21f85 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 06:19:23 +0700
Subject: [PATCH 086/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 234fcc50e..7a6517d5e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+version <unreleased>
+
+Extractors
+* [youtube] Force old layout (#25682, #25683, #25680, #25686)
+* [youtube] Fix categories and improve tags extraction
+
+
 version 2020.06.16
 
 Extractors

From 9ff6165a8199488e92e8a76962f37d944e14a738 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 06:22:01 +0700
Subject: [PATCH 087/340] release 2020.06.16.1

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 261dbfeb0..d29d5366f 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.06.16
+ [debug] youtube-dl version 2020.06.16.1
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 7fbd35d28..ee882f98c 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 463a6cd4b..23033fe13 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index b1f9ccb21..597531330 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.06.16
+ [debug] youtube-dl version 2020.06.16.1
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 6caddd6a1..5cfcb9318 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16**
+- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 7a6517d5e..07d6ccd69 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.06.16.1
 
 Extractors
 * [youtube] Force old layout (#25682, #25683, #25680, #25686)
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 56ed71c2a..6b88eb38c 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.06.16'
+__version__ = '2020.06.16.1'

From 2391941f283a1107b01f9df76a8b0e521a5abe3b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 16 Jun 2020 17:37:58 +0700
Subject: [PATCH 088/340] [brightcove] Improve embed detection (closes #25674)

---
 youtube_dl/extractor/brightcove.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 5c22a730d..2aa9f4782 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -426,7 +426,7 @@ class BrightcoveNewIE(AdobePassIE):
         # [2] looks like:
         for video, script_tag, account_id, player_id, embed in re.findall(
                 r'''(?isx)
-                    (<video\s+[^>]*\bdata-video-id\s*=\s*['"]?[^>]+>)
+                    (<video(?:-js)?\s+[^>]*\bdata-video-id\s*=\s*['"]?[^>]+>)
                     (?:.*?
                         (<script[^>]+
                             src=["\'](?:https?:)?//players\.brightcove\.net/
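
The widened tag pattern also catches the <video-js> custom element that newer Brightcove Player embeds use in place of a plain <video> tag. A reduced check (sample markup invented, the extractor's full verbose regex omitted):

    import re

    tag = '<video-js data-video-id="5958608919001" data-account="1234"></video-js>'
    m = re.search(r'<video(?:-js)?\s+[^>]*\bdata-video-id\s*=\s*[\'"]?[^>]+>', tag)
    print(m is not None)  # True; the previous pattern only accepted "<video ..."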

From 9a7e5cb88aa3a80f7b4d37424ca7cb3bd144cdc8 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 23 Jun 2020 15:08:50 +0100
Subject: [PATCH 089/340] [bellmedia] add support for cp24.com clip URLs (closes
 #25764)

---
 youtube_dl/extractor/bellmedia.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/bellmedia.py b/youtube_dl/extractor/bellmedia.py
index 485173774..9f9de96c6 100644
--- a/youtube_dl/extractor/bellmedia.py
+++ b/youtube_dl/extractor/bellmedia.py
@@ -25,8 +25,8 @@ class BellMediaIE(InfoExtractor):
                 etalk|
                 marilyn
             )\.ca|
-            much\.com
-        )/.*?(?:\bvid(?:eoid)?=|-vid|~|%7E|/(?:episode)?)(?P<id>[0-9]{6,})'''
+            (?:much|cp24)\.com
+        )/.*?(?:\b(?:vid(?:eoid)?|clipId)=|-vid|~|%7E|/(?:episode)?)(?P<id>[0-9]{6,})'''
     _TESTS = [{
         'url': 'https://www.bnnbloomberg.ca/video/david-cockfield-s-top-picks~1403070',
         'md5': '36d3ef559cfe8af8efe15922cd3ce950',
@@ -62,6 +62,9 @@ class BellMediaIE(InfoExtractor):
     }, {
         'url': 'http://www.etalk.ca/video?videoid=663455',
         'only_matching': True,
+    }, {
+        'url': 'https://www.cp24.com/video?clipId=1982548',
+        'only_matching': True,
     }]
     _DOMAINS = {
         'thecomedynetwork': 'comedy',

From e942cfd1a74cea3a3a53231800bbf5a002eed85e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 28 Jun 2020 10:30:03 +0700
Subject: [PATCH 090/340] [youtube:playlists] Extend _VALID_URL (closes #25810)

---
 youtube_dl/extractor/youtube.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 1bc79e014..43c5eff1f 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3116,7 +3116,7 @@ class YoutubeLiveIE(YoutubeBaseInfoExtractor):
 
 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
     IE_DESC = 'YouTube.com user/channel playlists'
-    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
+    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel|c)/(?P<id>[^/]+)/playlists'
     IE_NAME = 'youtube:playlists'
 
     _TESTS = [{
@@ -3142,6 +3142,9 @@ class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
             'title': 'Chem Player',
         },
         'skip': 'Blocked',
+    }, {
+        'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
+        'only_matching': True,
     }]
 
 

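The extended pattern now also accepts the /c/<name>/playlists form; the new test URL matches like so:

    import re

    pattern = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel|c)/(?P<id>[^/]+)/playlists'
    m = re.match(pattern, 'https://www.youtube.com/c/ChristophLaimer/playlists')
    print(m.group('id'))  # ChristophLaimer
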
From 07af16b92ef52ac29ecd7f1defdca89295fa611c Mon Sep 17 00:00:00 2001
From: Glenn Slayden <5589855+glenn-slayden@users.noreply.github.com>
Date: Tue, 30 Jun 2020 12:56:16 -0700
Subject: [PATCH 091/340] [youtube] Prevent excess HTTP 301 (#25786)

---
 youtube_dl/extractor/youtube.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 43c5eff1f..ef08bf8cb 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -303,7 +303,7 @@ class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
                     # Downloading page may result in intermittent 5xx HTTP error
                     # that is usually worked around with a retry
                     more = self._download_json(
-                        'https://youtube.com/%s' % mobj.group('more'), playlist_id,
+                        'https://www.youtube.com/%s' % mobj.group('more'), playlist_id,
                         'Downloading page #%s%s'
                         % (page_num, ' (retry #%d)' % count if count else ''),
                         transform_source=uppercase_escape,
@@ -2776,7 +2776,7 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
         ids = []
         last_id = playlist_id[-11:]
         for n in itertools.count(1):
-            url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
+            url = 'https://www.youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
             webpage = self._download_webpage(
                 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
             new_ids = orderedSet(re.findall(
@@ -3289,7 +3289,7 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
                 break
 
             more = self._download_json(
-                'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
+                'https://www.youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
                 'Downloading page #%s' % page_num,
                 transform_source=uppercase_escape,
                 headers=self._YOUTUBE_CLIENT_HEADERS)

From 718393c632df5106df92c60c650f52d86a9a3510 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 11 Jul 2020 18:27:19 +0700
Subject: [PATCH 092/340] [wistia] Restrict embed regex (closes #25969)

---
 youtube_dl/extractor/wistia.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/wistia.py b/youtube_dl/extractor/wistia.py
index 168e5e901..77febd2eb 100644
--- a/youtube_dl/extractor/wistia.py
+++ b/youtube_dl/extractor/wistia.py
@@ -56,7 +56,7 @@ class WistiaIE(InfoExtractor):
             urls.append(unescapeHTML(match.group('url')))
         for match in re.finditer(
                 r'''(?sx)
-                    <div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]{10})\b.*?\2
+                    <div[^>]+class=(["'])(?:(?!\1).)*?\bwistia_async_(?P<id>[a-z0-9]{10})\b(?:(?!\1).)*?\1
                 ''', webpage):
             urls.append('wistia:%s' % match.group('id'))
         for match in re.finditer(r'(?:data-wistia-?id=["\']|Wistia\.embed\(["\']|id=["\']wistia_)(?P<id>[a-z0-9]{10})', webpage):
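
The replacement uses a "tempered dot" ((?:(?!\1).)*?) so the match can no longer run past the closing quote of the class attribute and latch onto a wistia_async_ string elsewhere on the page. A simplified demonstration with shortened patterns and invented snippets (not the extractor's exact regexes):

    import re

    loose = r'<div[^>]+class=(["\']).*?\bwistia_async_(?P<id>[a-z0-9]{10})'
    strict = r'<div[^>]+class=(["\'])(?:(?!\1).)*?\bwistia_async_(?P<id>[a-z0-9]{10})'

    page = '<div class="sidebar"> ... wistia_async_abcdefghij mentioned in text'
    print(re.search(loose, page, re.S) is not None)   # True  (false positive)
    print(re.search(strict, page, re.S) is not None)  # False (stays inside the attribute)

    good = '<div class="foo wistia_async_abcdefghij videoFoam=true"></div>'
    print(re.search(strict, good).group('id'))        # abcdefghij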

From a115e07594ccb7749ca108c889978510c7df126e Mon Sep 17 00:00:00 2001
From: MRWITEK <mrvvitek@gmail.com>
Date: Tue, 14 Jul 2020 14:01:15 +0300
Subject: [PATCH 093/340] [youtube] Improve description extraction (closes
 #25937) (#25980)

---
 youtube_dl/extractor/youtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index ef08bf8cb..c27f2cd95 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1930,7 +1930,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             ''', replace_url, video_description)
             video_description = clean_html(video_description)
         else:
-            video_description = self._html_search_meta('description', video_webpage) or video_details.get('shortDescription')
+            video_description = video_details.get('shortDescription') or self._html_search_meta('description', video_webpage)
 
         if not smuggled_data.get('force_singlefeed', False):
             if not self._downloader.params.get('noplaylist'):

From e450f6cb634f17fd4ef59291eafb68b05c141e43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 28 Jul 2020 05:04:50 +0700
Subject: [PATCH 094/340] [youtube] Fix sigfunc name extraction (closes #26134,
 closes #26135, closes #26136, closes #26137)

---
 youtube_dl/extractor/youtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index c27f2cd95..b35bf03aa 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1384,7 +1384,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         funcname = self._search_regex(
             (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
              r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
-             r'\b(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
+             r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
              r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
              # Obsolete patterns
              r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',

From 570611955ff76b7140c14f5bfc842d452f40d357 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 28 Jul 2020 05:07:54 +0700
Subject: [PATCH 095/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 07d6ccd69..a49904c89 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+version <unreleased>
+
+Extractors
+* [youtube] Fix sigfunc name extraction (#26134, #26135, #26136, #26137)
+* [youtube] Improve description extraction (#25937, #25980)
+* [wistia] Restrict embed regular expression (#25969)
+* [youtube] Prevent excess HTTP 301 (#25786)
++ [youtube:playlists] Extend URL regular expression (#25810)
++ [bellmedia] Add support for cp24.com clip URLs (#25764)
+* [brightcove] Improve embed detection (#25674)
+
+
 version 2020.06.16.1
 
 Extractors

From a4ed50bb84658b7e77cbb37597c36fa62a9acd4b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 28 Jul 2020 05:13:03 +0700
Subject: [PATCH 096/340] release 2020.07.28

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index d29d5366f..f2260db46 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.06.16.1
+ [debug] youtube-dl version 2020.07.28
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index ee882f98c..8bc05c4ba 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 23033fe13..98348e0cd 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 597531330..86706f528 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.06.16.1
+ [debug] youtube-dl version 2020.07.28
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 5cfcb9318..52c2709f9 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.06.16.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.06.16.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index a49904c89..bf515f784 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.07.28
 
 Extractors
 * [youtube] Fix sigfunc name extraction (#26134, #26135, #26136, #26137)
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 6b88eb38c..17101fa47 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.06.16.1'
+__version__ = '2020.07.28'

From 6cb30ea5eddb2db4a2536d1b851f4cc45f427d3c Mon Sep 17 00:00:00 2001
From: JChris246 <43832407+JChris246@users.noreply.github.com>
Date: Wed, 12 Aug 2020 10:37:22 -0400
Subject: [PATCH 097/340] [xhamster] Extend _VALID_URL (closes #25789) (#25804)

---
 youtube_dl/extractor/xhamster.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index 0f7be6a7d..72ce5e1d3 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -25,8 +25,8 @@ class XHamsterIE(InfoExtractor):
                     https?://
                         (?:.+?\.)?%s/
                         (?:
-                            movies/(?P<id>\d+)/(?P<display_id>[^/]*)\.html|
-                            videos/(?P<display_id_2>[^/]*)-(?P<id_2>\d+)
+                            movies/(?P<id>[\dA-Za-z]+)/(?P<display_id>[^/]*)\.html|
+                            videos/(?P<display_id_2>[^/]*)-(?P<id_2>[\dA-Za-z]+)
                         )
                     ''' % _DOMAINS
     _TESTS = [{
@@ -105,6 +105,9 @@ class XHamsterIE(InfoExtractor):
     }, {
         'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
         'only_matching': True,
+    }, {
+        'url': 'http://de.xhamster.com/videos/skinny-girl-fucks-herself-hard-in-the-forest-xhnBJZx',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From a7e348556a213f008758011777a0a25759efc2e8 Mon Sep 17 00:00:00 2001
From: TheRealDude2 <the.real.dude@gmx.de>
Date: Wed, 12 Aug 2020 16:42:17 +0200
Subject: [PATCH 098/340] [xhamster] Fix extraction (closes #26157) (#26254)

---
 youtube_dl/extractor/xhamster.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index 72ce5e1d3..babe31739 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -132,7 +132,7 @@ class XHamsterIE(InfoExtractor):
 
         initials = self._parse_json(
             self._search_regex(
-                r'window\.initials\s*=\s*({.+?})\s*;\s*\n', webpage, 'initials',
+                r'window\.initials\s*=\s*({.+?})\s*;', webpage, 'initials',
                 default='{}'),
             video_id, fatal=False)
         if initials:

From 10709fc7c640fcd2f4866090d68f130fc8d9ad0c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 12 Aug 2020 21:51:06 +0700
Subject: [PATCH 099/340] [xhamster] Extend _VALID_URL (closes #25927)

---
 youtube_dl/extractor/xhamster.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index babe31739..902a3ed33 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -20,7 +20,7 @@ from ..utils import (
 
 
 class XHamsterIE(InfoExtractor):
-    _DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster[27]\.com)'
+    _DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster\d+\.com)'
     _VALID_URL = r'''(?x)
                     https?://
                         (?:.+?\.)?%s/
@@ -99,6 +99,12 @@ class XHamsterIE(InfoExtractor):
     }, {
         'url': 'https://xhamster2.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
         'only_matching': True,
+    }, {
+        'url': 'https://xhamster11.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
+        'only_matching': True,
+    }, {
+        'url': 'https://xhamster26.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
+        'only_matching': True,
     }, {
         'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
         'only_matching': True,

From f5863a3ea08492bd9fc04c55e1e912d24e92d49b Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 27 Aug 2020 19:20:41 +0100
Subject: [PATCH 100/340] [biqle] improve video_ext extraction

---
 youtube_dl/extractor/biqle.py | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/youtube_dl/extractor/biqle.py b/youtube_dl/extractor/biqle.py
index af21e3ee5..17ebbb257 100644
--- a/youtube_dl/extractor/biqle.py
+++ b/youtube_dl/extractor/biqle.py
@@ -3,10 +3,11 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from .vk import VKIE
-from ..utils import (
-    HEADRequest,
-    int_or_none,
+from ..compat import (
+    compat_b64decode,
+    compat_urllib_parse_unquote,
 )
+from ..utils import int_or_none
 
 
 class BIQLEIE(InfoExtractor):
@@ -47,9 +48,16 @@ class BIQLEIE(InfoExtractor):
         if VKIE.suitable(embed_url):
             return self.url_result(embed_url, VKIE.ie_key(), video_id)
 
-        self._request_webpage(
-            HEADRequest(embed_url), video_id, headers={'Referer': url})
-        video_id, sig, _, access_token = self._get_cookies(embed_url)['video_ext'].value.split('%3A')
+        embed_page = self._download_webpage(
+            embed_url, video_id, headers={'Referer': url})
+        video_ext = self._get_cookies(embed_url).get('video_ext')
+        if video_ext:
+            video_ext = compat_urllib_parse_unquote(video_ext.value)
+        if not video_ext:
+            video_ext = compat_b64decode(self._search_regex(
+                r'video_ext\s*:\s*[\'"]([A-Za-z0-9+/=]+)',
+                embed_page, 'video_ext')).decode()
+        video_id, sig, _, access_token = video_ext.split(':')
         item = self._download_json(
             'https://api.vk.com/method/video.get', video_id,
             headers={'User-Agent': 'okhttp/3.4.1'}, query={

From 841b683804c6f706554bf7607e52072575358445 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Sep 2020 10:45:34 +0700
Subject: [PATCH 101/340] [twitch] Rework extractors (closes #12297, closes
 #20414, closes #20604, closes #21811, closes #21812, closes #22979, closes
 #24263, closes #25010, closes #25553, closes #25606)

* Switch to GraphQL.
+ Add support for collections.
+ Add support for clips and collections playlists.
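A minimal sketch of the persisted-query request shape the reworked extractors send to gql.twitch.tv (operation name, sha256 hash, payload layout and headers are taken from the hunk below; the Client-ID value is a placeholder, not the one youtube-dl ships):

    import json
    import urllib.request

    def download_collection(collection_id, client_id):
        # Same payload shape as TwitchGraphQLBaseIE._download_gql below:
        # an operation name plus variables and a persisted-query hash
        # instead of a raw GraphQL query string.
        payload = {
            'operationName': 'CollectionSideBar',
            'variables': {'collectionID': collection_id},
            'extensions': {
                'persistedQuery': {
                    'version': 1,
                    'sha256Hash': '27111f1b382effad0b6def325caef1909c733fe6a4fbabf54f8d491ef2cf2f14',
                },
            },
        }
        req = urllib.request.Request(
            'https://gql.twitch.tv/gql', data=json.dumps(payload).encode(),
            headers={
                'Content-Type': 'text/plain;charset=UTF-8',
                'Client-ID': client_id,  # placeholder, supply a real client id
            })
        return json.load(urllib.request.urlopen(req))

    # collection = download_collection('wlDCoH0zEBZZbQ', '<client id>')['data']['collection']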
---
 youtube_dl/extractor/extractors.py |  11 +-
 youtube_dl/extractor/twitch.py     | 674 ++++++++++++++++++-----------
 2 files changed, 429 insertions(+), 256 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 4b3092028..9564465a0 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1229,14 +1229,11 @@ from .twentymin import TwentyMinutenIE
 from .twentythreevideo import TwentyThreeVideoIE
 from .twitcasting import TwitCastingIE
 from .twitch import (
-    TwitchVideoIE,
-    TwitchChapterIE,
     TwitchVodIE,
-    TwitchProfileIE,
-    TwitchAllVideosIE,
-    TwitchUploadsIE,
-    TwitchPastBroadcastsIE,
-    TwitchHighlightsIE,
+    TwitchCollectionIE,
+    TwitchVideosIE,
+    TwitchVideosClipsIE,
+    TwitchVideosCollectionsIE,
     TwitchStreamIE,
     TwitchClipsIE,
 )
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index e211cd4c8..eadc48c6d 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -1,24 +1,26 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import collections
 import itertools
-import re
-import random
 import json
+import random
+import re
 
 from .common import InfoExtractor
 from ..compat import (
     compat_kwargs,
     compat_parse_qs,
     compat_str,
+    compat_urlparse,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
     clean_html,
     ExtractorError,
+    float_or_none,
     int_or_none,
-    orderedSet,
     parse_duration,
     parse_iso8601,
     qualities,
@@ -150,120 +152,16 @@ class TwitchBaseIE(InfoExtractor):
                     })
         self._sort_formats(formats)
 
+    def _download_access_token(self, channel_name):
+        return self._call_api(
+            'api/channels/%s/access_token' % channel_name, channel_name,
+            'Downloading access token JSON')
 
-class TwitchItemBaseIE(TwitchBaseIE):
-    def _download_info(self, item, item_id):
-        return self._extract_info(self._call_api(
-            'kraken/videos/%s%s' % (item, item_id), item_id,
-            'Downloading %s info JSON' % self._ITEM_TYPE))
-
-    def _extract_media(self, item_id):
-        info = self._download_info(self._ITEM_SHORTCUT, item_id)
-        response = self._call_api(
-            'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id,
-            'Downloading %s playlist JSON' % self._ITEM_TYPE)
-        entries = []
-        chunks = response['chunks']
-        qualities = list(chunks.keys())
-        for num, fragment in enumerate(zip(*chunks.values()), start=1):
-            formats = []
-            for fmt_num, fragment_fmt in enumerate(fragment):
-                format_id = qualities[fmt_num]
-                fmt = {
-                    'url': fragment_fmt['url'],
-                    'format_id': format_id,
-                    'quality': 1 if format_id == 'live' else 0,
-                }
-                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
-                if m:
-                    fmt['height'] = int(m.group('height'))
-                formats.append(fmt)
-            self._sort_formats(formats)
-            entry = dict(info)
-            entry['id'] = '%s_%d' % (entry['id'], num)
-            entry['title'] = '%s part %d' % (entry['title'], num)
-            entry['formats'] = formats
-            entries.append(entry)
-        return self.playlist_result(entries, info['id'], info['title'])
-
-    def _extract_info(self, info):
-        status = info.get('status')
-        if status == 'recording':
-            is_live = True
-        elif status == 'recorded':
-            is_live = False
-        else:
-            is_live = None
-        _QUALITIES = ('small', 'medium', 'large')
-        quality_key = qualities(_QUALITIES)
-        thumbnails = []
-        preview = info.get('preview')
-        if isinstance(preview, dict):
-            for thumbnail_id, thumbnail_url in preview.items():
-                thumbnail_url = url_or_none(thumbnail_url)
-                if not thumbnail_url:
-                    continue
-                if thumbnail_id not in _QUALITIES:
-                    continue
-                thumbnails.append({
-                    'url': thumbnail_url,
-                    'preference': quality_key(thumbnail_id),
-                })
-        return {
-            'id': info['_id'],
-            'title': info.get('title') or 'Untitled Broadcast',
-            'description': info.get('description'),
-            'duration': int_or_none(info.get('length')),
-            'thumbnails': thumbnails,
-            'uploader': info.get('channel', {}).get('display_name'),
-            'uploader_id': info.get('channel', {}).get('name'),
-            'timestamp': parse_iso8601(info.get('recorded_at')),
-            'view_count': int_or_none(info.get('views')),
-            'is_live': is_live,
-        }
-
-    def _real_extract(self, url):
-        return self._extract_media(self._match_id(url))
+    def _extract_channel_id(self, token, channel_name):
+        return compat_str(self._parse_json(token, channel_name)['channel_id'])
 
 
-class TwitchVideoIE(TwitchItemBaseIE):
-    IE_NAME = 'twitch:video'
-    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
-    _ITEM_TYPE = 'video'
-    _ITEM_SHORTCUT = 'a'
-
-    _TEST = {
-        'url': 'http://www.twitch.tv/riotgames/b/577357806',
-        'info_dict': {
-            'id': 'a577357806',
-            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
-        },
-        'playlist_mincount': 12,
-        'skip': 'HTTP Error 404: Not Found',
-    }
-
-
-class TwitchChapterIE(TwitchItemBaseIE):
-    IE_NAME = 'twitch:chapter'
-    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
-    _ITEM_TYPE = 'chapter'
-    _ITEM_SHORTCUT = 'c'
-
-    _TESTS = [{
-        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
-        'info_dict': {
-            'id': 'c5285812',
-            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
-        },
-        'playlist_mincount': 3,
-        'skip': 'HTTP Error 404: Not Found',
-    }, {
-        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
-        'only_matching': True,
-    }]
-
-
-class TwitchVodIE(TwitchItemBaseIE):
+class TwitchVodIE(TwitchBaseIE):
     IE_NAME = 'twitch:vod'
     _VALID_URL = r'''(?x)
                     https?://
@@ -332,17 +230,60 @@ class TwitchVodIE(TwitchItemBaseIE):
         'only_matching': True,
     }]
 
-    def _real_extract(self, url):
-        item_id = self._match_id(url)
+    def _download_info(self, item_id):
+        return self._extract_info(
+            self._call_api(
+                'kraken/videos/%s' % item_id, item_id,
+                'Downloading video info JSON'))
 
-        info = self._download_info(self._ITEM_SHORTCUT, item_id)
+    @staticmethod
+    def _extract_info(info):
+        status = info.get('status')
+        if status == 'recording':
+            is_live = True
+        elif status == 'recorded':
+            is_live = False
+        else:
+            is_live = None
+        _QUALITIES = ('small', 'medium', 'large')
+        quality_key = qualities(_QUALITIES)
+        thumbnails = []
+        preview = info.get('preview')
+        if isinstance(preview, dict):
+            for thumbnail_id, thumbnail_url in preview.items():
+                thumbnail_url = url_or_none(thumbnail_url)
+                if not thumbnail_url:
+                    continue
+                if thumbnail_id not in _QUALITIES:
+                    continue
+                thumbnails.append({
+                    'url': thumbnail_url,
+                    'preference': quality_key(thumbnail_id),
+                })
+        return {
+            'id': info['_id'],
+            'title': info.get('title') or 'Untitled Broadcast',
+            'description': info.get('description'),
+            'duration': int_or_none(info.get('length')),
+            'thumbnails': thumbnails,
+            'uploader': info.get('channel', {}).get('display_name'),
+            'uploader_id': info.get('channel', {}).get('name'),
+            'timestamp': parse_iso8601(info.get('recorded_at')),
+            'view_count': int_or_none(info.get('views')),
+            'is_live': is_live,
+        }
+
+    def _real_extract(self, url):
+        vod_id = self._match_id(url)
+
+        info = self._download_info(vod_id)
         access_token = self._call_api(
-            'api/vods/%s/access_token' % item_id, item_id,
+            'api/vods/%s/access_token' % vod_id, vod_id,
             'Downloading %s access token' % self._ITEM_TYPE)
 
         formats = self._extract_m3u8_formats(
             '%s/vod/%s.m3u8?%s' % (
-                self._USHER_BASE, item_id,
+                self._USHER_BASE, vod_id,
                 compat_urllib_parse_urlencode({
                     'allow_source': 'true',
                     'allow_audio_only': 'true',
@@ -352,7 +293,7 @@ class TwitchVodIE(TwitchItemBaseIE):
                     'nauth': access_token['token'],
                     'nauthsig': access_token['sig'],
                 })),
-            item_id, 'mp4', entry_protocol='m3u8_native')
+            vod_id, 'mp4', entry_protocol='m3u8_native')
 
         self._prefer_source(formats)
         info['formats'] = formats
@@ -366,7 +307,7 @@ class TwitchVodIE(TwitchItemBaseIE):
             info['subtitles'] = {
                 'rechat': [{
                     'url': update_url_query(
-                        'https://api.twitch.tv/v5/videos/%s/comments' % item_id, {
+                        'https://api.twitch.tv/v5/videos/%s/comments' % vod_id, {
                             'client_id': self._CLIENT_ID,
                         }),
                     'ext': 'json',
@@ -376,164 +317,405 @@ class TwitchVodIE(TwitchItemBaseIE):
         return info
 
 
-class TwitchPlaylistBaseIE(TwitchBaseIE):
-    _PLAYLIST_PATH = 'kraken/channels/%s/videos/?offset=%d&limit=%d'
+def _make_video_result(node):
+    assert isinstance(node, dict)
+    video_id = node.get('id')
+    if not video_id:
+        return
+    return {
+        '_type': 'url_transparent',
+        'ie_key': TwitchVodIE.ie_key(),
+        'id': video_id,
+        'url': 'https://www.twitch.tv/videos/%s' % video_id,
+        'title': node.get('title'),
+        'thumbnail': node.get('previewThumbnailURL'),
+        'duration': float_or_none(node.get('lengthSeconds')),
+        'view_count': int_or_none(node.get('viewCount')),
+    }
+
+
+class TwitchGraphQLBaseIE(TwitchBaseIE):
     _PAGE_LIMIT = 100
 
-    def _extract_playlist(self, channel_id):
-        info = self._call_api(
-            'kraken/channels/%s' % channel_id,
-            channel_id, 'Downloading channel info JSON')
-        channel_name = info.get('display_name') or info.get('name')
+    def _download_gql(self, video_id, op, variables, sha256_hash, note, fatal=True):
+        return self._download_json(
+            'https://gql.twitch.tv/gql', video_id, note,
+            data=json.dumps({
+                'operationName': op,
+                'variables': variables,
+                'extensions': {
+                    'persistedQuery': {
+                        'version': 1,
+                        'sha256Hash': sha256_hash,
+                    }
+                }
+            }).encode(),
+            headers={
+                'Content-Type': 'text/plain;charset=UTF-8',
+                'Client-ID': self._CLIENT_ID,
+            }, fatal=fatal)
+
+
+class TwitchCollectionIE(TwitchGraphQLBaseIE):
+    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/collections/(?P<id>[^/]+)'
+
+    _TESTS = [{
+        'url': 'https://www.twitch.tv/collections/wlDCoH0zEBZZbQ',
+        'info_dict': {
+            'id': 'wlDCoH0zEBZZbQ',
+            'title': 'Overthrow Nook, capitalism for children',
+        },
+        'playlist_mincount': 13,
+    }]
+
+    _OPERATION_NAME = 'CollectionSideBar'
+    _SHA256_HASH = '27111f1b382effad0b6def325caef1909c733fe6a4fbabf54f8d491ef2cf2f14'
+
+    def _real_extract(self, url):
+        collection_id = self._match_id(url)
+        collection = self._download_gql(
+            collection_id, self._OPERATION_NAME,
+            {'collectionID': collection_id}, self._SHA256_HASH,
+            'Downloading collection GraphQL')['data']['collection']
+        title = collection.get('title')
         entries = []
+        for edge in collection['items']['edges']:
+            if not isinstance(edge, dict):
+                continue
+            node = edge.get('node')
+            if not isinstance(node, dict):
+                continue
+            video = _make_video_result(node)
+            if video:
+                entries.append(video)
+        return self.playlist_result(
+            entries, playlist_id=collection_id, playlist_title=title)
+
+
+class TwitchPlaylistBaseIE(TwitchGraphQLBaseIE):
+    def _entries(self, channel_name, *args):
+        cursor = None
+        variables_common = self._make_variables(channel_name, *args)
+        entries_key = '%ss' % self._ENTRY_KIND
+        for page_num in itertools.count(1):
+            variables = variables_common.copy()
+            variables['limit'] = self._PAGE_LIMIT
+            if cursor:
+                variables['cursor'] = cursor
+            page = self._download_gql(
+                channel_name, self._OPERATION_NAME, variables,
+                self._SHA256_HASH,
+                'Downloading %ss GraphQL page %s' % (self._NODE_KIND, page_num),
+                fatal=False)
+            if not page:
+                break
+            edges = try_get(
+                page, lambda x: x['data']['user'][entries_key]['edges'], list)
+            if not edges:
+                break
+            for edge in edges:
+                if not isinstance(edge, dict):
+                    continue
+                if edge.get('__typename') != self._EDGE_KIND:
+                    continue
+                node = edge.get('node')
+                if not isinstance(node, dict):
+                    continue
+                if node.get('__typename') != self._NODE_KIND:
+                    continue
+                entry = self._extract_entry(node)
+                if entry:
+                    cursor = edge.get('cursor')
+                    yield entry
+            if not cursor or not isinstance(cursor, compat_str):
+                break
+
+    # Deprecated kraken v5 API
+    def _entries_kraken(self, channel_name, broadcast_type, sort):
+        access_token = self._download_access_token(channel_name)
+        channel_id = self._extract_channel_id(access_token['token'], channel_name)
         offset = 0
-        limit = self._PAGE_LIMIT
-        broken_paging_detected = False
         counter_override = None
         for counter in itertools.count(1):
             response = self._call_api(
-                self._PLAYLIST_PATH % (channel_id, offset, limit),
+                'kraken/channels/%s/videos/' % channel_id,
                 channel_id,
-                'Downloading %s JSON page %s'
-                % (self._PLAYLIST_TYPE, counter_override or counter))
-            page_entries = self._extract_playlist_page(response)
-            if not page_entries:
+                'Downloading video JSON page %s' % (counter_override or counter),
+                query={
+                    'offset': offset,
+                    'limit': self._PAGE_LIMIT,
+                    'broadcast_type': broadcast_type,
+                    'sort': sort,
+                })
+            videos = response.get('videos')
+            if not isinstance(videos, list):
                 break
+            for video in videos:
+                if not isinstance(video, dict):
+                    continue
+                video_url = url_or_none(video.get('url'))
+                if not video_url:
+                    continue
+                yield {
+                    '_type': 'url_transparent',
+                    'ie_key': TwitchVodIE.ie_key(),
+                    'id': video.get('_id'),
+                    'url': video_url,
+                    'title': video.get('title'),
+                    'description': video.get('description'),
+                    'timestamp': unified_timestamp(video.get('published_at')),
+                    'duration': float_or_none(video.get('length')),
+                    'view_count': int_or_none(video.get('views')),
+                    'language': video.get('language'),
+                }
+            offset += self._PAGE_LIMIT
             total = int_or_none(response.get('_total'))
-            # Since the beginning of March 2016 twitch's paging mechanism
-            # is completely broken on the twitch side. It simply ignores
-            # a limit and returns the whole offset number of videos.
-            # Working around by just requesting all videos at once.
-            # Upd: pagination bug was fixed by twitch on 15.03.2016.
-            if not broken_paging_detected and total and len(page_entries) > limit:
-                self.report_warning(
-                    'Twitch pagination is broken on twitch side, requesting all videos at once',
-                    channel_id)
-                broken_paging_detected = True
-                offset = total
-                counter_override = '(all at once)'
-                continue
-            entries.extend(page_entries)
-            if broken_paging_detected or total and len(page_entries) >= total:
+            if total and offset >= total:
                 break
-            offset += limit
-        return self.playlist_result(
-            [self._make_url_result(entry) for entry in orderedSet(entries)],
-            channel_id, channel_name)
-
-    def _make_url_result(self, url):
-        try:
-            video_id = 'v%s' % TwitchVodIE._match_id(url)
-            return self.url_result(url, TwitchVodIE.ie_key(), video_id=video_id)
-        except AssertionError:
-            return self.url_result(url)
-
-    def _extract_playlist_page(self, response):
-        videos = response.get('videos')
-        return [video['url'] for video in videos] if videos else []
-
-    def _real_extract(self, url):
-        return self._extract_playlist(self._match_id(url))
 
 
-class TwitchProfileIE(TwitchPlaylistBaseIE):
-    IE_NAME = 'twitch:profile'
-    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
-    _PLAYLIST_TYPE = 'profile'
+class TwitchVideosIE(TwitchPlaylistBaseIE):
+    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/(?:videos|profile)'
 
     _TESTS = [{
-        'url': 'http://www.twitch.tv/vanillatv/profile',
-        'info_dict': {
-            'id': 'vanillatv',
-            'title': 'VanillaTV',
-        },
-        'playlist_mincount': 412,
-    }, {
-        'url': 'http://m.twitch.tv/vanillatv/profile',
-        'only_matching': True,
-    }]
-
-
-class TwitchVideosBaseIE(TwitchPlaylistBaseIE):
-    _VALID_URL_VIDEOS_BASE = r'%s/(?P<id>[^/]+)/videos' % TwitchBaseIE._VALID_URL_BASE
-    _PLAYLIST_PATH = TwitchPlaylistBaseIE._PLAYLIST_PATH + '&broadcast_type='
-
-
-class TwitchAllVideosIE(TwitchVideosBaseIE):
-    IE_NAME = 'twitch:videos:all'
-    _VALID_URL = r'%s/all' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
-    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive,upload,highlight'
-    _PLAYLIST_TYPE = 'all videos'
-
-    _TESTS = [{
-        'url': 'https://www.twitch.tv/spamfish/videos/all',
+        # All Videos sorted by Date
+        'url': 'https://www.twitch.tv/spamfish/videos?filter=all',
         'info_dict': {
             'id': 'spamfish',
-            'title': 'Spamfish',
+            'title': 'spamfish - All Videos sorted by Date',
         },
-        'playlist_mincount': 869,
+        'playlist_mincount': 924,
+    }, {
+        # All Videos sorted by Popular
+        'url': 'https://www.twitch.tv/spamfish/videos?filter=all&sort=views',
+        'info_dict': {
+            'id': 'spamfish',
+            'title': 'spamfish - All Videos sorted by Popular',
+        },
+        'playlist_mincount': 931,
+    }, {
+        # Past Broadcasts sorted by Date
+        'url': 'https://www.twitch.tv/spamfish/videos?filter=archives',
+        'info_dict': {
+            'id': 'spamfish',
+            'title': 'spamfish - Past Broadcasts sorted by Date',
+        },
+        'playlist_mincount': 27,
+    }, {
+        # Highlights sorted by Date
+        'url': 'https://www.twitch.tv/spamfish/videos?filter=highlights',
+        'info_dict': {
+            'id': 'spamfish',
+            'title': 'spamfish - Highlights sorted by Date',
+        },
+        'playlist_mincount': 901,
+    }, {
+        # Uploads sorted by Date
+        'url': 'https://www.twitch.tv/esl_csgo/videos?filter=uploads&sort=time',
+        'info_dict': {
+            'id': 'esl_csgo',
+            'title': 'esl_csgo - Uploads sorted by Date',
+        },
+        'playlist_mincount': 5,
+    }, {
+        # Past Premieres sorted by Date
+        'url': 'https://www.twitch.tv/spamfish/videos?filter=past_premieres',
+        'info_dict': {
+            'id': 'spamfish',
+            'title': 'spamfish - Past Premieres sorted by Date',
+        },
+        'playlist_mincount': 1,
+    }, {
+        'url': 'https://www.twitch.tv/spamfish/videos/all',
+        'only_matching': True,
     }, {
         'url': 'https://m.twitch.tv/spamfish/videos/all',
         'only_matching': True,
-    }]
-
-
-class TwitchUploadsIE(TwitchVideosBaseIE):
-    IE_NAME = 'twitch:videos:uploads'
-    _VALID_URL = r'%s/uploads' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
-    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'upload'
-    _PLAYLIST_TYPE = 'uploads'
-
-    _TESTS = [{
-        'url': 'https://www.twitch.tv/spamfish/videos/uploads',
-        'info_dict': {
-            'id': 'spamfish',
-            'title': 'Spamfish',
-        },
-        'playlist_mincount': 0,
     }, {
-        'url': 'https://m.twitch.tv/spamfish/videos/uploads',
+        'url': 'https://www.twitch.tv/spamfish/videos',
         'only_matching': True,
     }]
 
+    Broadcast = collections.namedtuple('Broadcast', ['type', 'label'])
 
-class TwitchPastBroadcastsIE(TwitchVideosBaseIE):
-    IE_NAME = 'twitch:videos:past-broadcasts'
-    _VALID_URL = r'%s/past-broadcasts' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
-    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive'
-    _PLAYLIST_TYPE = 'past broadcasts'
+    _DEFAULT_BROADCAST = Broadcast(None, 'All Videos')
+    _BROADCASTS = {
+        'archives': Broadcast('ARCHIVE', 'Past Broadcasts'),
+        'highlights': Broadcast('HIGHLIGHT', 'Highlights'),
+        'uploads': Broadcast('UPLOAD', 'Uploads'),
+        'past_premieres': Broadcast('PAST_PREMIERE', 'Past Premieres'),
+        'all': _DEFAULT_BROADCAST,
+    }
+
+    _DEFAULT_SORTED_BY = 'Date'
+    _SORTED_BY = {
+        'time': _DEFAULT_SORTED_BY,
+        'views': 'Popular',
+    }
+
+    _SHA256_HASH = 'a937f1d22e269e39a03b509f65a7490f9fc247d7f83d6ac1421523e3b68042cb'
+    _OPERATION_NAME = 'FilterableVideoTower_Videos'
+    _ENTRY_KIND = 'video'
+    _EDGE_KIND = 'VideoEdge'
+    _NODE_KIND = 'Video'
+
+    @classmethod
+    def suitable(cls, url):
+        return (False
+                if any(ie.suitable(url) for ie in (
+                    TwitchVideosClipsIE,
+                    TwitchVideosCollectionsIE))
+                else super(TwitchVideosIE, cls).suitable(url))
+
+    @staticmethod
+    def _make_variables(channel_name, broadcast_type, sort):
+        return {
+            'channelOwnerLogin': channel_name,
+            'broadcastType': broadcast_type,
+            'videoSort': sort.upper(),
+        }
+
+    @staticmethod
+    def _extract_entry(node):
+        return _make_video_result(node)
+
+    def _real_extract(self, url):
+        channel_name = self._match_id(url)
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        filter = qs.get('filter', ['all'])[0]
+        sort = qs.get('sort', ['time'])[0]
+        broadcast = self._BROADCASTS.get(filter, self._DEFAULT_BROADCAST)
+        return self.playlist_result(
+            self._entries(channel_name, broadcast.type, sort),
+            playlist_id=channel_name,
+            playlist_title='%s - %s sorted by %s'
+            % (channel_name, broadcast.label,
+               self._SORTED_BY.get(sort, self._DEFAULT_SORTED_BY)))
+
+
+class TwitchVideosClipsIE(TwitchPlaylistBaseIE):
+    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/(?:clips|videos/*?\?.*?\bfilter=clips)'
 
     _TESTS = [{
-        'url': 'https://www.twitch.tv/spamfish/videos/past-broadcasts',
+        # Clips
+        'url': 'https://www.twitch.tv/vanillatv/clips?filter=clips&range=all',
         'info_dict': {
-            'id': 'spamfish',
-            'title': 'Spamfish',
+            'id': 'vanillatv',
+            'title': 'vanillatv - Clips Top All',
         },
-        'playlist_mincount': 0,
+        'playlist_mincount': 1,
     }, {
-        'url': 'https://m.twitch.tv/spamfish/videos/past-broadcasts',
+        'url': 'https://www.twitch.tv/dota2ruhub/videos?filter=clips&range=7d',
         'only_matching': True,
     }]
 
+    Clip = collections.namedtuple('Clip', ['filter', 'label'])
 
-class TwitchHighlightsIE(TwitchVideosBaseIE):
-    IE_NAME = 'twitch:videos:highlights'
-    _VALID_URL = r'%s/highlights' % TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE
-    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'highlight'
-    _PLAYLIST_TYPE = 'highlights'
+    _DEFAULT_CLIP = Clip('LAST_WEEK', 'Top 7D')
+    _RANGE = {
+        '24hr': Clip('LAST_DAY', 'Top 24H'),
+        '7d': _DEFAULT_CLIP,
+        '30d': Clip('LAST_MONTH', 'Top 30D'),
+        'all': Clip('ALL_TIME', 'Top All'),
+    }
+
+    # NB: values other than 20 result in skipped videos
+    _PAGE_LIMIT = 20
+
+    _SHA256_HASH = 'b73ad2bfaecfd30a9e6c28fada15bd97032c83ec77a0440766a56fe0bd632777'
+    _OPERATION_NAME = 'ClipsCards__User'
+    _ENTRY_KIND = 'clip'
+    _EDGE_KIND = 'ClipEdge'
+    _NODE_KIND = 'Clip'
+
+    @staticmethod
+    def _make_variables(channel_name, filter):
+        return {
+            'login': channel_name,
+            'criteria': {
+                'filter': filter,
+            },
+        }
+
+    @staticmethod
+    def _extract_entry(node):
+        assert isinstance(node, dict)
+        clip_url = url_or_none(node.get('url'))
+        if not clip_url:
+            return
+        return {
+            '_type': 'url_transparent',
+            'ie_key': TwitchClipsIE.ie_key(),
+            'id': node.get('id'),
+            'url': clip_url,
+            'title': node.get('title'),
+            'thumbnail': node.get('thumbnailURL'),
+            'duration': float_or_none(node.get('durationSeconds')),
+            'timestamp': unified_timestamp(node.get('createdAt')),
+            'view_count': int_or_none(node.get('viewCount')),
+            'language': node.get('language'),
+        }
+
+    def _real_extract(self, url):
+        channel_name = self._match_id(url)
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        range = qs.get('range', ['7d'])[0]
+        clip = self._RANGE.get(range, self._DEFAULT_CLIP)
+        return self.playlist_result(
+            self._entries(channel_name, clip.filter),
+            playlist_id=channel_name,
+            playlist_title='%s - Clips %s' % (channel_name, clip.label))
+
+
+class TwitchVideosCollectionsIE(TwitchPlaylistBaseIE):
+    _VALID_URL = r'https?://(?:(?:www|go|m)\.)?twitch\.tv/(?P<id>[^/]+)/videos/*?\?.*?\bfilter=collections'
 
     _TESTS = [{
-        'url': 'https://www.twitch.tv/spamfish/videos/highlights',
+        # Collections
+        'url': 'https://www.twitch.tv/spamfish/videos?filter=collections',
         'info_dict': {
             'id': 'spamfish',
-            'title': 'Spamfish',
+            'title': 'spamfish - Collections',
         },
-        'playlist_mincount': 805,
-    }, {
-        'url': 'https://m.twitch.tv/spamfish/videos/highlights',
-        'only_matching': True,
+        'playlist_mincount': 3,
     }]
 
+    _SHA256_HASH = '07e3691a1bad77a36aba590c351180439a40baefc1c275356f40fc7082419a84'
+    _OPERATION_NAME = 'ChannelCollectionsContent'
+    _ENTRY_KIND = 'collection'
+    _EDGE_KIND = 'CollectionsItemEdge'
+    _NODE_KIND = 'Collection'
+
+    @staticmethod
+    def _make_variables(channel_name):
+        return {
+            'ownerLogin': channel_name,
+        }
+
+    @staticmethod
+    def _extract_entry(node):
+        assert isinstance(node, dict)
+        collection_id = node.get('id')
+        if not collection_id:
+            return
+        return {
+            '_type': 'url_transparent',
+            'ie_key': TwitchCollectionIE.ie_key(),
+            'id': collection_id,
+            'url': 'https://www.twitch.tv/collections/%s' % collection_id,
+            'title': node.get('title'),
+            'thumbnail': node.get('thumbnailURL'),
+            'duration': float_or_none(node.get('lengthSeconds')),
+            'timestamp': unified_timestamp(node.get('updatedAt')),
+            'view_count': int_or_none(node.get('viewCount')),
+        }
+
+    def _real_extract(self, url):
+        channel_name = self._match_id(url)
+        return self.playlist_result(
+            self._entries(channel_name), playlist_id=channel_name,
+            playlist_title='%s - Collections' % channel_name)
+
 
 class TwitchStreamIE(TwitchBaseIE):
     IE_NAME = 'twitch:stream'
@@ -583,27 +765,21 @@ class TwitchStreamIE(TwitchBaseIE):
     def suitable(cls, url):
         return (False
                 if any(ie.suitable(url) for ie in (
-                    TwitchVideoIE,
-                    TwitchChapterIE,
                     TwitchVodIE,
-                    TwitchProfileIE,
-                    TwitchAllVideosIE,
-                    TwitchUploadsIE,
-                    TwitchPastBroadcastsIE,
-                    TwitchHighlightsIE,
+                    TwitchCollectionIE,
+                    TwitchVideosIE,
+                    TwitchVideosClipsIE,
+                    TwitchVideosCollectionsIE,
                     TwitchClipsIE))
                 else super(TwitchStreamIE, cls).suitable(url))
 
     def _real_extract(self, url):
         channel_name = self._match_id(url)
 
-        access_token = self._call_api(
-            'api/channels/%s/access_token' % channel_name, channel_name,
-            'Downloading access token JSON')
+        access_token = self._download_access_token(channel_name)
 
         token = access_token['token']
-        channel_id = compat_str(self._parse_json(
-            token, channel_name)['channel_id'])
+        channel_id = self._extract_channel_id(token, channel_name)
 
         stream = self._call_api(
             'kraken/streams/%s?stream_type=all' % channel_id,

From 5ed05f26adea517aa715d5ec4b0dccfea30b2b8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Sep 2020 10:45:57 +0700
Subject: [PATCH 102/340] [svtplay] Fix svt id extraction (closes #26425,
 closes #26428, closes #26438)

---
 youtube_dl/extractor/svt.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py
index e12389cad..8e9ec2ca3 100644
--- a/youtube_dl/extractor/svt.py
+++ b/youtube_dl/extractor/svt.py
@@ -224,9 +224,15 @@ class SVTPlayIE(SVTPlayBaseIE):
                 self._adjust_title(info_dict)
                 return info_dict
 
-        svt_id = self._search_regex(
-            r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
-            webpage, 'video id')
+            svt_id = try_get(
+                data, lambda x: x['statistics']['dataLake']['content']['id'],
+                compat_str)
+
+        if not svt_id:
+            svt_id = self._search_regex(
+                (r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
+                 r'"content"\s*:\s*{.*?"id"\s*:\s*"([\da-zA-Z-]+)"'),
+                webpage, 'video id')
 
         return self._extract_by_video_id(svt_id, webpage)
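Note on the fallback above: try_get (from youtube_dl.utils) walks a nested structure and returns None instead of raising when a key is missing, so the regex path only runs when the dataLake id is absent. A toy example assuming the same JSON layout as the patch (the id value is made up):

    from youtube_dl.utils import try_get

    data = {'statistics': {'dataLake': {'content': {'id': 'jXvU49'}}}}
    svt_id = try_get(
        data, lambda x: x['statistics']['dataLake']['content']['id'], str)
    # -> 'jXvU49'; with an empty dict it returns None and the
    # _search_regex fallback is consulted instead.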
 

From 62ae19ff760b9df6a2600430e42e2c32f1449e7a Mon Sep 17 00:00:00 2001
From: TheRealDude2 <the.real.dude@gmx.de>
Date: Sun, 6 Sep 2020 06:10:27 +0200
Subject: [PATCH 103/340] [xhamster] Improve initials regex (#26526) (closes
 #26353)

---
 youtube_dl/extractor/xhamster.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index 902a3ed33..76aeaf9a4 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -138,7 +138,8 @@ class XHamsterIE(InfoExtractor):
 
         initials = self._parse_json(
             self._search_regex(
-                r'window\.initials\s*=\s*({.+?})\s*;', webpage, 'initials',
+                (r'window\.initials\s*=\s*({.+?})\s*;\s*</script>',
+                 r'window\.initials\s*=\s*({.+?})\s*;'), webpage, 'initials',
                 default='{}'),
             video_id, fatal=False)
         if initials:
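For reference, _search_regex accepts a tuple of patterns and tries them in order, so the stricter </script>-anchored variant wins when it matches and the looser one remains as a fallback. A plain-re sketch of the same idea, using a hypothetical page snippet:

    import re

    webpage = '<script>window.initials={"videoModel":{"id":123}};</script>'
    for pattern in (r'window\.initials\s*=\s*({.+?})\s*;\s*</script>',
                    r'window\.initials\s*=\s*({.+?})\s*;'):
        m = re.search(pattern, webpage)
        if m:
            initials = m.group(1)  # '{"videoModel":{"id":123}}'
            break
    else:
        initials = '{}'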

From 1d9bf655e68c873dc1f04739640046c43a1443d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Sep 2020 11:19:53 +0700
Subject: [PATCH 104/340] [utils] Recognize wav mimetype (closes #26463)

---
 test/test_utils.py  | 2 ++
 youtube_dl/utils.py | 1 +
 2 files changed, 3 insertions(+)

diff --git a/test/test_utils.py b/test/test_utils.py
index 0896f4150..962fd8d75 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -803,6 +803,8 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(mimetype2ext('text/vtt'), 'vtt')
         self.assertEqual(mimetype2ext('text/vtt;charset=utf-8'), 'vtt')
         self.assertEqual(mimetype2ext('text/html; charset=utf-8'), 'html')
+        self.assertEqual(mimetype2ext('audio/x-wav'), 'wav')
+        self.assertEqual(mimetype2ext('audio/x-wav;codec=pcm'), 'wav')
 
     def test_month_by_name(self):
         self.assertEqual(month_by_name(None), None)
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index d1eca3760..01d9c0362 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -4198,6 +4198,7 @@ def mimetype2ext(mt):
         'vnd.ms-sstr+xml': 'ism',
         'quicktime': 'mov',
         'mp2t': 'ts',
+        'x-wav': 'wav',
     }.get(res, res)
 
 

From 67171ed7e940d9022e3388ddd029978bc426ea1f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Sep 2020 11:31:28 +0700
Subject: [PATCH 105/340] [youtube:user] Extend _VALID_URL (closes #26443)

---
 youtube_dl/extractor/youtube.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index b35bf03aa..6611caf06 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3008,7 +3008,7 @@ class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
 
 class YoutubeUserIE(YoutubeChannelIE):
     IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
-    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
+    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9%-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_%-]+)'
     _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
     IE_NAME = 'youtube:user'
 
@@ -3038,6 +3038,9 @@ class YoutubeUserIE(YoutubeChannelIE):
     }, {
         'url': 'https://www.youtube.com/c/gametrailers',
         'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/c/Pawe%C5%82Zadro%C5%BCniak',
+        'only_matching': True,
     }, {
         'url': 'https://www.youtube.com/gametrailers',
         'only_matching': True,
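A quick check that the widened character class accepts the percent-encoded channel name from the new test (regex copied verbatim from the hunk above):

    import re

    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9%-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_%-]+)'
    m = re.match(_VALID_URL, 'https://www.youtube.com/c/Pawe%C5%82Zadro%C5%BCniak')
    print(m.group('id'))  # Pawe%C5%82Zadro%C5%BCniak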

From 16ee69c1b7d9877c852d50428d8f047ada45d539 Mon Sep 17 00:00:00 2001
From: random-nick <random-nick@email.com>
Date: Sun, 6 Sep 2020 04:44:53 +0000
Subject: [PATCH 106/340] [youtube] Fix age gate content detection (#26100)
 (closes #26152, closes #26311, closes #26384)

---
 youtube_dl/extractor/youtube.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 6611caf06..6ae2e58c1 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1825,7 +1825,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         # Get video info
         video_info = {}
         embed_webpage = None
-        if re.search(r'player-age-gate-content">', video_webpage) is not None:
+        if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
+                or re.search(r'player-age-gate-content">', video_webpage) is not None):
             age_gate = True
             # We simulate the access to the video from www.youtube.com/v/{video_id}
             # this can be viewed without login into Youtube
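For context, _og_search_property('restrictions:age', ...) looks for the og:restrictions:age OpenGraph meta tag, so age-gated pages are now detected even when the player-age-gate-content marker is absent. Roughly, with a hypothetical page snippet (the real helper also handles reversed attribute order):

    import re

    video_webpage = '<meta property="og:restrictions:age" content="18+">'
    age_gate = (
        re.search(r'property=["\']og:restrictions:age["\'][^>]*content=["\']18\+',
                  video_webpage) is not None
        or 'player-age-gate-content">' in video_webpage)
    # -> True for the snippet above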

From 50e9fcc1fd5aa8e3c8bbeb33f2434fbf9c1200d7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Sep 2020 12:42:57 +0700
Subject: [PATCH 107/340] [nrktv:episode] Improve video id extraction (closes
 #25594, closes #26369, closes #26409)

---
 youtube_dl/extractor/nrk.py | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 94115534b..84aacbcda 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -11,7 +11,6 @@ from ..compat import (
 from ..utils import (
     ExtractorError,
     int_or_none,
-    JSON_LD_RE,
     js_to_json,
     NO_DEFAULT,
     parse_age_limit,
@@ -425,13 +424,20 @@ class NRKTVEpisodeIE(InfoExtractor):
 
         webpage = self._download_webpage(url, display_id)
 
-        nrk_id = self._parse_json(
-            self._search_regex(JSON_LD_RE, webpage, 'JSON-LD', group='json_ld'),
-            display_id)['@id']
-
+        info = self._search_json_ld(webpage, display_id, default={})
+        nrk_id = info.get('@id') or self._html_search_meta(
+            'nrk:program-id', webpage, default=None) or self._search_regex(
+            r'data-program-id=["\'](%s)' % NRKTVIE._EPISODE_RE, webpage,
+            'nrk id')
         assert re.match(NRKTVIE._EPISODE_RE, nrk_id)
-        return self.url_result(
-            'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id)
+
+        info.update({
+            '_type': 'url_transparent',
+            'id': nrk_id,
+            'url': 'nrk:%s' % nrk_id,
+            'ie_key': NRKIE.ie_key(),
+        })
+        return info
 
 
 class NRKTVSerieBaseIE(InfoExtractor):

From 6cd452acffe8d79c895a2ebd0346e2ba7f9e112f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Sep 2020 12:57:56 +0700
Subject: [PATCH 108/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index bf515f784..8d410f600 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,24 @@
+version <unreleased>
+
+Core
++ [utils] Recognize wav mimetype (#26463)
+
+Extractors
+* [nrktv:episode] Improve video id extraction (#25594, #26369, #26409)
+* [youtube] Fix age gate content detection (#26100, #26152, #26311, #26384)
+* [youtube:user] Extend URL regular expression (#26443)
+* [xhamster] Improve initials regular expression (#26526, #26353)
+* [svtplay] Fix video id extraction (#26425, #26428, #26438)
+* [twitch] Rework extractors (#12297, #20414, #20604, #21811, #21812, #22979,
+  #24263, #25010, #25553, #25606)
+    * Switch to GraphQL
+    + Add support for collections
+    + Add support for clips and collections playlists
+* [biqle] Improve video ext extraction
+* [xhamster] Fix extraction (#26157, #26254)
+* [xhamster] Extend URL regular expression (#25789, #25804, #25927)
+
+
 version 2020.07.28
 
 Extractors

From d51e23d9fc709a80ca037199a46e59c729a1192e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Sep 2020 13:00:41 +0700
Subject: [PATCH 109/340] release 2020.09.06

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          |  6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md |  4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md |  4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           |  6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      |  4 ++--
 ChangeLog                                        |  2 +-
 docs/supportedsites.md                           | 11 ++++-------
 youtube_dl/version.py                            |  2 +-
 8 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index f2260db46..f05aa66e6 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.07.28
+ [debug] youtube-dl version 2020.09.06
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 8bc05c4ba..29beaf437 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 98348e0cd..f96b8d2bb 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 86706f528..3a175aa4d 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.07.28
+ [debug] youtube-dl version 2020.09.06
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 52c2709f9..4977079de 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.07.28. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.07.28**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 8d410f600..86b0e8ccb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.09.06
 
 Core
 + [utils] Recognize wav mimetype (#26463)
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 35c1050e5..5c4e1d58c 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -950,16 +950,13 @@
  - **TVPlayHome**
  - **Tweakers**
  - **TwitCasting**
- - **twitch:chapter**
  - **twitch:clips**
- - **twitch:profile**
  - **twitch:stream**
- - **twitch:video**
- - **twitch:videos:all**
- - **twitch:videos:highlights**
- - **twitch:videos:past-broadcasts**
- - **twitch:videos:uploads**
  - **twitch:vod**
+ - **TwitchCollection**
+ - **TwitchVideos**
+ - **TwitchVideosClips**
+ - **TwitchVideosCollections**
  - **twitter**
  - **twitter:amplify**
  - **twitter:broadcast**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 17101fa47..45b4d3291 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.07.28'
+__version__ = '2020.09.06'

From aa272535567cb372b93fcd4cd24e13bca050437f Mon Sep 17 00:00:00 2001
From: tfvlrue <35318734+tfvlrue@users.noreply.github.com>
Date: Sat, 12 Sep 2020 05:35:11 -0400
Subject: [PATCH 110/340] [soundcloud] Reduce pagination limit to fix 502 Bad
 Gateway errors when listing a user's tracks. (#26557)

Per the documentation here https://developers.soundcloud.com/blog/offset-pagination-deprecated the maximum limit is 200, so let's respect that (even if a higher value sometimes works).

Co-authored-by: tfvlrue <tfvlrue>
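
A minimal sketch (not part of the patch) of how the capped limit plays into linked partitioning, assuming the API-v2 style response with 'collection' and 'next_href' fields that the extractor consumes:

# Illustrative only: page through a SoundCloud API v2 listing in chunks of 200.
import json
from urllib.parse import urlencode
from urllib.request import urlopen

def iter_entries(base_url, client_id):
    url = base_url + '?' + urlencode({
        'limit': 200,                # maximum accepted for linked partitioning
        'linked_partitioning': '1',
        'client_id': client_id,
    })
    while url:
        page = json.loads(urlopen(url).read().decode('utf-8'))
        for entry in page.get('collection') or []:
            yield entry
        # next_href is absent on the last page; in practice the client_id
        # has to be re-appended to it on every iteration.
        url = page.get('next_href')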
---
 youtube_dl/extractor/soundcloud.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index d37c52543..a2fddf6d9 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -558,8 +558,10 @@ class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
 
 class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
     def _extract_playlist(self, base_url, playlist_id, playlist_title):
+        # Per the SoundCloud documentation, the maximum limit for a linked partitioning query is 200.
+        # https://developers.soundcloud.com/blog/offset-pagination-deprecated
         COMMON_QUERY = {
-            'limit': 80000,
+            'limit': 200,
             'linked_partitioning': '1',
         }
 

From 1f7675451c119b67edb486940e1275b096dd47d5 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 12 Sep 2020 19:20:53 +0100
Subject: [PATCH 111/340] [redbulltv] Add support for new redbull.com TV
 URLs (closes #22037, closes #22063)

---
 youtube_dl/extractor/extractors.py |   2 +
 youtube_dl/extractor/redbulltv.py  | 110 +++++++++++++++++++++++++----
 2 files changed, 100 insertions(+), 12 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 9564465a0..ae7079a6a 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -918,7 +918,9 @@ from .rbmaradio import RBMARadioIE
 from .rds import RDSIE
 from .redbulltv import (
     RedBullTVIE,
+    RedBullEmbedIE,
     RedBullTVRrnContentIE,
+    RedBullIE,
 )
 from .reddit import (
     RedditIE,
diff --git a/youtube_dl/extractor/redbulltv.py b/youtube_dl/extractor/redbulltv.py
index dbe1aaded..06945bd0c 100644
--- a/youtube_dl/extractor/redbulltv.py
+++ b/youtube_dl/extractor/redbulltv.py
@@ -1,6 +1,8 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..compat import compat_HTTPError
 from ..utils import (
@@ -10,7 +12,7 @@ from ..utils import (
 
 
 class RedBullTVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?redbull(?:\.tv|\.com(?:/[^/]+)?(?:/tv)?)(?:/events/[^/]+)?/(?:videos?|live)/(?P<id>AP-\w+)'
+    _VALID_URL = r'https?://(?:www\.)?redbull(?:\.tv|\.com(?:/[^/]+)?(?:/tv)?)(?:/events/[^/]+)?/(?:videos?|live|(?:film|episode)s)/(?P<id>AP-\w+)'
     _TESTS = [{
         # film
         'url': 'https://www.redbull.tv/video/AP-1Q6XCDTAN1W11',
@@ -29,8 +31,8 @@ class RedBullTVIE(InfoExtractor):
             'id': 'AP-1PMHKJFCW1W11',
             'ext': 'mp4',
             'title': 'Grime - Hashtags S2E4',
-            'description': 'md5:b5f522b89b72e1e23216e5018810bb25',
-            'duration': 904.6,
+            'description': 'md5:5546aa612958c08a98faaad4abce484d',
+            'duration': 904,
         },
         'params': {
             'skip_download': True,
@@ -44,11 +46,15 @@ class RedBullTVIE(InfoExtractor):
     }, {
         'url': 'https://www.redbull.com/us-en/events/AP-1XV2K61Q51W11/live/AP-1XUJ86FDH1W11',
         'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/films/AP-1ZSMAW8FH2111',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/episodes/AP-1TQWK7XE11W11',
+        'only_matching': True,
     }]
 
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-
+    def extract_info(self, video_id):
         session = self._download_json(
             'https://api.redbull.tv/v3/session', video_id,
             note='Downloading access token', query={
@@ -105,24 +111,104 @@ class RedBullTVIE(InfoExtractor):
             'subtitles': subtitles,
         }
 
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        return self.extract_info(video_id)
+
+
+class RedBullEmbedIE(RedBullTVIE):
+    _VALID_URL = r'https?://(?:www\.)?redbull\.com/embed/(?P<id>rrn:content:[^:]+:[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}:[a-z]{2}-[A-Z]{2,3})'
+    _TESTS = [{
+        # HLS manifest accessible only using assetId
+        'url': 'https://www.redbull.com/embed/rrn:content:episode-videos:f3021f4f-3ed4-51ac-915a-11987126e405:en-INT',
+        'only_matching': True,
+    }]
+    _VIDEO_ESSENSE_TMPL = '''... on %s {
+      videoEssence {
+        attributes
+      }
+    }'''
+
+    def _real_extract(self, url):
+        rrn_id = self._match_id(url)
+        asset_id = self._download_json(
+            'https://edge-graphql.crepo-production.redbullaws.com/v1/graphql',
+            rrn_id, headers={'API-KEY': 'e90a1ff11335423998b100c929ecc866'},
+            query={
+                'query': '''{
+  resource(id: "%s", enforceGeoBlocking: false) {
+    %s
+    %s
+  }
+}''' % (rrn_id, self._VIDEO_ESSENSE_TMPL % 'LiveVideo', self._VIDEO_ESSENSE_TMPL % 'VideoResource'),
+            })['data']['resource']['videoEssence']['attributes']['assetId']
+        return self.extract_info(asset_id)
+
 
 class RedBullTVRrnContentIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?redbull(?:\.tv|\.com(?:/[^/]+)?(?:/tv)?)/(?:video|live)/rrn:content:[^:]+:(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+    _VALID_URL = r'https?://(?:www\.)?redbull\.com/(?P<region>[a-z]{2,3})-(?P<lang>[a-z]{2})/tv/(?:video|live|film)/(?P<id>rrn:content:[^:]+:[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
     _TESTS = [{
         'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:live-videos:e3e6feb4-e95f-50b7-962a-c70f8fd13c73/mens-dh-finals-fort-william',
         'only_matching': True,
     }, {
         'url': 'https://www.redbull.com/int-en/tv/video/rrn:content:videos:a36a0f36-ff1b-5db8-a69d-ee11a14bf48b/tn-ts-style?playlist=rrn:content:event-profiles:83f05926-5de8-5389-b5e4-9bb312d715e8:extras',
         'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/tv/film/rrn:content:films:d1f4d00e-4c04-5d19-b510-a805ffa2ab83/follow-me',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        display_id = self._match_id(url)
+        region, lang, rrn_id = re.search(self._VALID_URL, url).groups()
+        rrn_id += ':%s-%s' % (lang, region.upper())
+        return self.url_result(
+            'https://www.redbull.com/embed/' + rrn_id,
+            RedBullEmbedIE.ie_key(), rrn_id)
 
-        webpage = self._download_webpage(url, display_id)
 
-        video_url = self._og_search_url(webpage)
+class RedBullIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?redbull\.com/(?P<region>[a-z]{2,3})-(?P<lang>[a-z]{2})/(?P<type>(?:episode|film|(?:(?:recap|trailer)-)?video)s|live)/(?!AP-|rrn:content:)(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'https://www.redbull.com/int-en/episodes/grime-hashtags-s02-e04',
+        'md5': 'db8271a7200d40053a1809ed0dd574ff',
+        'info_dict': {
+            'id': 'AA-1MT8DQWA91W14',
+            'ext': 'mp4',
+            'title': 'Grime - Hashtags S2E4',
+            'description': 'md5:5546aa612958c08a98faaad4abce484d',
+        },
+    }, {
+        'url': 'https://www.redbull.com/int-en/films/kilimanjaro-mountain-of-greatness',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/recap-videos/uci-mountain-bike-world-cup-2017-mens-xco-finals-from-vallnord',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/trailer-videos/kings-of-content',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/videos/tnts-style-red-bull-dance-your-style-s1-e12',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.redbull.com/int-en/live/mens-dh-finals-fort-william',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        region, lang, filter_type, display_id = re.search(self._VALID_URL, url).groups()
+        if filter_type == 'episodes':
+            filter_type = 'episode-videos'
+        elif filter_type == 'live':
+            filter_type = 'live-videos'
+
+        rrn_id = self._download_json(
+            'https://www.redbull.com/v3/api/graphql/v1/v3/query/%s-%s' % (lang, region.upper()),
+            display_id, query={
+                'filter[type]': filter_type,
+                'filter[uriSlug]': display_id,
+                'rb3Schema': 'v1:hero',
+            })['data']['id']
 
         return self.url_result(
-            video_url, ie=RedBullTVIE.ie_key(),
-            video_id=RedBullTVIE._match_id(video_url))
+            'https://www.redbull.com/embed/' + rrn_id,
+            RedBullEmbedIE.ie_key(), rrn_id)

From b03eebdb6aab0f778293ed298543083b6ae76963 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 13 Sep 2020 11:26:11 +0100
Subject: [PATCH 112/340] [redbulltv] Improve support for redbull.com TV
 localized URLs (#22063)

---
 youtube_dl/extractor/redbulltv.py | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/redbulltv.py b/youtube_dl/extractor/redbulltv.py
index 06945bd0c..3aae79f5d 100644
--- a/youtube_dl/extractor/redbulltv.py
+++ b/youtube_dl/extractor/redbulltv.py
@@ -192,7 +192,14 @@ class RedBullIE(InfoExtractor):
     }, {
         'url': 'https://www.redbull.com/int-en/live/mens-dh-finals-fort-william',
         'only_matching': True,
+    }, {
+        # only available on the int-en website so a fallback is needed for the API
+        # https://www.redbull.com/v3/api/graphql/v1/v3/query/en-GB>en-INT?filter[uriSlug]=fia-wrc-saturday-recap-estonia&rb3Schema=v1:hero
+        'url': 'https://www.redbull.com/gb-en/live/fia-wrc-saturday-recap-estonia',
+        'only_matching': True,
     }]
+    _INT_FALLBACK_LIST = ['de', 'en', 'es', 'fr']
+    _LAT_FALLBACK_MAP = ['ar', 'bo', 'car', 'cl', 'co', 'mx', 'pe']
 
     def _real_extract(self, url):
         region, lang, filter_type, display_id = re.search(self._VALID_URL, url).groups()
@@ -201,8 +208,16 @@ class RedBullIE(InfoExtractor):
         elif filter_type == 'live':
             filter_type = 'live-videos'
 
+        regions = [region.upper()]
+        if region != 'int':
+            if region in self._LAT_FALLBACK_MAP:
+                regions.append('LAT')
+            if lang in self._INT_FALLBACK_LIST:
+                regions.append('INT')
+        locale = '>'.join(['%s-%s' % (lang, reg) for reg in regions])
+
         rrn_id = self._download_json(
-            'https://www.redbull.com/v3/api/graphql/v1/v3/query/%s-%s' % (lang, region.upper()),
+            'https://www.redbull.com/v3/api/graphql/v1/v3/query/' + locale,
             display_id, query={
                 'filter[type]': filter_type,
                 'filter[uriSlug]': display_id,

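A worked example of the locale fallback chain built above (a sketch only; the region and language lists are the ones introduced by this patch):

# Illustrative: reproduce the '>'-separated locale string outside the extractor.
_INT_FALLBACK_LIST = ['de', 'en', 'es', 'fr']
_LAT_FALLBACK_MAP = ['ar', 'bo', 'car', 'cl', 'co', 'mx', 'pe']

def build_locale(region, lang):
    regions = [region.upper()]
    if region != 'int':
        if region in _LAT_FALLBACK_MAP:
            regions.append('LAT')
        if lang in _INT_FALLBACK_LIST:
            regions.append('INT')
    return '>'.join('%s-%s' % (lang, reg) for reg in regions)

assert build_locale('gb', 'en') == 'en-GB>en-INT'    # matches the URL in the test comment
assert build_locale('mx', 'es') == 'es-MX>es-LAT>es-INT'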
From 95c98100155589e224c76fddb3d01dae0bd233ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 13 Sep 2020 18:59:37 +0700
Subject: [PATCH 113/340] [svtplay] Fix id extraction (closes #26576)

---
 youtube_dl/extractor/svt.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py
index 8e9ec2ca3..2f6887d86 100644
--- a/youtube_dl/extractor/svt.py
+++ b/youtube_dl/extractor/svt.py
@@ -231,7 +231,9 @@ class SVTPlayIE(SVTPlayBaseIE):
         if not svt_id:
             svt_id = self._search_regex(
                 (r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
-                 r'"content"\s*:\s*{.*?"id"\s*:\s*"([\da-zA-Z-]+)"'),
+                 r'["\']videoSvtId["\']\s*:\s*["\']([\da-zA-Z-]+)',
+                 r'"content"\s*:\s*{.*?"id"\s*:\s*"([\da-zA-Z-]+)"',
+                 r'["\']svtId["\']\s*:\s*["\']([\da-zA-Z-]+)'),
                 webpage, 'video id')
 
         return self._extract_by_video_id(svt_id, webpage)

From da2069fb22fd3b34046fd1be03690fccdd9ab1a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 13 Sep 2020 20:43:50 +0700
Subject: [PATCH 114/340] [googledrive] Use redirect URLs for source format
 (closes #18877, closes #23919, closes #24689, closes #26565)

---
 youtube_dl/extractor/googledrive.py | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/googledrive.py b/youtube_dl/extractor/googledrive.py
index 589e4d5c3..f2cc57e44 100644
--- a/youtube_dl/extractor/googledrive.py
+++ b/youtube_dl/extractor/googledrive.py
@@ -220,19 +220,27 @@ class GoogleDriveIE(InfoExtractor):
                 'id': video_id,
                 'export': 'download',
             })
-        urlh = self._request_webpage(
-            source_url, video_id, note='Requesting source file',
-            errnote='Unable to request source file', fatal=False)
+
+        def request_source_file(source_url, kind):
+            return self._request_webpage(
+                source_url, video_id, note='Requesting %s file' % kind,
+                errnote='Unable to request %s file' % kind, fatal=False)
+        urlh = request_source_file(source_url, 'source')
         if urlh:
-            def add_source_format(src_url):
+            def add_source_format(urlh):
                 formats.append({
-                    'url': src_url,
+                    # Use redirect URLs as download URLs in order to calculate
+                    # correct cookies in _calc_cookies.
+                    # Using original URLs may result in a redirect loop due to
+                    # google.com's cookies being mistakenly used for googleusercontent.com
+                    # redirect URLs (see #23919).
+                    'url': urlh.geturl(),
                     'ext': determine_ext(title, 'mp4').lower(),
                     'format_id': 'source',
                     'quality': 1,
                 })
             if urlh.headers.get('Content-Disposition'):
-                add_source_format(source_url)
+                add_source_format(urlh)
             else:
                 confirmation_webpage = self._webpage_read_content(
                     urlh, url, video_id, note='Downloading confirmation page',
@@ -242,9 +250,12 @@ class GoogleDriveIE(InfoExtractor):
                         r'confirm=([^&"\']+)', confirmation_webpage,
                         'confirmation code', fatal=False)
                     if confirm:
-                        add_source_format(update_url_query(source_url, {
+                        confirmed_source_url = update_url_query(source_url, {
                             'confirm': confirm,
-                        }))
+                        })
+                        urlh = request_source_file(confirmed_source_url, 'confirmed source')
+                        if urlh and urlh.headers.get('Content-Disposition'):
+                            add_source_format(urlh)
 
         if not formats:
             reason = self._search_regex(
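For illustration (standard urllib behaviour, not a youtube-dl API): the response handle returned by an urlopen-style request exposes the final, post-redirect URL via geturl(), which is what add_source_format() now records so that cookies are computed against googleusercontent.com rather than google.com. The id below is a hypothetical placeholder:

# Illustrative sketch only.
from urllib.request import urlopen

source_url = 'https://drive.google.com/uc?id=SOME_VIDEO_ID&export=download'  # hypothetical id
urlh = urlopen(source_url)
# After redirects are followed, geturl() points at the download host
# (typically a googleusercontent.com URL), not at the original export URL.
print(urlh.geturl())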

From 06cd4cdb252a45da25ed75ea63b714bc3b9d691b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 13 Sep 2020 21:07:25 +0700
Subject: [PATCH 115/340] [srgssr] Extend _VALID_URL (closes #26555, closes
 #26556, closes #26578)

---
 youtube_dl/extractor/srgssr.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/srgssr.py b/youtube_dl/extractor/srgssr.py
index 170dce87f..f63a1359a 100644
--- a/youtube_dl/extractor/srgssr.py
+++ b/youtube_dl/extractor/srgssr.py
@@ -114,7 +114,7 @@ class SRGSSRPlayIE(InfoExtractor):
                             [^/]+/(?P<type>video|audio)/[^?]+|
                             popup(?P<type_2>video|audio)player
                         )
-                        \?id=(?P<id>[0-9a-f\-]{36}|\d+)
+                        \?.*?\b(?:id=|urn=urn:[^:]+:video:)(?P<id>[0-9a-f\-]{36}|\d+)
                     '''
 
     _TESTS = [{
@@ -175,6 +175,12 @@ class SRGSSRPlayIE(InfoExtractor):
     }, {
         'url': 'https://www.srf.ch/play/tv/popupvideoplayer?id=c4dba0ca-e75b-43b2-a34f-f708a4932e01',
         'only_matching': True,
+    }, {
+        'url': 'https://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?urn=urn:srf:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.rts.ch/play/tv/19h30/video/le-19h30?urn=urn:rts:video:6348260',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From ea74e00b3afe604531419c17eb7291038115a271 Mon Sep 17 00:00:00 2001
From: Daniel Peukert <dan.peukert@gmail.com>
Date: Sun, 13 Sep 2020 16:23:21 +0200
Subject: [PATCH 116/340] [youtube] Fix empty description extraction (#26575)
 (closes #26006)

---
 youtube_dl/extractor/youtube.py | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 6ae2e58c1..02f3ab61a 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1264,7 +1264,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'params': {
                 'skip_download': True,
             },
-        }
+        },
+        {
+            # empty description results in an empty string
+            'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
+            'info_dict': {
+                'id': 'x41yOUIvK2k',
+                'ext': 'mp4',
+                'title': 'IMG 3456',
+                'description': '',
+                'upload_date': '20170613',
+                'uploader_id': 'ElevageOrVert',
+                'uploader': 'ElevageOrVert',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        },
     ]
 
     def __init__(self, *args, **kwargs):
@@ -1931,7 +1947,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             ''', replace_url, video_description)
             video_description = clean_html(video_description)
         else:
-            video_description = video_details.get('shortDescription') or self._html_search_meta('description', video_webpage)
+            video_description = video_details.get('shortDescription')
+            if video_description is None:
+                video_description = self._html_search_meta('description', video_webpage)
 
         if not smuggled_data.get('force_singlefeed', False):
             if not self._downloader.params.get('noplaylist'):
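The distinction the patch encodes, as a short sketch: '' is falsy, so the old `or` fallback silently replaced an intentionally empty description with the <meta> tag contents, while an `is None` check keeps it:

# Illustrative only.
short_description = ''            # video genuinely has no description
meta_description = 'text taken from the <meta name="description"> tag'

old = short_description or meta_description
new = short_description if short_description is not None else meta_description
assert old == meta_description    # previous behaviour: wrong fallback
assert new == ''                  # fixed behaviour: empty string preserved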

From 97f34a48d72c5e55a584b7defc0dd00d2d9b416c Mon Sep 17 00:00:00 2001
From: Derek Land <d.d.land@hhs.nl>
Date: Sun, 13 Sep 2020 16:38:16 +0200
Subject: [PATCH 117/340] [rtlnl] Extend _VALID_URL (#26549) (closes #25821)

---
 youtube_dl/extractor/rtlnl.py | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py
index fadca8c17..cf4dc85db 100644
--- a/youtube_dl/extractor/rtlnl.py
+++ b/youtube_dl/extractor/rtlnl.py
@@ -14,12 +14,26 @@ class RtlNlIE(InfoExtractor):
     _VALID_URL = r'''(?x)
         https?://(?:(?:www|static)\.)?
         (?:
-            rtlxl\.nl/[^\#]*\#!/[^/]+/|
+            rtlxl\.nl/(?:[^\#]*\#!|programma)/[^/]+/|
             rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/)
         )
         (?P<id>[0-9a-f-]+)'''
 
     _TESTS = [{
+        # new URL schema
+        'url': 'https://www.rtlxl.nl/programma/rtl-nieuws/0bd1384d-d970-3086-98bb-5c104e10c26f',
+        'md5': '490428f1187b60d714f34e1f2e3af0b6',
+        'info_dict': {
+            'id': '0bd1384d-d970-3086-98bb-5c104e10c26f',
+            'ext': 'mp4',
+            'title': 'RTL Nieuws',
+            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+            'timestamp': 1593293400,
+            'upload_date': '20200627',
+            'duration': 661.08,
+        },
+    }, {
+        # old URL schema
         'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/82b1aad1-4a14-3d7b-b554-b0aed1b2c416',
         'md5': '473d1946c1fdd050b2c0161a4b13c373',
         'info_dict': {
@@ -31,6 +45,7 @@ class RtlNlIE(InfoExtractor):
             'upload_date': '20160429',
             'duration': 1167.96,
         },
+        'skip': '404',
     }, {
         # best format available a3t
         'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',

From 45f6362464d283fe61653c75fbb93a6a87ac6f65 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 13 Sep 2020 21:42:06 +0700
Subject: [PATCH 118/340] [rtlnl] Extend _VALID_URL for new embed URL schema

---
 youtube_dl/extractor/rtlnl.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py
index cf4dc85db..9eaa06f25 100644
--- a/youtube_dl/extractor/rtlnl.py
+++ b/youtube_dl/extractor/rtlnl.py
@@ -15,7 +15,8 @@ class RtlNlIE(InfoExtractor):
         https?://(?:(?:www|static)\.)?
         (?:
             rtlxl\.nl/(?:[^\#]*\#!|programma)/[^/]+/|
-            rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/)
+            rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/)|
+            embed\.rtl\.nl/\#uuid=
         )
         (?P<id>[0-9a-f-]+)'''
 
@@ -91,6 +92,10 @@ class RtlNlIE(InfoExtractor):
     }, {
         'url': 'https://static.rtl.nl/embed/?uuid=1a2970fc-5c0b-43ff-9fdc-927e39e6d1bc&autoplay=false&publicatiepunt=rtlnieuwsnl',
         'only_matching': True,
+    }, {
+        # new embed URL schema
+        'url': 'https://embed.rtl.nl/#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From a31a022efd1b7bac8cd0a477f08534f3fcc67298 Mon Sep 17 00:00:00 2001
From: Alex Merkel <mail@alexmerkel.com>
Date: Thu, 18 Jun 2020 22:36:44 +0200
Subject: [PATCH 119/340] [postprocessor/embedthumbnail] Add support for non
 jpeg/png thumbnails (closes #25687)

---
 youtube_dl/postprocessor/embedthumbnail.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/youtube_dl/postprocessor/embedthumbnail.py b/youtube_dl/postprocessor/embedthumbnail.py
index 56be914b8..e2002ab0b 100644
--- a/youtube_dl/postprocessor/embedthumbnail.py
+++ b/youtube_dl/postprocessor/embedthumbnail.py
@@ -41,6 +41,28 @@ class EmbedThumbnailPP(FFmpegPostProcessor):
                 'Skipping embedding the thumbnail because the file is missing.')
             return [], info
 
+        # Check for mislabeled webp file
+        with open(encodeFilename(thumbnail_filename), "rb") as f:
+            b = f.read(16)
+        if b'\x57\x45\x42\x50' in b:  # Binary for WEBP
+            [thumbnail_filename_path, thumbnail_filename_extension] = os.path.splitext(thumbnail_filename)
+            if not thumbnail_filename_extension == ".webp":
+                webp_thumbnail_filename = thumbnail_filename_path + ".webp"
+                os.rename(encodeFilename(thumbnail_filename), encodeFilename(webp_thumbnail_filename))
+                thumbnail_filename = webp_thumbnail_filename
+
+        # If not a jpg or png thumbnail, convert it to jpg using ffmpeg
+        if not os.path.splitext(thumbnail_filename)[1].lower() in ['.jpg', '.png']:
+            jpg_thumbnail_filename = os.path.splitext(thumbnail_filename)[0] + ".jpg"
+            jpg_thumbnail_filename = os.path.join(os.path.dirname(jpg_thumbnail_filename), os.path.basename(jpg_thumbnail_filename).replace('%', '_'))  # ffmpeg interprets % as image sequence
+
+            self._downloader.to_screen('[ffmpeg] Converting thumbnail "%s" to JPEG' % thumbnail_filename)
+
+            self.run_ffmpeg(thumbnail_filename, jpg_thumbnail_filename, ['-bsf:v', 'mjpeg2jpeg'])
+
+            os.remove(encodeFilename(thumbnail_filename))
+            thumbnail_filename = jpg_thumbnail_filename
+
         if info['ext'] == 'mp3':
             options = [
                 '-c', 'copy', '-map', '0', '-map', '1',

From bff857a8af696e701482208617bf0b7564951326 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 14 Sep 2020 03:28:31 +0700
Subject: [PATCH 120/340] [postprocessor/embedthumbnail] Fix issues (closes
 #25717) * Fix WebP with wrong extension processing * Fix embedding of
 thumbnails with % character in path
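
For context, a standalone sketch of the container check this patch relies on (assumption: a WebP file is a RIFF container, with 'RIFF' in bytes 0-3, the file size in bytes 4-7 and 'WEBP' in bytes 8-11):

# Illustrative only; mirrors the is_webp() helper added below.
def looks_like_webp(path):
    with open(path, 'rb') as f:
        header = f.read(12)
    return header[0:4] == b'RIFF' and header[8:12] == b'WEBP'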

---
 youtube_dl/postprocessor/embedthumbnail.py | 49 +++++++++++++---------
 1 file changed, 30 insertions(+), 19 deletions(-)

diff --git a/youtube_dl/postprocessor/embedthumbnail.py b/youtube_dl/postprocessor/embedthumbnail.py
index e2002ab0b..5a3359588 100644
--- a/youtube_dl/postprocessor/embedthumbnail.py
+++ b/youtube_dl/postprocessor/embedthumbnail.py
@@ -13,6 +13,7 @@ from ..utils import (
     encodeFilename,
     PostProcessingError,
     prepend_extension,
+    replace_extension,
     shell_quote
 )
 
@@ -41,27 +42,37 @@ class EmbedThumbnailPP(FFmpegPostProcessor):
                 'Skipping embedding the thumbnail because the file is missing.')
             return [], info
 
-        # Check for mislabeled webp file
-        with open(encodeFilename(thumbnail_filename), "rb") as f:
-            b = f.read(16)
-        if b'\x57\x45\x42\x50' in b:  # Binary for WEBP
-            [thumbnail_filename_path, thumbnail_filename_extension] = os.path.splitext(thumbnail_filename)
-            if not thumbnail_filename_extension == ".webp":
-                webp_thumbnail_filename = thumbnail_filename_path + ".webp"
-                os.rename(encodeFilename(thumbnail_filename), encodeFilename(webp_thumbnail_filename))
-                thumbnail_filename = webp_thumbnail_filename
+        def is_webp(path):
+            with open(encodeFilename(path), 'rb') as f:
+                b = f.read(12)
+            return b[0:4] == b'RIFF' and b[8:] == b'WEBP'
 
-        # If not a jpg or png thumbnail, convert it to jpg using ffmpeg
-        if not os.path.splitext(thumbnail_filename)[1].lower() in ['.jpg', '.png']:
-            jpg_thumbnail_filename = os.path.splitext(thumbnail_filename)[0] + ".jpg"
-            jpg_thumbnail_filename = os.path.join(os.path.dirname(jpg_thumbnail_filename), os.path.basename(jpg_thumbnail_filename).replace('%', '_'))  # ffmpeg interprets % as image sequence
+        # Correct extension for WebP file with wrong extension (see #25687, #25717)
+        _, thumbnail_ext = os.path.splitext(thumbnail_filename)
+        if thumbnail_ext:
+            thumbnail_ext = thumbnail_ext[1:].lower()
+            if thumbnail_ext != 'webp' and is_webp(thumbnail_filename):
+                self._downloader.to_screen(
+                    '[ffmpeg] Correcting extension to webp and escaping path for thumbnail "%s"' % thumbnail_filename)
+                thumbnail_webp_filename = replace_extension(thumbnail_filename, 'webp')
+                os.rename(encodeFilename(thumbnail_filename), encodeFilename(thumbnail_webp_filename))
+                thumbnail_filename = thumbnail_webp_filename
+                thumbnail_ext = 'webp'
 
-            self._downloader.to_screen('[ffmpeg] Converting thumbnail "%s" to JPEG' % thumbnail_filename)
-
-            self.run_ffmpeg(thumbnail_filename, jpg_thumbnail_filename, ['-bsf:v', 'mjpeg2jpeg'])
-
-            os.remove(encodeFilename(thumbnail_filename))
-            thumbnail_filename = jpg_thumbnail_filename
+        # Convert unsupported thumbnail formats to JPEG (see #25687, #25717)
+        if thumbnail_ext not in ['jpg', 'png']:
+            # NB: % is supposed to be escaped with %% but this does not work
+            # for input files, so work around it with a plain character substitution
+            escaped_thumbnail_filename = thumbnail_filename.replace('%', '#')
+            os.rename(encodeFilename(thumbnail_filename), encodeFilename(escaped_thumbnail_filename))
+            escaped_thumbnail_jpg_filename = replace_extension(escaped_thumbnail_filename, 'jpg')
+            self._downloader.to_screen('[ffmpeg] Converting thumbnail "%s" to JPEG' % escaped_thumbnail_filename)
+            self.run_ffmpeg(escaped_thumbnail_filename, escaped_thumbnail_jpg_filename, ['-bsf:v', 'mjpeg2jpeg'])
+            os.remove(encodeFilename(escaped_thumbnail_filename))
+            thumbnail_jpg_filename = replace_extension(thumbnail_filename, 'jpg')
+            # Rename back to unescaped for further processing
+            os.rename(encodeFilename(escaped_thumbnail_jpg_filename), encodeFilename(thumbnail_jpg_filename))
+            thumbnail_filename = thumbnail_jpg_filename
 
         if info['ext'] == 'mp3':
             options = [

From ca7ebc4e5e03a79b90ffa509cd3ce6bbf65d43fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 14 Sep 2020 03:35:18 +0700
Subject: [PATCH 121/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 86b0e8ccb..041cf7113 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,21 @@
+version <unreleased>
+
+Core
++ [postprocessor/embedthumbnail] Add support for non jpg/png thumbnails
+  (#25687, #25717)
+
+Extractors
+* [rtlnl] Extend URL regular expression (#26549, #25821)
+* [youtube] Fix empty description extraction (#26575, #26006)
+* [srgssr] Extend URL regular expression (#26555, #26556, #26578)
+* [googledrive] Use redirect URLs for source format (#18877, #23919, #24689,
+  #26565)
+* [svtplay] Fix id extraction (#26576)
+* [redbulltv] Improve support for redbull.com TV localized URLs (#22063)
++ [redbulltv] Add support for new redbull.com TV URLs (#22037, #22063)
+* [soundcloud:pagedplaylist] Reduce pagination limit (#26557)
+
+
 version 2020.09.06
 
 Core

From e8c5d40bc840b9e3ec8b4e8070291a64a4fa75b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 14 Sep 2020 03:37:36 +0700
Subject: [PATCH 122/340] release 2020.09.14

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 2 ++
 youtube_dl/version.py                            | 2 +-
 8 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index f05aa66e6..352263789 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.09.06
+ [debug] youtube-dl version 2020.09.14
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 29beaf437..fa6509be3 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index f96b8d2bb..70b0f2f19 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 3a175aa4d..ec17e4a33 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.09.06
+ [debug] youtube-dl version 2020.09.14
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 4977079de..6ac963206 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.06. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.06**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 041cf7113..4143ec2fb 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.09.14
 
 Core
 + [postprocessor/embedthumbnail] Add support for non jpg/png thumbnails
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 5c4e1d58c..367545a96 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -717,6 +717,8 @@
  - **RayWenderlichCourse**
  - **RBMARadio**
  - **RDS**: RDS.ca
+ - **RedBull**
+ - **RedBullEmbed**
  - **RedBullTV**
  - **RedBullTVRrnContent**
  - **Reddit**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 45b4d3291..5625b8324 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.09.06'
+__version__ = '2020.09.14'

From 86b7c00adca578b36138b165b0add5978972917e Mon Sep 17 00:00:00 2001
From: Ori Avtalion <ori@avtalion.name>
Date: Thu, 17 Sep 2020 23:15:44 +0300
Subject: [PATCH 123/340] [downloader/http] Retry download when urlopen times
 out (#26603) (refs #10935)

---
 youtube_dl/downloader/http.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 5046878df..e14ddce58 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -106,7 +106,12 @@ class HttpFD(FileDownloader):
                 set_range(request, range_start, range_end)
             # Establish connection
             try:
-                ctx.data = self.ydl.urlopen(request)
+                try:
+                    ctx.data = self.ydl.urlopen(request)
+                except (compat_urllib_error.URLError, ) as err:
+                    if isinstance(err.reason, socket.timeout):
+                        raise RetryDownload(err)
+                    raise err
                 # When trying to resume, Content-Range HTTP header of response has to be checked
                 # to match the value of requested Range HTTP header. This is due to a webservers
                 # that don't support resuming and serve a whole file with no Content-Range

From cdc55e666f3f9c795ed74c478c6a249d992cf93f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 18 Sep 2020 03:32:54 +0700
Subject: [PATCH 124/340] [downloader/http] Improve timeout detection when
 reading block of data (refs #10935)

---
 youtube_dl/downloader/http.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index e14ddce58..6ef26548d 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -238,9 +238,11 @@ class HttpFD(FileDownloader):
                 except socket.timeout as e:
                     retry(e)
                 except socket.error as e:
-                    if e.errno not in (errno.ECONNRESET, errno.ETIMEDOUT):
-                        raise
-                    retry(e)
+                    # SSLError on Python 2 (which inherits from socket.error) may have
+                    # no errno set but carries this error message
+                    if e.errno in (errno.ECONNRESET, errno.ETIMEDOUT) or getattr(e, 'message', None) == 'The read operation timed out':
+                        retry(e)
+                    raise
 
                 byte_counter += len(data_block)
 

From f8c7bed133fe729a9e26f23e5685559e3fa12eb1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 18 Sep 2020 03:41:16 +0700
Subject: [PATCH 125/340] [extractor/common] Handle ssl.CertificateError in
 _request_webpage (closes #26601)

ssl.CertificateError is raised on some python versions <= 3.7.x
---
 youtube_dl/extractor/common.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index a61753b17..f740ddad1 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -10,6 +10,7 @@ import os
 import random
 import re
 import socket
+import ssl
 import sys
 import time
 import math
@@ -623,9 +624,12 @@ class InfoExtractor(object):
                 url_or_request = update_url_query(url_or_request, query)
             if data is not None or headers:
                 url_or_request = sanitized_Request(url_or_request, data, headers)
+        exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
+        if hasattr(ssl, 'CertificateError'):
+            exceptions.append(ssl.CertificateError)
         try:
             return self._downloader.urlopen(url_or_request)
-        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+        except tuple(exceptions) as err:
             if isinstance(err, compat_urllib_error.HTTPError):
                 if self.__can_accept_status_code(err, expected_status):
                     # Retain reference to error to prevent file object from

From 6e65a2a67e075ae8f3e4fe03c732d7772d36f5e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Stefan=20P=C3=B6schel?=
 <basicmaster@users.noreply.github.com>
Date: Fri, 18 Sep 2020 00:26:56 +0200
Subject: [PATCH 126/340] [downloader/hls] Fix incorrect end byte in Range HTTP
 header for media segments with EXT-X-BYTERANGE (#24512) (closes #14748)

The end of the byte range is the first byte that is NOT part of the
range to be downloaded. So don't include it in the requested HTTP
download range, as this additional byte leads to a broken TS packet and
subsequently to e.g. visible video corruption.

Fixes #14748.
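
A worked example (a sketch, not part of the patch): an EXT-X-BYTERANGE of 1000@2000 covers bytes 2000..2999, so the stored end offset is 3000 and the Range header has to stop one byte short of it:

# Illustrative: turn an EXT-X-BYTERANGE value "<length>[@<offset>]" into an
# inclusive HTTP Range header, mirroring the end - 1 change this patch makes.
def byterange_to_http_range(value, prev_end=0):
    length, _, offset = value.partition('@')
    start = int(offset) if offset else prev_end
    end = start + int(length)               # first byte NOT to be downloaded
    return 'bytes=%d-%d' % (start, end - 1), end

header, next_start = byterange_to_http_range('1000@2000')
assert header == 'bytes=2000-2999' and next_start == 3000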
---
 youtube_dl/downloader/hls.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
index 84bc34928..0f2c06f40 100644
--- a/youtube_dl/downloader/hls.py
+++ b/youtube_dl/downloader/hls.py
@@ -141,7 +141,7 @@ class HlsFD(FragmentFD):
                     count = 0
                     headers = info_dict.get('http_headers', {})
                     if byte_range:
-                        headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'])
+                        headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
                     while count <= fragment_retries:
                         try:
                             success, frag_content = self._download_fragment(

From 540b9f5164d50eb99d9c988ece6eb6775ccaf94a Mon Sep 17 00:00:00 2001
From: JChris246 <43832407+JChris246@users.noreply.github.com>
Date: Fri, 18 Sep 2020 18:59:19 -0400
Subject: [PATCH 127/340] [pornhub] Fix view count extraction (#26621) (refs
 #26614)

---
 youtube_dl/extractor/pornhub.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 3567a3283..c64c870dc 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -341,7 +341,7 @@ class PornHubIE(PornHubBaseIE):
             webpage, 'uploader', fatal=False)
 
         view_count = self._extract_count(
-            r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
+            r'<span class="count">([\d,\.]+)</span> [Vv]iews', webpage, 'view')
         like_count = self._extract_count(
             r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
         dislike_count = self._extract_count(

From ad06b99dd47acc8b1bd213f079e1f36da9e3a73d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 19 Sep 2020 06:13:42 +0700
Subject: [PATCH 128/340] [extractor/common] Extract author as uploader for
 VideoObject in _json_ld

---
 youtube_dl/extractor/common.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index f740ddad1..c9b8b6337 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1268,6 +1268,7 @@ class InfoExtractor(object):
                 'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
                 'duration': parse_duration(e.get('duration')),
                 'timestamp': unified_timestamp(e.get('uploadDate')),
+                'uploader': str_or_none(e.get('author')),
                 'filesize': float_or_none(e.get('contentSize')),
                 'tbr': int_or_none(e.get('bitrate')),
                 'width': int_or_none(e.get('width')),

From ce5b904050b4610bac6d99673bbe9181a3af3db5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 19 Sep 2020 06:33:17 +0700
Subject: [PATCH 129/340] [extractor/common] Relax interaction count extraction
 in _json_ld

---
 youtube_dl/extractor/common.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index c9b8b6337..021945a89 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -68,6 +68,7 @@ from ..utils import (
     sanitized_Request,
     sanitize_filename,
     str_or_none,
+    str_to_int,
     strip_or_none,
     unescapeHTML,
     unified_strdate,
@@ -1248,7 +1249,10 @@ class InfoExtractor(object):
                 interaction_type = is_e.get('interactionType')
                 if not isinstance(interaction_type, compat_str):
                     continue
-                interaction_count = int_or_none(is_e.get('userInteractionCount'))
+                # For interaction count some sites provide a string instead of
+                # an integer (as per spec) with non-digit characters (e.g. ","),
+                # so extract the count with the more relaxed str_to_int
+                interaction_count = str_to_int(is_e.get('userInteractionCount'))
                 if interaction_count is None:
                     continue
                 count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
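
For illustration (assuming the behaviour of int_or_none and str_to_int in youtube_dl.utils at this revision): a site may report userInteractionCount as "2,647", which the strict helper rejects but the relaxed one accepts:

# Illustrative only.
from youtube_dl.utils import int_or_none, str_to_int

assert int_or_none('2,647') is None    # strict parsing gives up on the comma
assert str_to_int('2,647') == 2647     # relaxed parsing strips it first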

From cd85a1bb8b24eaf6a421a32a985d4c3ad4f80597 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 19 Sep 2020 06:34:34 +0700
Subject: [PATCH 130/340] [pornhub] Extract metadata from JSON-LD (closes
 #26614)

---
 youtube_dl/extractor/pornhub.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index c64c870dc..529f3f711 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -17,6 +17,7 @@ from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
+    merge_dicts,
     NO_DEFAULT,
     orderedSet,
     remove_quotes,
@@ -59,13 +60,14 @@ class PornHubIE(PornHubBaseIE):
                     '''
     _TESTS = [{
         'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
-        'md5': '1e19b41231a02eba417839222ac9d58e',
+        'md5': 'a6391306d050e4547f62b3f485dd9ba9',
         'info_dict': {
             'id': '648719015',
             'ext': 'mp4',
             'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
             'uploader': 'Babes',
             'upload_date': '20130628',
+            'timestamp': 1372447216,
             'duration': 361,
             'view_count': int,
             'like_count': int,
@@ -82,8 +84,8 @@ class PornHubIE(PornHubBaseIE):
             'id': '1331683002',
             'ext': 'mp4',
             'title': '重庆婷婷女王足交',
-            'uploader': 'Unknown',
             'upload_date': '20150213',
+            'timestamp': 1423804862,
             'duration': 1753,
             'view_count': int,
             'like_count': int,
@@ -121,6 +123,7 @@ class PornHubIE(PornHubBaseIE):
         'params': {
             'skip_download': True,
         },
+        'skip': 'This video has been disabled',
     }, {
         'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
         'only_matching': True,
@@ -338,7 +341,7 @@ class PornHubIE(PornHubBaseIE):
 
         video_uploader = self._html_search_regex(
             r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
-            webpage, 'uploader', fatal=False)
+            webpage, 'uploader', default=None)
 
         view_count = self._extract_count(
             r'<span class="count">([\d,\.]+)</span> [Vv]iews', webpage, 'view')
@@ -356,7 +359,11 @@ class PornHubIE(PornHubBaseIE):
             if div:
                 return re.findall(r'<a[^>]+\bhref=[^>]+>([^<]+)', div)
 
-        return {
+        info = self._search_json_ld(webpage, video_id, default={})
+        # description provided in JSON-LD is irrelevant
+        info['description'] = None
+
+        return merge_dicts({
             'id': video_id,
             'uploader': video_uploader,
             'upload_date': upload_date,
@@ -372,7 +379,7 @@ class PornHubIE(PornHubBaseIE):
             'tags': extract_list('tags'),
             'categories': extract_list('categories'),
             'subtitles': subtitles,
-        }
+        }, info)
 
 
 class PornHubPlaylistBaseIE(PornHubBaseIE):

From b856b3997ce2978f7a8c386b4ce4840fd221c45a Mon Sep 17 00:00:00 2001
From: Patrick Dessalle <patrick@dessalle.be>
Date: Wed, 28 Aug 2019 19:04:57 +0200
Subject: [PATCH 131/340] [telequebec] Add support for Brightcove videos
 (closes #25833)

---
 youtube_dl/extractor/telequebec.py | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/telequebec.py b/youtube_dl/extractor/telequebec.py
index c82c94b3a..3adea7bc5 100644
--- a/youtube_dl/extractor/telequebec.py
+++ b/youtube_dl/extractor/telequebec.py
@@ -12,6 +12,8 @@ from ..utils import (
 
 
 class TeleQuebecBaseIE(InfoExtractor):
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/6150020952001/default_default/index.html?videoId=%s'
+
     @staticmethod
     def _limelight_result(media_id):
         return {
@@ -21,6 +23,13 @@ class TeleQuebecBaseIE(InfoExtractor):
             'ie_key': 'LimelightMedia',
         }
 
+    def _brightcove_result(self, brightcove_id):
+        return self.url_result(
+            smuggle_url(
+                self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
+                {'geo_countries': ['CA']}),
+            'BrightcoveNew', brightcove_id)
+
 
 class TeleQuebecIE(TeleQuebecBaseIE):
     _VALID_URL = r'''(?x)
@@ -37,7 +46,7 @@ class TeleQuebecIE(TeleQuebecBaseIE):
             'id': '577116881b4b439084e6b1cf4ef8b1b3',
             'ext': 'mp4',
             'title': 'Un petit choc et puis repart!',
-            'description': 'md5:b04a7e6b3f74e32d7b294cffe8658374',
+            'description': 'md5:067bc84bd6afecad85e69d1000730907',
         },
         'params': {
             'skip_download': True,
@@ -58,7 +67,10 @@ class TeleQuebecIE(TeleQuebecBaseIE):
             'https://mnmedias.api.telequebec.tv/api/v2/media/' + media_id,
             media_id)['media']
 
-        info = self._limelight_result(media_data['streamInfo']['sourceId'])
+        if media_data['streamInfo']['source'] == 'Brightcove':
+            info = self._brightcove_result(media_data['streamInfo']['sourceId'])
+        elif media_data['streamInfo']['source'] == 'Limelight':
+            info = self._limelight_result(media_data['streamInfo']['sourceId'])
         info.update({
             'title': media_data.get('title'),
             'description': try_get(

From 82ef02e936a0e2ca698048c8cd79273a22e79867 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 19 Sep 2020 07:52:42 +0700
Subject: [PATCH 132/340] [telequebec] Fix issues (closes #26368)

---
 youtube_dl/extractor/telequebec.py | 55 +++++++++++++++++++++---------
 1 file changed, 38 insertions(+), 17 deletions(-)

diff --git a/youtube_dl/extractor/telequebec.py b/youtube_dl/extractor/telequebec.py
index 3adea7bc5..b4c485b9b 100644
--- a/youtube_dl/extractor/telequebec.py
+++ b/youtube_dl/extractor/telequebec.py
@@ -12,23 +12,24 @@ from ..utils import (
 
 
 class TeleQuebecBaseIE(InfoExtractor):
-    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/6150020952001/default_default/index.html?videoId=%s'
+    @staticmethod
+    def _result(url, ie_key):
+        return {
+            '_type': 'url_transparent',
+            'url': smuggle_url(url, {'geo_countries': ['CA']}),
+            'ie_key': ie_key,
+        }
 
     @staticmethod
     def _limelight_result(media_id):
-        return {
-            '_type': 'url_transparent',
-            'url': smuggle_url(
-                'limelight:media:' + media_id, {'geo_countries': ['CA']}),
-            'ie_key': 'LimelightMedia',
-        }
+        return TeleQuebecBaseIE._result(
+            'limelight:media:' + media_id, 'LimelightMedia')
 
-    def _brightcove_result(self, brightcove_id):
-        return self.url_result(
-            smuggle_url(
-                self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
-                {'geo_countries': ['CA']}),
-            'BrightcoveNew', brightcove_id)
+    @staticmethod
+    def _brightcove_result(brightcove_id):
+        return TeleQuebecBaseIE._result(
+            'http://players.brightcove.net/6150020952001/default_default/index.html?videoId=%s'
+            % brightcove_id, 'BrightcoveNew')
 
 
 class TeleQuebecIE(TeleQuebecBaseIE):
@@ -51,6 +52,22 @@ class TeleQuebecIE(TeleQuebecBaseIE):
         'params': {
             'skip_download': True,
         },
+    }, {
+        'url': 'https://zonevideo.telequebec.tv/media/55267/le-soleil/passe-partout',
+        'info_dict': {
+            'id': '6167180337001',
+            'ext': 'mp4',
+            'title': 'Le soleil',
+            'description': 'md5:64289c922a8de2abbe99c354daffde02',
+            'uploader_id': '6150020952001',
+            'upload_date': '20200625',
+            'timestamp': 1593090307,
+        },
+        'params': {
+            'format': 'bestvideo',
+            'skip_download': True,
+        },
+        'add_ie': ['BrightcoveNew'],
     }, {
         # no description
         'url': 'http://zonevideo.telequebec.tv/media/30261',
@@ -67,10 +84,14 @@ class TeleQuebecIE(TeleQuebecBaseIE):
             'https://mnmedias.api.telequebec.tv/api/v2/media/' + media_id,
             media_id)['media']
 
-        if media_data['streamInfo']['source'] == 'Brightcove':
-            info = self._brightcove_result(media_data['streamInfo']['sourceId'])
-        elif media_data['streamInfo']['source'] == 'Limelight':
-            info = self._limelight_result(media_data['streamInfo']['sourceId'])
+        source_id = media_data['streamInfo']['sourceId']
+        source = (try_get(
+            media_data, lambda x: x['streamInfo']['source'],
+            compat_str) or 'limelight').lower()
+        if source == 'brightcove':
+            info = self._brightcove_result(source_id)
+        else:
+            info = self._limelight_result(source_id)
         info.update({
             'title': media_data.get('title'),
             'description': try_get(

From defc820b70e3f0131fad450fc1e673e18b00a625 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 20 Sep 2020 10:05:00 +0700
Subject: [PATCH 133/340] [twitch] Switch streams to GraphQL and refactor
 (closes #26535)

---
 youtube_dl/extractor/twitch.py | 142 +++++++++++++++++++--------------
 1 file changed, 81 insertions(+), 61 deletions(-)

diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index eadc48c6d..ab6654432 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -24,7 +24,6 @@ from ..utils import (
     parse_duration,
     parse_iso8601,
     qualities,
-    str_or_none,
     try_get,
     unified_timestamp,
     update_url_query,
@@ -337,19 +336,27 @@ def _make_video_result(node):
 class TwitchGraphQLBaseIE(TwitchBaseIE):
     _PAGE_LIMIT = 100
 
-    def _download_gql(self, video_id, op, variables, sha256_hash, note, fatal=True):
+    _OPERATION_HASHES = {
+        'CollectionSideBar': '27111f1b382effad0b6def325caef1909c733fe6a4fbabf54f8d491ef2cf2f14',
+        'FilterableVideoTower_Videos': 'a937f1d22e269e39a03b509f65a7490f9fc247d7f83d6ac1421523e3b68042cb',
+        'ClipsCards__User': 'b73ad2bfaecfd30a9e6c28fada15bd97032c83ec77a0440766a56fe0bd632777',
+        'ChannelCollectionsContent': '07e3691a1bad77a36aba590c351180439a40baefc1c275356f40fc7082419a84',
+        'StreamMetadata': '1c719a40e481453e5c48d9bb585d971b8b372f8ebb105b17076722264dfa5b3e',
+        'ComscoreStreamingQuery': 'e1edae8122517d013405f237ffcc124515dc6ded82480a88daef69c83b53ac01',
+        'VideoPreviewOverlay': '3006e77e51b128d838fa4e835723ca4dc9a05c5efd4466c1085215c6e437e65c',
+    }
+
+    def _download_gql(self, video_id, ops, note, fatal=True):
+        for op in ops:
+            op['extensions'] = {
+                'persistedQuery': {
+                    'version': 1,
+                    'sha256Hash': self._OPERATION_HASHES[op['operationName']],
+                }
+            }
         return self._download_json(
             'https://gql.twitch.tv/gql', video_id, note,
-            data=json.dumps({
-                'operationName': op,
-                'variables': variables,
-                'extensions': {
-                    'persistedQuery': {
-                        'version': 1,
-                        'sha256Hash': sha256_hash,
-                    }
-                }
-            }).encode(),
+            data=json.dumps(ops).encode(),
             headers={
                 'Content-Type': 'text/plain;charset=UTF-8',
                 'Client-ID': self._CLIENT_ID,
@@ -369,14 +376,15 @@ class TwitchCollectionIE(TwitchGraphQLBaseIE):
     }]
 
     _OPERATION_NAME = 'CollectionSideBar'
-    _SHA256_HASH = '27111f1b382effad0b6def325caef1909c733fe6a4fbabf54f8d491ef2cf2f14'
 
     def _real_extract(self, url):
         collection_id = self._match_id(url)
         collection = self._download_gql(
-            collection_id, self._OPERATION_NAME,
-            {'collectionID': collection_id}, self._SHA256_HASH,
-            'Downloading collection GraphQL')['data']['collection']
+            collection_id, [{
+                'operationName': self._OPERATION_NAME,
+                'variables': {'collectionID': collection_id},
+            }],
+            'Downloading collection GraphQL')[0]['data']['collection']
         title = collection.get('title')
         entries = []
         for edge in collection['items']['edges']:
@@ -403,14 +411,16 @@ class TwitchPlaylistBaseIE(TwitchGraphQLBaseIE):
             if cursor:
                 variables['cursor'] = cursor
             page = self._download_gql(
-                channel_name, self._OPERATION_NAME, variables,
-                self._SHA256_HASH,
+                channel_name, [{
+                    'operationName': self._OPERATION_NAME,
+                    'variables': variables,
+                }],
                 'Downloading %ss GraphQL page %s' % (self._NODE_KIND, page_num),
                 fatal=False)
             if not page:
                 break
             edges = try_get(
-                page, lambda x: x['data']['user'][entries_key]['edges'], list)
+                page, lambda x: x[0]['data']['user'][entries_key]['edges'], list)
             if not edges:
                 break
             for edge in edges:
@@ -553,7 +563,6 @@ class TwitchVideosIE(TwitchPlaylistBaseIE):
         'views': 'Popular',
     }
 
-    _SHA256_HASH = 'a937f1d22e269e39a03b509f65a7490f9fc247d7f83d6ac1421523e3b68042cb'
     _OPERATION_NAME = 'FilterableVideoTower_Videos'
     _ENTRY_KIND = 'video'
     _EDGE_KIND = 'VideoEdge'
@@ -622,7 +631,6 @@ class TwitchVideosClipsIE(TwitchPlaylistBaseIE):
     # NB: values other than 20 result in skipped videos
     _PAGE_LIMIT = 20
 
-    _SHA256_HASH = 'b73ad2bfaecfd30a9e6c28fada15bd97032c83ec77a0440766a56fe0bd632777'
     _OPERATION_NAME = 'ClipsCards__User'
     _ENTRY_KIND = 'clip'
     _EDGE_KIND = 'ClipEdge'
@@ -680,7 +688,6 @@ class TwitchVideosCollectionsIE(TwitchPlaylistBaseIE):
         'playlist_mincount': 3,
     }]
 
-    _SHA256_HASH = '07e3691a1bad77a36aba590c351180439a40baefc1c275356f40fc7082419a84'
     _OPERATION_NAME = 'ChannelCollectionsContent'
     _ENTRY_KIND = 'collection'
     _EDGE_KIND = 'CollectionsItemEdge'
@@ -717,7 +724,7 @@ class TwitchVideosCollectionsIE(TwitchPlaylistBaseIE):
             playlist_title='%s - Collections' % channel_name)
 
 
-class TwitchStreamIE(TwitchBaseIE):
+class TwitchStreamIE(TwitchGraphQLBaseIE):
     IE_NAME = 'twitch:stream'
     _VALID_URL = r'''(?x)
                     https?://
@@ -774,28 +781,43 @@ class TwitchStreamIE(TwitchBaseIE):
                 else super(TwitchStreamIE, cls).suitable(url))
 
     def _real_extract(self, url):
-        channel_name = self._match_id(url)
+        channel_name = self._match_id(url).lower()
 
-        access_token = self._download_access_token(channel_name)
+        gql = self._download_gql(
+            channel_name, [{
+                'operationName': 'StreamMetadata',
+                'variables': {'channelLogin': channel_name},
+            }, {
+                'operationName': 'ComscoreStreamingQuery',
+                'variables': {
+                    'channel': channel_name,
+                    'clipSlug': '',
+                    'isClip': False,
+                    'isLive': True,
+                    'isVodOrCollection': False,
+                    'vodID': '',
+                },
+            }, {
+                'operationName': 'VideoPreviewOverlay',
+                'variables': {'login': channel_name},
+            }],
+            'Downloading stream GraphQL')
 
-        token = access_token['token']
-        channel_id = self._extract_channel_id(token, channel_name)
+        user = gql[0]['data']['user']
 
-        stream = self._call_api(
-            'kraken/streams/%s?stream_type=all' % channel_id,
-            channel_id, 'Downloading stream JSON').get('stream')
+        if not user:
+            raise ExtractorError(
+                '%s does not exist' % channel_name, expected=True)
+
+        stream = user['stream']
 
         if not stream:
-            raise ExtractorError('%s is offline' % channel_id, expected=True)
+            raise ExtractorError('%s is offline' % channel_name, expected=True)
 
-        # Channel name may be typed if different case than the original channel name
-        # (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
-        # an invalid m3u8 URL. Working around by use of original channel name from stream
-        # JSON and fallback to lowercase if it's not available.
-        channel_name = try_get(
-            stream, lambda x: x['channel']['name'],
-            compat_str) or channel_name.lower()
+        access_token = self._download_access_token(channel_name)
+        token = access_token['token']
 
+        stream_id = stream.get('id') or channel_name
         query = {
             'allow_source': 'true',
             'allow_audio_only': 'true',
@@ -808,41 +830,39 @@ class TwitchStreamIE(TwitchBaseIE):
             'token': token.encode('utf-8'),
         }
         formats = self._extract_m3u8_formats(
-            '%s/api/channel/hls/%s.m3u8?%s'
-            % (self._USHER_BASE, channel_name, compat_urllib_parse_urlencode(query)),
-            channel_id, 'mp4')
+            '%s/api/channel/hls/%s.m3u8' % (self._USHER_BASE, channel_name),
+            stream_id, 'mp4', query=query)
         self._prefer_source(formats)
 
         view_count = stream.get('viewers')
-        timestamp = parse_iso8601(stream.get('created_at'))
+        timestamp = unified_timestamp(stream.get('createdAt'))
 
-        channel = stream['channel']
-        title = self._live_title(channel.get('display_name') or channel.get('name'))
-        description = channel.get('status')
+        sq_user = try_get(gql, lambda x: x[1]['data']['user'], dict) or {}
+        uploader = sq_user.get('displayName')
+        description = try_get(
+            sq_user, lambda x: x['broadcastSettings']['title'], compat_str)
 
-        thumbnails = []
-        for thumbnail_key, thumbnail_url in stream['preview'].items():
-            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
-            if not m:
-                continue
-            thumbnails.append({
-                'url': thumbnail_url,
-                'width': int(m.group('width')),
-                'height': int(m.group('height')),
-            })
+        thumbnail = url_or_none(try_get(
+            gql, lambda x: x[2]['data']['user']['stream']['previewImageURL'],
+            compat_str))
+
+        title = uploader or channel_name
+        stream_type = stream.get('type')
+        if stream_type in ['rerun', 'live']:
+            title += ' (%s)' % stream_type
 
         return {
-            'id': str_or_none(stream.get('_id')) or channel_id,
+            'id': stream_id,
             'display_id': channel_name,
-            'title': title,
+            'title': self._live_title(title),
             'description': description,
-            'thumbnails': thumbnails,
-            'uploader': channel.get('display_name'),
-            'uploader_id': channel.get('name'),
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'uploader_id': channel_name,
             'timestamp': timestamp,
             'view_count': view_count,
             'formats': formats,
-            'is_live': True,
+            'is_live': stream_type == 'live',
         }
 
 

From 1ca5f821c8708720d5897b48a8c8d9e3d8822f93 Mon Sep 17 00:00:00 2001
From: nixxo <c.nixxo@gmail.com>
Date: Sun, 20 Sep 2020 06:39:42 +0200
Subject: [PATCH 134/340] [redtube] Extend _VALID_URL (#26506)

---
 youtube_dl/extractor/redtube.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py
index 2d2f6a98c..a1ca791ca 100644
--- a/youtube_dl/extractor/redtube.py
+++ b/youtube_dl/extractor/redtube.py
@@ -15,7 +15,7 @@ from ..utils import (
 
 
 class RedTubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:(?:www\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
     _TESTS = [{
         'url': 'http://www.redtube.com/66418',
         'md5': 'fc08071233725f26b8f014dba9590005',
@@ -31,6 +31,9 @@ class RedTubeIE(InfoExtractor):
     }, {
         'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
         'only_matching': True,
+    }, {
+        'url': 'http://it.redtube.com/66418',
+        'only_matching': True,
     }]
 
     @staticmethod

From bbc3b5b4bb760ce1c04cbe0374a82c53acd5251c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 20 Sep 2020 12:23:38 +0700
Subject: [PATCH 135/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 4143ec2fb..7610cab17 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,24 @@
+version <unreleased>
+
+Core
+* [extractor/common] Relax interaction count extraction in _json_ld
++ [extractor/common] Extract author as uploader for VideoObject in _json_ld
+* [downloader/hls] Fix incorrect end byte in Range HTTP header for
+  media segments with EXT-X-BYTERANGE (#14748, #24512)
+* [extractor/common] Handle ssl.CertificateError in _request_webpage (#26601)
+* [downloader/http] Improve timeout detection when reading block of data
+  (#10935)
+* [downloader/http] Retry download when urlopen times out (#10935, #26603)
+
+Extractors
+* [redtube] Extend URL regular expression (#26506)
+* [twitch] Refactor
+* [twitch:stream] Switch to GraphQL and fix reruns (#26535)
++ [telequebec] Add support for brightcove videos (#25833)
+* [pornhub] Extract metadata from JSON-LD (#26614)
+* [pornhub] Fix view count extraction (#26621, #26614)
+
+
 version 2020.09.14
 
 Core

From b55715934bb7f9474f69b99e4d51cc83dee7cbef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 20 Sep 2020 12:30:45 +0700
Subject: [PATCH 136/340] release 2020.09.20

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 352263789..ce0319fe2 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.09.14
+ [debug] youtube-dl version 2020.09.20
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index fa6509be3..a4002603c 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 70b0f2f19..3f8b6ce2e 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index ec17e4a33..d880c225a 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.09.14
+ [debug] youtube-dl version 2020.09.20
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 6ac963206..dd5fb5144 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.14. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.14**
+- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 7610cab17..9b52b7bd2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.09.20
 
 Core
 * [extractor/common] Relax interaction count extraction in _json_ld
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 5625b8324..709e5c74c 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.09.14'
+__version__ = '2020.09.20'

From 0837992a226690d514eb01b7460bed4a33fddb30 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 22 Sep 2020 06:44:14 +0700
Subject: [PATCH 137/340] [downloader/http] Fix access to not yet opened stream
 in retry

---
 youtube_dl/downloader/http.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 6ef26548d..04da14d91 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -223,9 +223,10 @@ class HttpFD(FileDownloader):
 
             def retry(e):
                 to_stdout = ctx.tmpfilename == '-'
-                if not to_stdout:
-                    ctx.stream.close()
-                ctx.stream = None
+                if ctx.stream is not None:
+                    if not to_stdout:
+                        ctx.stream.close()
+                    ctx.stream = None
                 ctx.resume_len = byte_counter if to_stdout else os.path.getsize(encodeFilename(ctx.tmpfilename))
                 raise RetryDownload(e)
 

From c5764b3f89b66e0148a186490f522ae7c259a55e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 22 Sep 2020 07:01:59 +0700
Subject: [PATCH 138/340] [downloader/http] Properly handle missing message in
 SSLError (closes #26646)

---
 youtube_dl/downloader/http.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 04da14d91..96379caf1 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -241,7 +241,7 @@ class HttpFD(FileDownloader):
                 except socket.error as e:
                     # SSLError on python 2 (inherits socket.error) may have
                     # no errno set but this error message
-                    if e.errno in (errno.ECONNRESET, errno.ETIMEDOUT) or getattr(e, 'message') == 'The read operation timed out':
+                    if e.errno in (errno.ECONNRESET, errno.ETIMEDOUT) or getattr(e, 'message', None) == 'The read operation timed out':
                         retry(e)
                     raise
 

From adae9e844b0a40bf686a142a20c7ca30e4e1145b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 24 Sep 2020 06:36:07 +0700
Subject: [PATCH 139/340] [README.md] Fix autonumber sequence description (refs
 #26686)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 45326c69e..cd8856828 100644
--- a/README.md
+++ b/README.md
@@ -545,7 +545,7 @@ The basic usage is not to set any template arguments when downloading a single f
  - `extractor` (string): Name of the extractor
  - `extractor_key` (string): Key name of the extractor
  - `epoch` (numeric): Unix epoch when creating the file
- - `autonumber` (numeric): Five-digit number that will be increased with each download, starting at zero
+ - `autonumber` (numeric): Number that will be increased with each download, starting at `--autonumber-start`
  - `playlist` (string): Name or id of the playlist that contains the video
  - `playlist_index` (numeric): Index of the video in the playlist padded with leading zeros according to the total length of the playlist
  - `playlist_id` (string): Playlist identifier

From 0c92f1e96b004fc7a04eac0759f115a535c8e03a Mon Sep 17 00:00:00 2001
From: Surkal <Surkal@users.noreply.github.com>
Date: Thu, 24 Sep 2020 01:46:58 +0200
Subject: [PATCH 140/340] [iprima] Improve video id extraction (#26507) (closes
 #26494)

---
 youtube_dl/extractor/iprima.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py
index 53a550c11..648ae6741 100644
--- a/youtube_dl/extractor/iprima.py
+++ b/youtube_dl/extractor/iprima.py
@@ -86,7 +86,8 @@ class IPrimaIE(InfoExtractor):
             (r'<iframe[^>]+\bsrc=["\'](?:https?:)?//(?:api\.play-backend\.iprima\.cz/prehravac/embedded|prima\.iprima\.cz/[^/]+/[^/]+)\?.*?\bid=(p\d+)',
              r'data-product="([^"]+)">',
              r'id=["\']player-(p\d+)"',
-             r'playerId\s*:\s*["\']player-(p\d+)'),
+             r'playerId\s*:\s*["\']player-(p\d+)',
+             r'\bvideos\s*=\s*["\'](p\d+)'),
             webpage, 'real id')
 
         playerpage = self._download_webpage(

From d65d89183f645a0e95910c3861491a75c26000eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 24 Sep 2020 07:36:38 +0700
Subject: [PATCH 141/340] [expressen] Add support for di.se (closes #26670)

---
 youtube_dl/extractor/expressen.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/expressen.py b/youtube_dl/extractor/expressen.py
index f79365038..dc8b855d2 100644
--- a/youtube_dl/extractor/expressen.py
+++ b/youtube_dl/extractor/expressen.py
@@ -15,7 +15,7 @@ from ..utils import (
 class ExpressenIE(InfoExtractor):
     _VALID_URL = r'''(?x)
                     https?://
-                        (?:www\.)?expressen\.se/
+                        (?:www\.)?(?:expressen|di)\.se/
                         (?:(?:tvspelare/video|videoplayer/embed)/)?
                         tv/(?:[^/]+/)*
                         (?P<id>[^/?#&]+)
@@ -42,13 +42,16 @@ class ExpressenIE(InfoExtractor):
     }, {
         'url': 'https://www.expressen.se/videoplayer/embed/tv/ditv/ekonomistudion/experterna-har-ar-fragorna-som-avgor-valet/?embed=true&external=true&autoplay=true&startVolume=0&partnerId=di',
         'only_matching': True,
+    }, {
+        'url': 'https://www.di.se/videoplayer/embed/tv/ditv/borsmorgon/implantica-rusar-70--under-borspremiaren-hor-styrelsemedlemmen/?embed=true&external=true&autoplay=true&startVolume=0&partnerId=di',
+        'only_matching': True,
     }]
 
     @staticmethod
     def _extract_urls(webpage):
         return [
             mobj.group('url') for mobj in re.finditer(
-                r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?expressen\.se/(?:tvspelare/video|videoplayer/embed)/tv/.+?)\1',
+                r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?(?:expressen|di)\.se/(?:tvspelare/video|videoplayer/embed)/tv/.+?)\1',
                 webpage)]
 
     def _real_extract(self, url):

From 1050e0d09f02209cc5c6f45da46c2b79f73d96f4 Mon Sep 17 00:00:00 2001
From: Felix Yan <felixonmars@archlinux.org>
Date: Sun, 18 Oct 2020 00:02:17 +0800
Subject: [PATCH 142/340] [iqiyi] Fix typo (#26884)

---
 youtube_dl/extractor/iqiyi.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
index cd11aa70f..5df674daf 100644
--- a/youtube_dl/extractor/iqiyi.py
+++ b/youtube_dl/extractor/iqiyi.py
@@ -150,7 +150,7 @@ class IqiyiSDKInterpreter(object):
             elif function in other_functions:
                 other_functions[function]()
             else:
-                raise ExtractorError('Unknown funcion %s' % function)
+                raise ExtractorError('Unknown function %s' % function)
 
         return sdk.target
 

From 605535776a8d5beba78b4d1b057d5206ddd969eb Mon Sep 17 00:00:00 2001
From: Sergio Livi <me@serl.it>
Date: Sat, 17 Oct 2020 18:14:46 +0200
Subject: [PATCH 143/340] [ustream] Add support for video.ibm.com (#26894)

---
 youtube_dl/extractor/ustream.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
index 582090d0d..9e860aeb7 100644
--- a/youtube_dl/extractor/ustream.py
+++ b/youtube_dl/extractor/ustream.py
@@ -19,7 +19,7 @@ from ..utils import (
 
 
 class UstreamIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:ustream\.tv|video\.ibm\.com)/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)'
     IE_NAME = 'ustream'
     _TESTS = [{
         'url': 'http://www.ustream.tv/recorded/20274954',
@@ -67,12 +67,15 @@ class UstreamIE(InfoExtractor):
         'params': {
             'skip_download': True,  # m3u8 download
         },
+    }, {
+        'url': 'https://video.ibm.com/embed/recorded/128240221?&autoplay=true&controls=true&volume=100',
+        'only_matching': True,
     }]
 
     @staticmethod
     def _extract_url(webpage):
         mobj = re.search(
-            r'<iframe[^>]+?src=(["\'])(?P<url>http://www\.ustream\.tv/embed/.+?)\1', webpage)
+            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:www\.)?(?:ustream\.tv|video\.ibm\.com)/embed/.+?)\1', webpage)
         if mobj is not None:
             return mobj.group('url')
 

From 4eda10499e8db831167062b0e0dbc7d10d34c1f9 Mon Sep 17 00:00:00 2001
From: Kevin O'Connor <kevin.oconnor7@gmail.com>
Date: Sat, 17 Oct 2020 13:10:41 -0400
Subject: [PATCH 144/340] [utils] Don't attempt to coerce JS strings to numbers
 in js_to_json (#26851)

The current logic in `js_to_json` tries to rewrite octal/hex numbers to
decimal. However, by the time that logic runs, the surrounding `"` or `'`
have already been trimmed off. This causes values that were originally
strings, but happen to look like octal/hex numbers, to get rewritten to
decimal and returned as a number rather than a string.

In practice, something like:

```js
{
  "0x40": "foo",
  "040": "bar",
}
```

would get rewritten as:

```json
{
  64: "foo",
  32: "bar
}
```

This is problematic because the result is no longer valid JSON: object
keys must be strings.
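
For quick reference, a minimal standalone sketch of the intended behaviour,
mirroring the test cases this patch adds to `test/test_utils.py`:

```python
import json

from youtube_dl.utils import js_to_json

# Quoted keys/values that merely look like hex or octal numbers now survive
# the conversion untouched, so the output parses as valid JSON.
assert json.loads(js_to_json('{ "0x40": "0x40" }')) == {'0x40': '0x40'}
assert json.loads(js_to_json('{ "040": "040" }')) == {'040': '040'}

# Unquoted numeric literals are still rewritten as before.
assert json.loads(js_to_json('{42:4.2e1}')) == {'42': 42.0}
```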
---
 test/test_utils.py  |  6 ++++++
 youtube_dl/utils.py | 12 ++++++------
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/test/test_utils.py b/test/test_utils.py
index 962fd8d75..c2d1e4fb1 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -994,6 +994,12 @@ class TestUtil(unittest.TestCase):
         on = js_to_json('{42:4.2e1}')
         self.assertEqual(json.loads(on), {'42': 42.0})
 
+        on = js_to_json('{ "0x40": "0x40" }')
+        self.assertEqual(json.loads(on), {'0x40': '0x40'})
+
+        on = js_to_json('{ "040": "040" }')
+        self.assertEqual(json.loads(on), {'040': '040'})
+
     def test_js_to_json_malformed(self):
         self.assertEqual(js_to_json('42a1'), '42"a1"')
         self.assertEqual(js_to_json('42a-1'), '42"a"-1')
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 01d9c0362..737e2810e 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -4088,12 +4088,12 @@ def js_to_json(code):
                 '\\\n': '',
                 '\\x': '\\u00',
             }.get(m.group(0), m.group(0)), v[1:-1])
-
-        for regex, base in INTEGER_TABLE:
-            im = re.match(regex, v)
-            if im:
-                i = int(im.group(1), base)
-                return '"%d":' % i if v.endswith(':') else '%d' % i
+        else:
+            for regex, base in INTEGER_TABLE:
+                im = re.match(regex, v)
+                if im:
+                    i = int(im.group(1), base)
+                    return '"%d":' % i if v.endswith(':') else '%d' % i
 
         return '"%s"' % v
 

From 7d740e7dc7149cfd93dde1fa47e9f314e72582c2 Mon Sep 17 00:00:00 2001
From: Hannu Hartikainen <hannu.hartikainen@gmail.com>
Date: Mon, 19 Oct 2020 17:56:23 +0000
Subject: [PATCH 145/340] [23video] Relax _VALID_URL (#26870)

---
 youtube_dl/extractor/twentythreevideo.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/twentythreevideo.py b/youtube_dl/extractor/twentythreevideo.py
index aa0c6e90f..dc5609192 100644
--- a/youtube_dl/extractor/twentythreevideo.py
+++ b/youtube_dl/extractor/twentythreevideo.py
@@ -8,8 +8,8 @@ from ..utils import int_or_none
 
 class TwentyThreeVideoIE(InfoExtractor):
     IE_NAME = '23video'
-    _VALID_URL = r'https?://video\.(?P<domain>twentythree\.net|23video\.com|filmweb\.no)/v\.ihtml/player\.html\?(?P<query>.*?\bphoto(?:_|%5f)id=(?P<id>\d+).*)'
-    _TEST = {
+    _VALID_URL = r'https?://(?P<domain>[^.]+\.(?:twentythree\.net|23video\.com|filmweb\.no))/v\.ihtml/player\.html\?(?P<query>.*?\bphoto(?:_|%5f)id=(?P<id>\d+).*)'
+    _TESTS = [{
         'url': 'https://video.twentythree.net/v.ihtml/player.html?showDescriptions=0&source=site&photo%5fid=20448876&autoPlay=1',
         'md5': '75fcf216303eb1dae9920d651f85ced4',
         'info_dict': {
@@ -21,11 +21,14 @@ class TwentyThreeVideoIE(InfoExtractor):
             'uploader_id': '12258964',
             'uploader': 'Rasmus Bysted',
         }
-    }
+    }, {
+        'url': 'https://bonnier-publications-danmark.23video.com/v.ihtml/player.html?token=f0dc46476e06e13afd5a1f84a29e31e8&source=embed&photo%5fid=36137620',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         domain, query, photo_id = re.match(self._VALID_URL, url).groups()
-        base_url = 'https://video.%s' % domain
+        base_url = 'https://%s' % domain
         photo_data = self._download_json(
             base_url + '/api/photo/list?' + query, photo_id, query={
                 'format': 'json',

From 48c5663c5f7dd9ecc4720f7c1522627665197939 Mon Sep 17 00:00:00 2001
From: Toan Nguyen <davidnguyen1501@gmail.com>
Date: Thu, 22 Oct 2020 19:15:05 +0700
Subject: [PATCH 146/340] [afreecatv] Fix typo (#26970)

---
 youtube_dl/extractor/afreecatv.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/afreecatv.py b/youtube_dl/extractor/afreecatv.py
index 6275e5209..b56abb1e6 100644
--- a/youtube_dl/extractor/afreecatv.py
+++ b/youtube_dl/extractor/afreecatv.py
@@ -275,7 +275,7 @@ class AfreecaTVIE(InfoExtractor):
         video_element = video_xml.findall(compat_xpath('./track/video'))[-1]
         if video_element is None or video_element.text is None:
             raise ExtractorError(
-                'Video %s video does not exist' % video_id, expected=True)
+                'Video %s does not exist' % video_id, expected=True)
 
         video_url = video_element.text.strip()
 

From 416da574ec0df3388f652e44f7fe71b1e3a4701f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 23 Oct 2020 21:31:37 +0700
Subject: [PATCH 147/340] [ytsearch] Fix extraction (closes #26920)

---
 youtube_dl/extractor/youtube.py | 116 +++++++++++++++++++++-----------
 1 file changed, 78 insertions(+), 38 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 02f3ab61a..bd1515380 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3181,54 +3181,94 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
     _MAX_RESULTS = float('inf')
     IE_NAME = 'youtube:search'
     _SEARCH_KEY = 'ytsearch'
-    _EXTRA_QUERY_ARGS = {}
+    _SEARCH_PARAMS = None
     _TESTS = []
 
+    def _entries(self, query, n):
+        data = {
+            'context': {
+                'client': {
+                    'clientName': 'WEB',
+                    'clientVersion': '2.20201021.03.00',
+                }
+            },
+            'query': query,
+        }
+        if self._SEARCH_PARAMS:
+            data['params'] = self._SEARCH_PARAMS
+        total = 0
+        for page_num in itertools.count(1):
+            search = self._download_json(
+                'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
+                video_id='query "%s"' % query,
+                note='Downloading page %s' % page_num,
+                errnote='Unable to download API page', fatal=False,
+                data=json.dumps(data).encode('utf8'),
+                headers={'content-type': 'application/json'})
+            if not search:
+                break
+            slr_contents = try_get(
+                search,
+                (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
+                 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
+                list)
+            if not slr_contents:
+                break
+            isr_contents = try_get(
+                slr_contents,
+                lambda x: x[0]['itemSectionRenderer']['contents'],
+                list)
+            if not isr_contents:
+                break
+            for content in isr_contents:
+                if not isinstance(content, dict):
+                    continue
+                video = content.get('videoRenderer')
+                if not isinstance(video, dict):
+                    continue
+                video_id = video.get('videoId')
+                if not video_id:
+                    continue
+                title = try_get(video, lambda x: x['title']['runs'][0]['text'], compat_str)
+                description = try_get(video, lambda x: x['descriptionSnippet']['runs'][0]['text'], compat_str)
+                duration = parse_duration(try_get(video, lambda x: x['lengthText']['simpleText'], compat_str))
+                view_count_text = try_get(video, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
+                view_count = int_or_none(self._search_regex(
+                    r'^(\d+)', re.sub(r'\s', '', view_count_text),
+                    'view count', default=None))
+                uploader = try_get(video, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
+                total += 1
+                yield {
+                    '_type': 'url_transparent',
+                    'ie_key': YoutubeIE.ie_key(),
+                    'id': video_id,
+                    'url': video_id,
+                    'title': title,
+                    'description': description,
+                    'duration': duration,
+                    'view_count': view_count,
+                    'uploader': uploader,
+                }
+                if total == n:
+                    return
+            token = try_get(
+                slr_contents,
+                lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
+                compat_str)
+            if not token:
+                break
+            data['continuation'] = token
+
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
-
-        videos = []
-        limit = n
-
-        url_query = {
-            'search_query': query.encode('utf-8'),
-        }
-        url_query.update(self._EXTRA_QUERY_ARGS)
-        result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
-
-        for pagenum in itertools.count(1):
-            data = self._download_json(
-                result_url, video_id='query "%s"' % query,
-                note='Downloading page %s' % pagenum,
-                errnote='Unable to download API page',
-                query={'spf': 'navigate'})
-            html_content = data[1]['body']['content']
-
-            if 'class="search-message' in html_content:
-                raise ExtractorError(
-                    '[youtube] No video results', expected=True)
-
-            new_videos = list(self._process_page(html_content))
-            videos += new_videos
-            if not new_videos or len(videos) > limit:
-                break
-            next_link = self._html_search_regex(
-                r'href="(/results\?[^"]*\bsp=[^"]+)"[^>]*>\s*<span[^>]+class="[^"]*\byt-uix-button-content\b[^"]*"[^>]*>Next',
-                html_content, 'next link', default=None)
-            if next_link is None:
-                break
-            result_url = compat_urlparse.urljoin('https://www.youtube.com/', next_link)
-
-        if len(videos) > n:
-            videos = videos[:n]
-        return self.playlist_result(videos, query)
+        return self.playlist_result(self._entries(query, n), query)
 
 
 class YoutubeSearchDateIE(YoutubeSearchIE):
     IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
     _SEARCH_KEY = 'ytsearchdate'
     IE_DESC = 'YouTube.com searches, newest videos first'
-    _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
+    _SEARCH_PARAMS = 'CAI%3D'
 
 
 class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):

From 6d4733ce7be2e5bb31d1a21f68c6123074d584a0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 1 Nov 2020 06:52:00 +0700
Subject: [PATCH 148/340] [youtube] Fix JS player URL extraction

---
 youtube_dl/extractor/youtube.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index bd1515380..c31731ac0 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2086,7 +2086,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
                 if cipher:
                     if 's' in url_data or self._downloader.params.get('youtube_include_dash_manifest', True):
-                        ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
+                        ASSETS_RE = (
+                            r'<script[^>]+\bsrc=("[^"]+")[^>]+\bname=["\']player_ias/base',
+                            r'"jsUrl"\s*:\s*("[^"]+")',
+                            r'"assets":.+?"js":\s*("[^"]+")')
                         jsplayer_url_json = self._search_regex(
                             ASSETS_RE,
                             embed_webpage if age_gate else video_webpage,

From b9bceba37ca277261ba9cd37931036b3a52e3e07 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 1 Nov 2020 07:34:20 +0700
Subject: [PATCH 149/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 9b52b7bd2..7ef22edfc 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,21 @@
+version <unreleased>
+
+Core
+* [utils] Don't attempt to coerce JS strings to numbers in js_to_json (#26851)
+* [downloader/http] Properly handle missing message in SSLError (#26646)
+* [downloader/http] Fix access to not yet opened stream in retry
+
+Extractors
+* [youtube] Fix JS player URL extraction
+* [ytsearch] Fix extraction (#26920)
+* [afreecatv] Fix typo (#26970)
+* [23video] Relax URL regular expression (#26870)
++ [ustream] Add support for video.ibm.com (#26894)
+* [iqiyi] Fix typo (#26884)
++ [expressen] Add support for di.se (#26670)
+* [iprima] Improve video id extraction (#26507, #26494)
+
+
 version 2020.09.20
 
 Core

From 34299510bbe077c08e57929d835d212487037e2a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 1 Nov 2020 08:52:27 +0700
Subject: [PATCH 150/340] release 2020.11.01

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index ce0319fe2..8bf96cb24 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.09.20
+ [debug] youtube-dl version 2020.11.01
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index a4002603c..61c005e91 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 3f8b6ce2e..c7eabe344 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index d880c225a..446b82e64 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.09.20
+ [debug] youtube-dl version 2020.11.01
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index dd5fb5144..69657448a 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.09.20. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.09.20**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 7ef22edfc..cfe4ab79d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.01
 
 Core
 * [utils] Don't attempt to coerce JS strings to numbers in js_to_json (#26851)
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 709e5c74c..7d89dd3e1 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.09.20'
+__version__ = '2020.11.01'

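
The release commits above have to keep the same version string in sync across youtube_dl/version.py, the ChangeLog and every issue template, which is why each bump touches all of those files at once. Below is a minimal consistency-check sketch; the file paths are the ones appearing in this patch, but the helper itself is hypothetical and not part of the repository.

    # Hypothetical helper: confirm the issue templates reference the version
    # declared in youtube_dl/version.py, mirroring what a release bump updates.
    import glob
    import re

    def check_release_version():
        with open('youtube_dl/version.py') as f:
            version = re.search(r"__version__ = '([^']+)'", f.read()).group(1)
        stale = []
        for path in glob.glob('.github/ISSUE_TEMPLATE/*.md'):
            with open(path) as f:
                if version not in f.read():
                    stale.append(path)
        return version, stale

    if __name__ == '__main__':
        version, stale = check_release_version()
        print('version.py: %s; templates missing it: %s'
              % (version, ', '.join(stale) or 'none'))
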
From 051071203ccedcaf6d0de0f0d83af593c78673e6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 1 Nov 2020 08:58:40 +0700
Subject: [PATCH 151/340] release 2020.11.01.1

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 youtube_dl/version.py                            | 2 +-
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 8bf96cb24..846c7e2ab 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.01
+ [debug] youtube-dl version 2020.11.01.1
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 61c005e91..26823fc9f 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index c7eabe344..3d3ba3182 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 446b82e64..288bae2d4 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.01
+ [debug] youtube-dl version 2020.11.01.1
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 69657448a..77a195827 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 7d89dd3e1..6d7f14717 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.01'
+__version__ = '2020.11.01.1'

From 2de2ca6659a18b6f5ab76565e00491153ae47276 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 12 Nov 2020 06:16:37 +0700
Subject: [PATCH 152/340] [youtube] Rework extractors

WIP
---
 test/test_all_urls.py              |   22 +-
 youtube_dl/extractor/extractors.py |    9 +-
 youtube_dl/extractor/youtube.py    | 1105 ++++++++++++++--------------
 3 files changed, 566 insertions(+), 570 deletions(-)

diff --git a/test/test_all_urls.py b/test/test_all_urls.py
index 81056a999..348744028 100644
--- a/test/test_all_urls.py
+++ b/test/test_all_urls.py
@@ -34,13 +34,13 @@ class TestAllURLsMatching(unittest.TestCase):
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
-        assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        # assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        # assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
         assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
-        assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
+        # assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
@@ -51,26 +51,22 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('http://www.cleanvideosearch.com/media/action/yt/watch?videoId=8v_4O44sfjM', ['youtube'])
 
     def test_youtube_channel_matching(self):
-        assertChannel = lambda url: self.assertMatch(url, ['youtube:channel'])
+        assertChannel = lambda url: self.assertMatch(url, ['youtube:tab'])
         assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM')
         assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec')
         assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
 
-    def test_youtube_user_matching(self):
-        self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:user'])
+    # def test_youtube_user_matching(self):
+    #     self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:tab'])
 
     def test_youtube_feeds(self):
         self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watchlater'])
         self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:subscriptions'])
         self.assertMatch('https://www.youtube.com/feed/recommended', ['youtube:recommended'])
-        self.assertMatch('https://www.youtube.com/my_favorites', ['youtube:favorites'])
 
-    def test_youtube_show_matching(self):
-        self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show'])
-
-    def test_youtube_search_matching(self):
-        self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
-        self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
+    # def test_youtube_search_matching(self):
+    #     self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
+    #     self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
 
     def test_youtube_extract(self):
         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
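
The adjusted channel test above now expects /channel/... URLs to be claimed by the new youtube:tab extractor instead of the removed youtube:channel one. A rough illustration of why, using only the _VALID_URL pattern introduced later in this patch; the snippet is illustrative and not part of the test suite.

    # Illustrative only: match a few URLs against the youtube:tab pattern
    # added in this patch to see which ones it would claim.
    import re

    YOUTUBE_TAB_VALID_URL = (
        r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/'
        r'(?:(?:channel|c|user)/|playlist\?.*?\blist=)(?P<id>[^/?#&]+)')

    for url in (
            'https://www.youtube.com/channel/HCtnHdj3df7iM/videos',
            'https://www.youtube.com/user/ThirstForScience/playlists',
            'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc'):
        mobj = re.match(YOUTUBE_TAB_VALID_URL, url)
        print('%s -> %s' % (url, mobj.group('id') if mobj else 'no match'))
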
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index ae7079a6a..9d7fecfe8 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1474,21 +1474,18 @@ from .yourporn import YourPornIE
 from .yourupload import YourUploadIE
 from .youtube import (
     YoutubeIE,
-    YoutubeChannelIE,
-    YoutubeFavouritesIE,
     YoutubeHistoryIE,
     YoutubeLiveIE,
+    YoutubeTabIE,
     YoutubePlaylistIE,
-    YoutubePlaylistsIE,
     YoutubeRecommendedIE,
     YoutubeSearchDateIE,
     YoutubeSearchIE,
-    YoutubeSearchURLIE,
-    YoutubeShowIE,
+    #YoutubeSearchURLIE,
     YoutubeSubscriptionsIE,
     YoutubeTruncatedIDIE,
     YoutubeTruncatedURLIE,
-    YoutubeUserIE,
+    YoutubeYtUserIE,
     YoutubeWatchLaterIE,
 )
 from .zapiks import ZapiksIE
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index c31731ac0..696dec2c1 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -16,8 +16,6 @@ from ..jsinterp import JSInterpreter
 from ..swfinterp import SWFInterpreter
 from ..compat import (
     compat_chr,
-    compat_HTTPError,
-    compat_kwargs,
     compat_parse_qs,
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
@@ -30,10 +28,8 @@ from ..utils import (
     bool_or_none,
     clean_html,
     error_to_compat_str,
-    extract_attributes,
     ExtractorError,
     float_or_none,
-    get_element_by_attribute,
     get_element_by_id,
     int_or_none,
     mimetype2ext,
@@ -52,6 +48,7 @@ from ..utils import (
     uppercase_escape,
     url_or_none,
     urlencode_postdata,
+    urljoin,
 )
 
 
@@ -269,13 +266,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
         return True
 
-    def _download_webpage_handle(self, *args, **kwargs):
-        query = kwargs.get('query', {}).copy()
-        query['disable_polymer'] = 'true'
-        kwargs['query'] = query
-        return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
-            *args, **compat_kwargs(kwargs))
-
     def _real_initialize(self):
         if self._downloader is None:
             return
@@ -283,93 +273,34 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         if not self._login():
             return
 
+    _DEFAULT_API_DATA = {
+        'context': {
+            'client': {
+                'clientName': 'WEB',
+                'clientVersion': '2.20201021.03.00',
+            }
+        },
+    }
 
-class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
-    # Extract entries from page with "Load more" button
-    def _entries(self, page, playlist_id):
-        more_widget_html = content_html = page
-        for page_num in itertools.count(1):
-            for entry in self._process_page(content_html):
-                yield entry
+    def _call_api(self, ep, query, video_id):
+        data = self._DEFAULT_API_DATA.copy()
+        data.update(query)
 
-            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
-            if not mobj:
-                break
+        response = self._download_json(
+            'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
+            note='Downloading API JSON', errnote='Unable to download API page',
+            data=json.dumps(data).encode('utf8'),
+            headers={'content-type': 'application/json'},
+            query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
 
-            count = 0
-            retries = 3
-            while count <= retries:
-                try:
-                    # Downloading page may result in intermittent 5xx HTTP error
-                    # that is usually worked around with a retry
-                    more = self._download_json(
-                        'https://www.youtube.com/%s' % mobj.group('more'), playlist_id,
-                        'Downloading page #%s%s'
-                        % (page_num, ' (retry #%d)' % count if count else ''),
-                        transform_source=uppercase_escape,
-                        headers=self._YOUTUBE_CLIENT_HEADERS)
-                    break
-                except ExtractorError as e:
-                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
-                        count += 1
-                        if count <= retries:
-                            continue
-                    raise
+        return response
 
-            content_html = more['content_html']
-            if not content_html.strip():
-                # Some webpages show a "Load more" button but they don't
-                # have more videos
-                break
-            more_widget_html = more['load_more_widget_html']
-
-
-class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
-    def _process_page(self, content):
-        for video_id, video_title in self.extract_videos_from_page(content):
-            yield self.url_result(video_id, 'Youtube', video_id, video_title)
-
-    def extract_videos_from_page_impl(self, video_re, page, ids_in_page, titles_in_page):
-        for mobj in re.finditer(video_re, page):
-            # The link with index 0 is not the first video of the playlist (not sure if still actual)
-            if 'index' in mobj.groupdict() and mobj.group('id') == '0':
-                continue
-            video_id = mobj.group('id')
-            video_title = unescapeHTML(
-                mobj.group('title')) if 'title' in mobj.groupdict() else None
-            if video_title:
-                video_title = video_title.strip()
-            if video_title == '► Play all':
-                video_title = None
-            try:
-                idx = ids_in_page.index(video_id)
-                if video_title and not titles_in_page[idx]:
-                    titles_in_page[idx] = video_title
-            except ValueError:
-                ids_in_page.append(video_id)
-                titles_in_page.append(video_title)
-
-    def extract_videos_from_page(self, page):
-        ids_in_page = []
-        titles_in_page = []
-        self.extract_videos_from_page_impl(
-            self._VIDEO_RE, page, ids_in_page, titles_in_page)
-        return zip(ids_in_page, titles_in_page)
-
-
-class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
-    def _process_page(self, content):
-        for playlist_id in orderedSet(re.findall(
-                r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
-                content)):
-            yield self.url_result(
-                'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
-
-    def _real_extract(self, url):
-        playlist_id = self._match_id(url)
-        webpage = self._download_webpage(url, playlist_id)
-        title = self._og_search_title(webpage, fatal=False)
-        return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
+    def _extract_yt_initial_data(self, video_id, webpage):
+        return self._parse_json(
+            self._search_regex(
+                r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;',
+                webpage, 'yt initial data'),
+            video_id)
 
 
 class YoutubeIE(YoutubeBaseInfoExtractor):
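
The new _call_api helper above posts a JSON payload to YouTube's InnerTube endpoints instead of scraping the old disable_polymer pages. A standalone sketch of the same request follows; the client context and public web API key are taken verbatim from this patch, while the function and constant names are illustrative and error handling is omitted.

    # Sketch of the request _call_api builds: POST the default WEB client
    # context to an InnerTube endpoint, authenticated by the public web key.
    import json
    from urllib.request import Request, urlopen

    INNERTUBE_KEY = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
    DEFAULT_CONTEXT = {
        'context': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20201021.03.00',
            },
        },
    }

    def call_api(endpoint, query):
        data = dict(DEFAULT_CONTEXT)
        data.update(query)
        req = Request(
            'https://www.youtube.com/youtubei/v1/%s?key=%s' % (endpoint, INNERTUBE_KEY),
            data=json.dumps(data).encode('utf-8'),
            headers={'content-type': 'application/json'})
        return json.loads(urlopen(req).read().decode('utf-8'))
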
@@ -430,7 +361,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                          |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                          )
                      )?                                                       # all until now is optional -> you can pass the naked ID
-                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
+                     (?P<id>[0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
                      (?!.*?\blist=
                         (?:
                             %(playlist_id)s|                                  # combined list/video URLs are handled by the playlist IE
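
Naming the video-id capture group, as the hunk above does, lets the extractor read it as mobj.group('id') (the mechanism behind _match_id at the end of this patch) rather than relying on the group's position among the many groups of the pattern. A tiny illustration using just the character class from this hunk; the sample URL is only an example.

    # Named group: position-independent access to the 11-character video id.
    import re

    mobj = re.search(
        r'(?P<id>[0-9A-Za-z_-]{11})',
        'https://www.youtube.com/watch?v=BaW_jenozKc')
    print(mobj.group('id'))  # BaW_jenozKc
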
@@ -1891,6 +1822,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
                 add_dash_mpd_pr(player_response)
 
+        if not video_info and not player_response:
+            player_response = extract_player_response(
+                self._search_regex(
+                    r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
+                    'initial player response', default='{}'),
+                video_id)
+
         def extract_unavailable_message():
             messages = []
             for tag, kind in (('h1', 'message'), ('div', 'submessage')):
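
The fallback added above recovers player data from the ytInitialPlayerResponse object embedded in the watch page when neither the video info nor the player config is available. Below is a minimal sketch of that extraction step in isolation, using the same regex as the hunk; the helper name and the sample HTML string are illustrative.

    # Illustrative: pull the embedded ytInitialPlayerResponse JSON out of a
    # watch-page HTML string, as the fallback in this hunk does.
    import json
    import re

    def extract_initial_player_response(video_webpage):
        mobj = re.search(
            r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage)
        return json.loads(mobj.group(1)) if mobj else {}

    sample = 'var ytInitialPlayerResponse = {"videoDetails": {"videoId": "BaW_jenozKc"}};'
    print(extract_initial_player_response(sample).get('videoDetails'))
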
@@ -2564,7 +2502,502 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         }
 
 
-class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
+class YoutubeTabIE(YoutubeBaseInfoExtractor):
+    IE_DESC = 'YouTube.com tab'
+    _VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|playlist\?.*?\blist=)(?P<id>[^/?#&]+)'
+    IE_NAME = 'youtube:tab'
+
+    _TESTS = [{
+        # playlists, multipage
+        'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
+        'playlist_mincount': 94,
+        'info_dict': {
+            'id': 'UCqj7Cz7revf5maW9g5pgNcg',
+            'title': 'Игорь Клейнер - Playlists',
+        },
+    }, {
+        # playlists, multipage, different order
+        'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
+        'playlist_mincount': 94,
+        'info_dict': {
+            'id': 'UCqj7Cz7revf5maW9g5pgNcg',
+            'title': 'Игорь Клейнер - Playlists',
+        },
+    }, {
+        # playlists, singlepage
+        'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
+        'playlist_mincount': 4,
+        'info_dict': {
+            'id': 'ThirstForScience',
+            'title': 'ThirstForScience',
+        }
+    }, {
+        'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
+        'only_matching': True,
+    }, {
+        # basic, single video playlist
+        'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
+        'info_dict': {
+            'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
+            'uploader': 'Sergey M.',
+            'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
+            'title': 'youtube-dl public playlist',
+        },
+        'playlist_count': 1,
+    }, {
+        # empty playlist
+        'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
+        'info_dict': {
+            'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
+            'uploader': 'Sergey M.',
+            'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
+            'title': 'youtube-dl empty playlist',
+        },
+        'playlist_count': 0,
+    }, {
+        # Home tab
+        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
+        'info_dict': {
+            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
+            'title': 'lex will - Home',
+        },
+        'playlist_mincount': 2,
+    }, {
+        # Videos tab
+        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
+        'info_dict': {
+            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
+            'title': 'lex will - Videos',
+        },
+        'playlist_mincount': 975,
+    }, {
+        # Videos tab, sorted by popular
+        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
+        'info_dict': {
+            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
+            'title': 'lex will - Videos',
+        },
+        'playlist_mincount': 199,
+    }, {
+        # Playlists tab
+        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
+        'info_dict': {
+            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
+            'title': 'lex will - Playlists',
+        },
+        'playlist_mincount': 17,
+    }, {
+        # Community tab
+        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
+        'info_dict': {
+            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
+            'title': 'lex will - Community',
+        },
+        'playlist_mincount': 18,
+    }, {
+        # Channels tab
+        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
+        'info_dict': {
+            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
+            'title': 'lex will - Channels',
+        },
+        'playlist_mincount': 138,
+    }, {
+        'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
+        'only_matching': True,
+    }, {
+        'url': 'https://music.youtube.com/channel/UCT-K0qO8z6NzWrywqefBPBQ',
+        'only_matching': True,
+    }, {
+        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
+        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
+        'info_dict': {
+            'title': '29C3: Not my department',
+            'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
+            'uploader': 'Christiaan008',
+            'uploader_id': 'ChRiStIaAn008',
+        },
+        'playlist_count': 96,
+    }, {
+        'note': 'Large playlist',
+        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
+        'info_dict': {
+            'title': 'Uploads from Cauchemar',
+            'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
+            'uploader': 'Cauchemar',
+            'uploader_id': 'Cauchemar89',
+        },
+        'playlist_mincount': 1123,
+    }, {
+        # even larger playlist, 8832 videos
+        'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
+        'only_matching': True,
+    }, {
+        'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
+        'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
+        'info_dict': {
+            'title': 'Uploads from Interstellar Movie',
+            'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
+            'uploader': 'Interstellar Movie',
+            'uploader_id': 'InterstellarMovie1',
+        },
+        'playlist_mincount': 21,
+    }, {
+        # https://github.com/ytdl-org/youtube-dl/issues/21844
+        'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
+        'info_dict': {
+            'title': 'Data Analysis with Dr Mike Pound',
+            'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
+            'uploader_id': 'Computerphile',
+            'uploader': 'Computerphile',
+        },
+        'playlist_mincount': 11,
+    }, {
+        'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
+        'only_matching': True,
+    }]
+
+    @classmethod
+    def suitable(cls, url):
+        return False if YoutubeLiveIE.suitable(url) else super(
+            YoutubeTabIE, cls).suitable(url)
+
+    def _extract_channel_id(self, webpage):
+        channel_id = self._html_search_meta(
+            'channelId', webpage, 'channel id', default=None)
+        if channel_id:
+            return channel_id
+        channel_url = self._html_search_meta(
+            ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
+             'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
+             'twitter:app:url:googleplay'), webpage, 'channel url')
+        return self._search_regex(
+            r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
+            channel_url, 'channel id')
+
+    @staticmethod
+    def _extract_grid_item_renderer(item):
+        for item_kind in ('Playlist', 'Video', 'Channel'):
+            renderer = item.get('grid%sRenderer' % item_kind)
+            if renderer:
+                return renderer
+
+    def _extract_video(self, renderer):
+        video_id = renderer.get('videoId')
+        title = try_get(
+            renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+        description = try_get(
+            renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
+            compat_str)
+        duration = parse_duration(try_get(
+            renderer, lambda x: x['lengthText']['simpleText'], compat_str))
+        view_count_text = try_get(
+            renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
+        view_count = int_or_none(self._search_regex(
+            r'^(\d+)', re.sub(r'\s', '', view_count_text),
+            'view count', default=None))
+        uploader = try_get(
+            renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
+        return {
+            '_type': 'url_transparent',
+            'ie_key': YoutubeIE.ie_key(),
+            'id': video_id,
+            'url': video_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'view_count': view_count,
+            'uploader': uploader,
+        }
+
+    def _grid_entries(self, grid_renderer):
+        for item in grid_renderer['items']:
+            if not isinstance(item, dict):
+                continue
+            renderer = self._extract_grid_item_renderer(item)
+            if not isinstance(renderer, dict):
+                continue
+            title = try_get(
+                renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+            # playlist
+            playlist_id = renderer.get('playlistId')
+            if playlist_id:
+                yield self.url_result(
+                    'https://www.youtube.com/playlist?list=%s' % playlist_id,
+                    ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
+                    video_title=title)
+            # video
+            video_id = renderer.get('videoId')
+            if video_id:
+                yield self._extract_video(renderer)
+            # channel
+            channel_id = renderer.get('channelId')
+            if channel_id:
+                title = try_get(
+                    renderer, lambda x: x['title']['simpleText'], compat_str)
+                yield self.url_result(
+                    'https://www.youtube.com/channel/%s' % channel_id,
+                    ie=YoutubeTabIE.ie_key(), video_title=title)
+
+    def _shelf_entries_trimmed(self, shelf_renderer):
+        renderer = try_get(
+            shelf_renderer, lambda x: x['content']['horizontalListRenderer'], dict)
+        if not renderer:
+            return
+        # TODO: add support for nested playlists so each shelf is processed
+        # as separate playlist
+        # TODO: this includes only first N items
+        for entry in self._grid_entries(renderer):
+            yield entry
+
+    def _shelf_entries(self, shelf_renderer):
+        ep = try_get(
+            shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
+            compat_str)
+        shelf_url = urljoin('https://www.youtube.com', ep)
+        if not shelf_url:
+            return
+        title = try_get(
+            shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+        yield self.url_result(shelf_url, video_title=title)
+
+    def _playlist_entries(self, video_list_renderer):
+        for content in video_list_renderer['contents']:
+            if not isinstance(content, dict):
+                continue
+            renderer = content.get('playlistVideoRenderer')
+            if not isinstance(renderer, dict):
+                continue
+            video_id = renderer.get('videoId')
+            if not video_id:
+                continue
+            yield self._extract_video(renderer)
+
+    def _video_entry(self, video_renderer):
+        video_id = video_renderer.get('videoId')
+        if video_id:
+            return self._extract_video(video_renderer)
+
+    def _post_thread_entries(self, post_thread_renderer):
+        post_renderer = try_get(
+            post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
+        if not post_renderer:
+            return
+        # video attachment
+        video_renderer = try_get(
+            post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
+        video_id = None
+        if video_renderer:
+            entry = self._video_entry(video_renderer)
+            if entry:
+                yield entry
+        # inline video links
+        runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
+        for run in runs:
+            if not isinstance(run, dict):
+                continue
+            ep_url = try_get(
+                run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
+            if not ep_url:
+                continue
+            if not YoutubeIE.suitable(ep_url):
+                continue
+            ep_video_id = YoutubeIE._match_id(ep_url)
+            if video_id == ep_video_id:
+                continue
+            yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
+
+    def _post_thread_continuation_entries(self, post_thread_continuation):
+        contents = post_thread_continuation.get('contents')
+        if not isinstance(contents, list):
+            return
+        for content in contents:
+            renderer = content.get('backstagePostThreadRenderer')
+            if not isinstance(renderer, dict):
+                continue
+            for entry in self._post_thread_entries(renderer):
+                yield entry
+
+    @staticmethod
+    def _extract_next_continuation_data(renderer):
+        next_continuation = try_get(
+            renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
+        if not next_continuation:
+            return
+        continuation = next_continuation.get('continuation')
+        if not continuation:
+            return
+        ctp = next_continuation.get('clickTrackingParams')
+        return {
+            'ctoken': continuation,
+            'continuation': continuation,
+            'itct': ctp,
+        }
+
+    @classmethod
+    def _extract_continuation(cls, renderer):
+        next_continuation = cls._extract_next_continuation_data(renderer)
+        if next_continuation:
+            return next_continuation
+        contents = renderer.get('contents')
+        if not isinstance(contents, list):
+            return
+        for content in contents:
+            if not isinstance(content, dict):
+                continue
+            continuation_ep = try_get(
+                content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
+                dict)
+            if not continuation_ep:
+                continue
+            continuation = try_get(
+                continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
+            if not continuation:
+                continue
+            ctp = continuation_ep.get('clickTrackingParams')
+            if not ctp:
+                continue
+            return {
+                'ctoken': continuation,
+                'continuation': continuation,
+                'itct': ctp,
+            }
+
+    def _entries(self, tab):
+        continuation = None
+        slr_contents = tab['sectionListRenderer']['contents']
+        for slr_content in slr_contents:
+            if not isinstance(slr_content, dict):
+                continue
+            is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
+            if not is_renderer:
+                continue
+            isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
+            for isr_content in isr_contents:
+                if not isinstance(isr_content, dict):
+                    continue
+                renderer = isr_content.get('playlistVideoListRenderer')
+                if renderer:
+                    for entry in self._playlist_entries(renderer):
+                        yield entry
+                    continuation = self._extract_continuation(renderer)
+                    continue
+                renderer = isr_content.get('gridRenderer')
+                if renderer:
+                    for entry in self._grid_entries(renderer):
+                        yield entry
+                    continuation = self._extract_continuation(renderer)
+                    continue
+                renderer = isr_content.get('shelfRenderer')
+                if renderer:
+                    for entry in self._shelf_entries(renderer):
+                        yield entry
+                    continue
+                renderer = isr_content.get('backstagePostThreadRenderer')
+                if renderer:
+                    for entry in self._post_thread_entries(renderer):
+                        yield entry
+                    continuation = self._extract_continuation(renderer)
+                    continue
+                renderer = isr_content.get('videoRenderer')
+                if renderer:
+                    entry = self._video_entry(renderer)
+                    if entry:
+                        yield entry
+
+            if not continuation:
+                continuation = self._extract_continuation(is_renderer)
+
+        for page_num in itertools.count(1):
+            if not continuation:
+                break
+            browse = self._download_json(
+                'https://www.youtube.com/browse_ajax', None,
+                'Downloading page %d' % page_num,
+                headers={
+                    'x-youtube-client-name': '1',
+                    'x-youtube-client-version': '2.20201030.01.00',
+                }, query=continuation, fatal=False)
+            if not browse:
+                break
+            response = try_get(browse, lambda x: x[1]['response'], dict)
+            if not response:
+                break
+
+            continuation_contents = try_get(
+                response, lambda x: x['continuationContents'], dict)
+            if continuation_contents:
+                continuation_renderer = continuation_contents.get('playlistVideoListContinuation')
+                if continuation_renderer:
+                    for entry in self._playlist_entries(continuation_renderer):
+                        yield entry
+                    continuation = self._extract_continuation(continuation_renderer)
+                    continue
+                continuation_renderer = continuation_contents.get('gridContinuation')
+                if continuation_renderer:
+                    for entry in self._grid_entries(continuation_renderer):
+                        yield entry
+                    continuation = self._extract_continuation(continuation_renderer)
+                    continue
+                continuation_renderer = continuation_contents.get('itemSectionContinuation')
+                if continuation_renderer:
+                    for entry in self._post_thread_continuation_entries(continuation_renderer):
+                        yield entry
+                    continuation = self._extract_continuation(continuation_renderer)
+                    continue
+
+            continuation_items = try_get(
+                response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
+            if continuation_items:
+                continuation_item = continuation_items[0]
+                if not isinstance(continuation_item, dict):
+                    continue
+                renderer = continuation_item.get('playlistVideoRenderer')
+                if renderer:
+                    video_list_renderer = {'contents': continuation_items}
+                    for entry in self._playlist_entries(video_list_renderer):
+                        yield entry
+                    continuation = self._extract_continuation(video_list_renderer)
+                    continue
+
+            break
+
+    @staticmethod
+    def _extract_selected_tab(tabs):
+        for tab in tabs:
+            if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
+                return tab['tabRenderer']
+        else:
+            raise ExtractorError('Unable to find selected tab')
+
+    def _real_extract(self, url):
+        channel_id = self._match_id(url)
+        url = compat_urlparse.urlunparse(
+            compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
+        webpage = self._download_webpage(url, channel_id)
+        data = self._extract_yt_initial_data(channel_id, webpage)
+        tabs = data['contents']['twoColumnBrowseResultsRenderer']['tabs']
+        selected_tab = self._extract_selected_tab(tabs)
+        channel_title = try_get(
+            data, lambda x: x['metadata']['channelMetadataRenderer']['title'],
+            compat_str)
+        channel_external_id = try_get(
+            data, lambda x: x['metadata']['channelMetadataRenderer']['externalId'],
+            compat_str)
+        tab_title = selected_tab.get('title')
+        title = channel_title or channel_id
+        if tab_title:
+            title += ' - %s' % tab_title
+        return self.playlist_result(
+            self._entries(selected_tab['content']),
+            playlist_id=channel_external_id or channel_id,
+            playlist_title=title)
+
+
+class YoutubePlaylistIE(InfoExtractor):
     IE_DESC = 'YouTube.com playlists'
     _VALID_URL = r"""(?x)(?:
                         (?:https?://)?
@@ -2591,39 +3024,8 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
                      |
                         (%(playlist_id)s)
                      )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
-    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
-    _VIDEO_RE_TPL = r'href="\s*/watch\?v=%s(?:&amp;(?:[^"]*?index=(?P<index>\d+))?(?:[^>]+>(?P<title>[^<]+))?)?'
-    _VIDEO_RE = _VIDEO_RE_TPL % r'(?P<id>[0-9A-Za-z_-]{11})'
     IE_NAME = 'youtube:playlist'
     _TESTS = [{
-        'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
-        'info_dict': {
-            'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
-            'uploader': 'Sergey M.',
-            'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
-            'title': 'youtube-dl public playlist',
-        },
-        'playlist_count': 1,
-    }, {
-        'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
-        'info_dict': {
-            'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
-            'uploader': 'Sergey M.',
-            'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
-            'title': 'youtube-dl empty playlist',
-        },
-        'playlist_count': 0,
-    }, {
-        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
-        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
-        'info_dict': {
-            'title': '29C3: Not my department',
-            'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
-            'uploader': 'Christiaan008',
-            'uploader_id': 'ChRiStIaAn008',
-        },
-        'playlist_count': 96,
-    }, {
         'note': 'issue #673',
         'url': 'PLBB231211A4F62143',
         'info_dict': {
@@ -2632,17 +3034,7 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
             'uploader': 'Wickydoo',
             'uploader_id': 'Wickydoo',
         },
-        'playlist_mincount': 26,
-    }, {
-        'note': 'Large playlist',
-        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
-        'info_dict': {
-            'title': 'Uploads from Cauchemar',
-            'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
-            'uploader': 'Cauchemar',
-            'uploader_id': 'Cauchemar89',
-        },
-        'playlist_mincount': 799,
+        'playlist_mincount': 29,
     }, {
         'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
         'info_dict': {
@@ -2663,7 +3055,7 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
         }
     }, {
         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
-        'playlist_mincount': 485,
+        'playlist_mincount': 982,
         'info_dict': {
             'title': '2018 Chinese New Singles (11/6 updated)',
             'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
@@ -2679,16 +3071,6 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
             'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
         },
         'skip': 'This playlist does not exist',
-    }, {
-        'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
-        'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
-        'info_dict': {
-            'title': 'Uploads from Interstellar Movie',
-            'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
-            'uploader': 'Interstellar Movie',
-            'uploader_id': 'InterstellarMovie1',
-        },
-        'playlist_mincount': 21,
     }, {
         # Playlist URL that does not actually serve a playlist
         'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
@@ -2733,16 +3115,6 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
             'noplaylist': True,
             'skip_download': True,
         },
-    }, {
-        # https://github.com/ytdl-org/youtube-dl/issues/21844
-        'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
-        'info_dict': {
-            'title': 'Data Analysis with Dr Mike Pound',
-            'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
-            'uploader_id': 'Computerphile',
-            'uploader': 'Computerphile',
-        },
-        'playlist_mincount': 11,
     }, {
         'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
         'only_matching': True,
@@ -2753,153 +3125,15 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
         # music album playlist
         'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
         'only_matching': True,
-    }, {
-        'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
-        'only_matching': True,
     }, {
         'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
         'only_matching': True,
     }]
 
-    def _real_initialize(self):
-        self._login()
-
-    def extract_videos_from_page(self, page):
-        ids_in_page = []
-        titles_in_page = []
-
-        for item in re.findall(
-                r'(<[^>]*\bdata-video-id\s*=\s*["\'][0-9A-Za-z_-]{11}[^>]+>)', page):
-            attrs = extract_attributes(item)
-            video_id = attrs['data-video-id']
-            video_title = unescapeHTML(attrs.get('data-title'))
-            if video_title:
-                video_title = video_title.strip()
-            ids_in_page.append(video_id)
-            titles_in_page.append(video_title)
-
-        # Fallback with old _VIDEO_RE
-        self.extract_videos_from_page_impl(
-            self._VIDEO_RE, page, ids_in_page, titles_in_page)
-
-        # Relaxed fallbacks
-        self.extract_videos_from_page_impl(
-            r'href="\s*/watch\?v\s*=\s*(?P<id>[0-9A-Za-z_-]{11})', page,
-            ids_in_page, titles_in_page)
-        self.extract_videos_from_page_impl(
-            r'data-video-ids\s*=\s*["\'](?P<id>[0-9A-Za-z_-]{11})', page,
-            ids_in_page, titles_in_page)
-
-        return zip(ids_in_page, titles_in_page)
-
-    def _extract_mix(self, playlist_id):
-        # The mixes are generated from a single video
-        # the id of the playlist is just 'RD' + video_id
-        ids = []
-        last_id = playlist_id[-11:]
-        for n in itertools.count(1):
-            url = 'https://www.youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
-            webpage = self._download_webpage(
-                url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
-            new_ids = orderedSet(re.findall(
-                r'''(?xs)data-video-username=".*?".*?
-                           href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
-                webpage))
-            # Fetch new pages until all the videos are repeated, it seems that
-            # there are always 51 unique videos.
-            new_ids = [_id for _id in new_ids if _id not in ids]
-            if not new_ids:
-                break
-            ids.extend(new_ids)
-            last_id = ids[-1]
-
-        url_results = self._ids_to_results(ids)
-
-        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
-        title_span = (
-            search_title('playlist-title')
-            or search_title('title long-title')
-            or search_title('title'))
-        title = clean_html(title_span)
-
-        return self.playlist_result(url_results, playlist_id, title)
-
-    def _extract_playlist(self, playlist_id):
-        url = self._TEMPLATE_URL % playlist_id
-        page = self._download_webpage(url, playlist_id)
-
-        # the yt-alert-message now has tabindex attribute (see https://github.com/ytdl-org/youtube-dl/issues/11604)
-        for match in re.findall(r'<div class="yt-alert-message"[^>]*>([^<]+)</div>', page):
-            match = match.strip()
-            # Check if the playlist exists or is private
-            mobj = re.match(r'[^<]*(?:The|This) playlist (?P<reason>does not exist|is private)[^<]*', match)
-            if mobj:
-                reason = mobj.group('reason')
-                message = 'This playlist %s' % reason
-                if 'private' in reason:
-                    message += ', use --username or --netrc to access it'
-                message += '.'
-                raise ExtractorError(message, expected=True)
-            elif re.match(r'[^<]*Invalid parameters[^<]*', match):
-                raise ExtractorError(
-                    'Invalid parameters. Maybe URL is incorrect.',
-                    expected=True)
-            elif re.match(r'[^<]*Choose your language[^<]*', match):
-                continue
-            else:
-                self.report_warning('Youtube gives an alert message: ' + match)
-
-        playlist_title = self._html_search_regex(
-            r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
-            page, 'title', default=None)
-
-        _UPLOADER_BASE = r'class=["\']pl-header-details[^>]+>\s*<li>\s*<a[^>]+\bhref='
-        uploader = self._html_search_regex(
-            r'%s["\']/(?:user|channel)/[^>]+>([^<]+)' % _UPLOADER_BASE,
-            page, 'uploader', default=None)
-        mobj = re.search(
-            r'%s(["\'])(?P<path>/(?:user|channel)/(?P<uploader_id>.+?))\1' % _UPLOADER_BASE,
-            page)
-        if mobj:
-            uploader_id = mobj.group('uploader_id')
-            uploader_url = compat_urlparse.urljoin(url, mobj.group('path'))
-        else:
-            uploader_id = uploader_url = None
-
-        has_videos = True
-
-        if not playlist_title:
-            try:
-                # Some playlist URLs don't actually serve a playlist (e.g.
-                # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
-                next(self._entries(page, playlist_id))
-            except StopIteration:
-                has_videos = False
-
-        playlist = self.playlist_result(
-            self._entries(page, playlist_id), playlist_id, playlist_title)
-        playlist.update({
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'uploader_url': uploader_url,
-        })
-
-        return has_videos, playlist
-
-    def _check_download_just_video(self, url, playlist_id):
-        # Check if it's a video-specific URL
-        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
-        video_id = query_dict.get('v', [None])[0] or self._search_regex(
-            r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url,
-            'video id', default=None)
-        if video_id:
-            if self._downloader.params.get('noplaylist'):
-                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
-                return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
-            else:
-                self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
-                return video_id, None
-        return None, None
+    @classmethod
+    def suitable(cls, url):
+        return False if YoutubeTabIE.suitable(url) else super(
+            YoutubePlaylistIE, cls).suitable(url)
 
     def _real_extract(self, url):
         # Extract playlist id
@@ -2907,184 +3141,23 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
         if mobj is None:
             raise ExtractorError('Invalid URL: %s' % url)
         playlist_id = mobj.group(1) or mobj.group(2)
-
-        video_id, video = self._check_download_just_video(url, playlist_id)
-        if video:
-            return video
-
-        if playlist_id.startswith(('RD', 'UL', 'PU')):
-            # Mixes require a custom extraction process
-            return self._extract_mix(playlist_id)
-
-        has_videos, playlist = self._extract_playlist(playlist_id)
-        if has_videos or not video_id:
-            return playlist
-
-        # Some playlist URLs don't actually serve a playlist (see
-        # https://github.com/ytdl-org/youtube-dl/issues/10537).
-        # Fallback to plain video extraction if there is a video id
-        # along with playlist id.
-        return self.url_result(video_id, 'Youtube', video_id=video_id)
+        return self.url_result(
+            'https://www.youtube.com/playlist?list=%s' % playlist_id,
+            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
 
 
-class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
-    IE_DESC = 'YouTube.com channels'
-    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie|kids)?\.com|(?:www\.)?invidio\.us)/channel/(?P<id>[0-9A-Za-z_-]+)'
-    _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
-    _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
-    IE_NAME = 'youtube:channel'
+class YoutubeYtUserIE(InfoExtractor):
+    _VALID_URL = r'ytuser:(?P<id>.+)'
     _TESTS = [{
-        'note': 'paginated channel',
-        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
-        'playlist_mincount': 91,
-        'info_dict': {
-            'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
-            'title': 'Uploads from lex will',
-            'uploader': 'lex will',
-            'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
-        }
-    }, {
-        'note': 'Age restricted channel',
-        # from https://www.youtube.com/user/DeusExOfficial
-        'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
-        'playlist_mincount': 64,
-        'info_dict': {
-            'id': 'UUs0ifCMCm1icqRbqhUINa0w',
-            'title': 'Uploads from Deus Ex',
-            'uploader': 'Deus Ex',
-            'uploader_id': 'DeusExOfficial',
-        },
-    }, {
-        'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
-        'only_matching': True,
-    }]
-
-    @classmethod
-    def suitable(cls, url):
-        return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
-                else super(YoutubeChannelIE, cls).suitable(url))
-
-    def _build_template_url(self, url, channel_id):
-        return self._TEMPLATE_URL % channel_id
-
-    def _real_extract(self, url):
-        channel_id = self._match_id(url)
-
-        url = self._build_template_url(url, channel_id)
-
-        # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
-        # Workaround by extracting as a playlist if managed to obtain channel playlist URL
-        # otherwise fallback on channel by page extraction
-        channel_page = self._download_webpage(
-            url + '?view=57', channel_id,
-            'Downloading channel page', fatal=False)
-        if channel_page is False:
-            channel_playlist_id = False
-        else:
-            channel_playlist_id = self._html_search_meta(
-                'channelId', channel_page, 'channel id', default=None)
-            if not channel_playlist_id:
-                channel_url = self._html_search_meta(
-                    ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
-                    channel_page, 'channel url', default=None)
-                if channel_url:
-                    channel_playlist_id = self._search_regex(
-                        r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
-                        channel_url, 'channel id', default=None)
-        if channel_playlist_id and channel_playlist_id.startswith('UC'):
-            playlist_id = 'UU' + channel_playlist_id[2:]
-            return self.url_result(
-                compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
-
-        channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
-        autogenerated = re.search(r'''(?x)
-                class="[^"]*?(?:
-                    channel-header-autogenerated-label|
-                    yt-channel-title-autogenerated
-                )[^"]*"''', channel_page) is not None
-
-        if autogenerated:
-            # The videos are contained in a single page
-            # the ajax pages can't be used, they are empty
-            entries = [
-                self.url_result(
-                    video_id, 'Youtube', video_id=video_id,
-                    video_title=video_title)
-                for video_id, video_title in self.extract_videos_from_page(channel_page)]
-            return self.playlist_result(entries, channel_id)
-
-        try:
-            next(self._entries(channel_page, channel_id))
-        except StopIteration:
-            alert_message = self._html_search_regex(
-                r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
-                channel_page, 'alert', default=None, group='alert')
-            if alert_message:
-                raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
-
-        return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
-
-
-class YoutubeUserIE(YoutubeChannelIE):
-    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
-    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results|shared)(?:$|[^a-z_A-Z0-9%-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_%-]+)'
-    _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
-    IE_NAME = 'youtube:user'
-
-    _TESTS = [{
-        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
-        'playlist_mincount': 320,
-        'info_dict': {
-            'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
-            'title': 'Uploads from The Linux Foundation',
-            'uploader': 'The Linux Foundation',
-            'uploader_id': 'TheLinuxFoundation',
-        }
-    }, {
-        # Only available via https://www.youtube.com/c/12minuteathlete/videos
-        # but not https://www.youtube.com/user/12minuteathlete/videos
-        'url': 'https://www.youtube.com/c/12minuteathlete/videos',
-        'playlist_mincount': 249,
-        'info_dict': {
-            'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
-            'title': 'Uploads from 12 Minute Athlete',
-            'uploader': '12 Minute Athlete',
-            'uploader_id': 'the12minuteathlete',
-        }
-    }, {
         'url': 'ytuser:phihag',
         'only_matching': True,
-    }, {
-        'url': 'https://www.youtube.com/c/gametrailers',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.youtube.com/c/Pawe%C5%82Zadro%C5%BCniak',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.youtube.com/gametrailers',
-        'only_matching': True,
-    }, {
-        # This channel is not available, geo restricted to JP
-        'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
-        'only_matching': True,
     }]
 
-    @classmethod
-    def suitable(cls, url):
-        # Don't return True if the url can be extracted with other youtube
-        # extractor, the regex would is too permissive and it would match.
-        other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
-        if any(ie.suitable(url) for ie in other_yt_ies):
-            return False
-        else:
-            return super(YoutubeUserIE, cls).suitable(url)
-
-    def _build_template_url(self, url, channel_id):
-        mobj = re.match(self._VALID_URL, url)
-        return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
+    def _real_extract(self, url):
+        user_id = self._match_id(url)
+        return self.url_result(
+            'https://www.youtube.com/user/%s' % user_id,
+            ie=YoutubeTabIE.ie_key(), video_id=user_id)
 
 
 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
@@ -3139,45 +3212,7 @@ class YoutubeLiveIE(YoutubeBaseInfoExtractor):
         return self.url_result(base_url)
 
 
-class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
-    IE_DESC = 'YouTube.com user/channel playlists'
-    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel|c)/(?P<id>[^/]+)/playlists'
-    IE_NAME = 'youtube:playlists'
-
-    _TESTS = [{
-        'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
-        'playlist_mincount': 4,
-        'info_dict': {
-            'id': 'ThirstForScience',
-            'title': 'ThirstForScience',
-        },
-    }, {
-        # with "Load more" button
-        'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
-        'playlist_mincount': 70,
-        'info_dict': {
-            'id': 'igorkle1',
-            'title': 'Игорь Клейнер',
-        },
-    }, {
-        'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
-        'playlist_mincount': 17,
-        'info_dict': {
-            'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
-            'title': 'Chem Player',
-        },
-        'skip': 'Blocked',
-    }, {
-        'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
-        'only_matching': True,
-    }]
-
-
-class YoutubeSearchBaseInfoExtractor(YoutubePlaylistBaseInfoExtractor):
-    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
-
-
-class YoutubeSearchIE(SearchInfoExtractor, YoutubeSearchBaseInfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com searches'
     # there doesn't appear to be a real limit, for example if you search for
     # 'python' you get more than 8.000.000 results
@@ -3274,7 +3309,8 @@ class YoutubeSearchDateIE(YoutubeSearchIE):
     _SEARCH_PARAMS = 'CAI%3D'
 
 
-class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
+r"""
+class YoutubeSearchURLIE(YoutubeSearchIE):
     IE_DESC = 'YouTube.com search URLs'
     IE_NAME = 'youtube:search_url'
     _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
@@ -3294,25 +3330,7 @@ class YoutubeSearchURLIE(YoutubeSearchBaseInfoExtractor):
         query = compat_urllib_parse_unquote_plus(mobj.group('query'))
         webpage = self._download_webpage(url, query)
         return self.playlist_result(self._process_page(webpage), playlist_title=query)
-
-
-class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
-    IE_DESC = 'YouTube.com (multi-season) shows'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/show/(?P<id>[^?#]*)'
-    IE_NAME = 'youtube:show'
-    _TESTS = [{
-        'url': 'https://www.youtube.com/show/airdisasters',
-        'playlist_mincount': 5,
-        'info_dict': {
-            'id': 'airdisasters',
-            'title': 'Air Disasters',
-        }
-    }]
-
-    def _real_extract(self, url):
-        playlist_id = self._match_id(url)
-        return super(YoutubeShowIE, self)._real_extract(
-            'https://www.youtube.com/show/%s/playlists' % playlist_id)
+"""
 
 
 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
@@ -3369,37 +3387,22 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
             self._entries(page), playlist_title=self._PLAYLIST_TITLE)
 
 
-class YoutubeWatchLaterIE(YoutubePlaylistIE):
+class YoutubeWatchLaterIE(InfoExtractor):
     IE_NAME = 'youtube:watchlater'
     IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
     _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
 
     _TESTS = [{
-        'url': 'https://www.youtube.com/playlist?list=WL',
+        'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
         'only_matching': True,
     }, {
-        'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
+        'url': 'https://www.youtube.com/feed/watch_later',
         'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        _, video = self._check_download_just_video(url, 'WL')
-        if video:
-            return video
-        _, playlist = self._extract_playlist('WL')
-        return playlist
-
-
-class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
-    IE_NAME = 'youtube:favorites'
-    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
-    _LOGIN_REQUIRED = True
-
-    def _real_extract(self, url):
-        webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
-        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
-        return self.url_result(playlist_id, 'YoutubePlaylist')
+        return self.url_result(
+            'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
 
 
 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
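
The net effect of this rework is that the playlist, user and watch-later extractors no longer parse any pages themselves: each one normalises its input into a canonical youtube.com URL and hands it off to YoutubeTabIE via url_result, exactly as the new YoutubeWatchLaterIE above does. A self-contained sketch of that wrapper shape (the class name and the absolute import are illustrative, not part of the patch):

    from youtube_dl.extractor.common import InfoExtractor

    class ExampleWatchLaterIE(InfoExtractor):
        # Thin wrapper in the style introduced by the rework: recognise the
        # shorthand URL, then delegate the real extraction to youtube:tab.
        _VALID_URL = r':ytwatchlater'

        def _real_extract(self, url):
            return self.url_result(
                'https://www.youtube.com/playlist?list=WL', ie='YoutubeTab')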

From 5bbdadd5f886c15d1d765a21a463af899f8c30e3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 12 Nov 2020 06:18:16 +0700
Subject: [PATCH 153/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index cfe4ab79d..73524a182 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+version <unreleased>
+
+Extractors
+* [youtube] Rework extractors
+
+
 version 2020.11.01
 
 Core

From 28f9568a8460088de18210d060058598e5237022 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 12 Nov 2020 06:23:46 +0700
Subject: [PATCH 154/340] release 2020.11.12

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 8 ++------
 youtube_dl/version.py                            | 2 +-
 8 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 846c7e2ab..a10c9fd83 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.01.1
+ [debug] youtube-dl version 2020.11.12
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 26823fc9f..9cc120d3e 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 3d3ba3182..29bd5f5ac 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 288bae2d4..cc33a993f 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.01.1
+ [debug] youtube-dl version 2020.11.12
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 77a195827..cd577ecef 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.01.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.01.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 73524a182..1ef7ea7b6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.12
 
 Extractors
 * [youtube] Rework extractors
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 367545a96..0c77d017e 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -1131,20 +1131,16 @@
  - **YourPorn**
  - **YourUpload**
  - **youtube**: YouTube.com
- - **youtube:channel**: YouTube.com channels
- - **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication)
  - **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
  - **youtube:live**: YouTube.com live streams
  - **youtube:playlist**: YouTube.com playlists
- - **youtube:playlists**: YouTube.com user/channel playlists
  - **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
  - **youtube:search**: YouTube.com searches
  - **youtube:search:date**: YouTube.com searches, newest videos first
- - **youtube:search_url**: YouTube.com search URLs
- - **youtube:show**: YouTube.com (multi-season) shows
  - **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
- - **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword)
+ - **youtube:tab**: YouTube.com tab
  - **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
+ - **YoutubeYtUser**
  - **Zapiks**
  - **Zaq1**
  - **Zattoo**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 6d7f14717..04cd207ab 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.01.1'
+__version__ = '2020.11.12'

From 1fb034d029c8b7feafe45f64e6a0808663ad315e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 16 Nov 2020 21:03:56 +0700
Subject: [PATCH 155/340] [youtube] Remove RIAA copyrighted media from tests as
 per [1]

1. GitHub DMCA and 1201 notice re youtube-dl 9-21-20
---
 youtube_dl/extractor/youtube.py | 145 +-------------------------------
 1 file changed, 1 insertion(+), 144 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 696dec2c1..248682a41 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -509,48 +509,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'end_time': 9,
             }
         },
-        {
-            'url': 'https://www.youtube.com/watch?v=UxxajLWwzqY',
-            'note': 'Test generic use_cipher_signature video (#897)',
-            'info_dict': {
-                'id': 'UxxajLWwzqY',
-                'ext': 'mp4',
-                'upload_date': '20120506',
-                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
-                'alt_title': 'I Love It (feat. Charli XCX)',
-                'description': 'md5:19a2f98d9032b9311e686ed039564f63',
-                'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
-                         'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
-                         'iconic ep', 'iconic', 'love', 'it'],
-                'duration': 180,
-                'uploader': 'Icona Pop',
-                'uploader_id': 'IconaPop',
-                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IconaPop',
-                'creator': 'Icona Pop',
-                'track': 'I Love It (feat. Charli XCX)',
-                'artist': 'Icona Pop',
-            }
-        },
-        {
-            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
-            'note': 'Test VEVO video with age protection (#956)',
-            'info_dict': {
-                'id': '07FYdnEawAQ',
-                'ext': 'mp4',
-                'upload_date': '20130703',
-                'title': 'Justin Timberlake - Tunnel Vision (Official Music Video) (Explicit)',
-                'alt_title': 'Tunnel Vision',
-                'description': 'md5:07dab3356cde4199048e4c7cd93471e1',
-                'duration': 419,
-                'uploader': 'justintimberlakeVEVO',
-                'uploader_id': 'justintimberlakeVEVO',
-                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
-                'creator': 'Justin Timberlake',
-                'track': 'Tunnel Vision',
-                'artist': 'Justin Timberlake',
-                'age_limit': 18,
-            }
-        },
         {
             'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
             'note': 'Embed-only video (#1746)',
@@ -567,7 +525,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             }
         },
         {
-            'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
+            'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
             'note': 'Use the first video ID in the URL',
             'info_dict': {
                 'id': 'BaW_jenozKc',
@@ -626,24 +584,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'format': '141/bestaudio[ext=m4a]',
             },
         },
-        # JS player signature function name containing $
-        {
-            'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
-            'info_dict': {
-                'id': 'nfWlot6h_JM',
-                'ext': 'm4a',
-                'title': 'Taylor Swift - Shake It Off',
-                'description': 'md5:307195cd21ff7fa352270fe884570ef0',
-                'duration': 242,
-                'uploader': 'TaylorSwiftVEVO',
-                'uploader_id': 'TaylorSwiftVEVO',
-                'upload_date': '20140818',
-            },
-            'params': {
-                'youtube_include_dash_manifest': True,
-                'format': '141/bestaudio[ext=m4a]',
-            },
-        },
         # Controversy video
         {
             'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
@@ -675,22 +615,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'age_limit': 18,
             },
         },
-        # Age-gate video with encrypted signature
-        {
-            'url': 'https://www.youtube.com/watch?v=6kLq3WMV1nU',
-            'info_dict': {
-                'id': '6kLq3WMV1nU',
-                'ext': 'mp4',
-                'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
-                'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
-                'duration': 246,
-                'uploader': 'LloydVEVO',
-                'uploader_id': 'LloydVEVO',
-                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
-                'upload_date': '20110629',
-                'age_limit': 18,
-            },
-        },
         # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
         # YouTube Red ad is not captured for creator
         {
@@ -1105,73 +1029,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'skip_download': True,
             },
         },
-        {
-            # Youtube Music Auto-generated description
-            # Retrieve 'artist' field from 'Artist:' in video description
-            # when it is present on youtube music video
-            'url': 'https://www.youtube.com/watch?v=k0jLE7tTwjY',
-            'info_dict': {
-                'id': 'k0jLE7tTwjY',
-                'ext': 'mp4',
-                'title': 'Latch Feat. Sam Smith',
-                'description': 'md5:3cb1e8101a7c85fcba9b4fb41b951335',
-                'upload_date': '20150110',
-                'uploader': 'Various Artists - Topic',
-                'uploader_id': 'UCNkEcmYdjrH4RqtNgh7BZ9w',
-                'artist': 'Disclosure',
-                'track': 'Latch Feat. Sam Smith',
-                'album': 'Latch Featuring Sam Smith',
-                'release_date': '20121008',
-                'release_year': 2012,
-            },
-            'params': {
-                'skip_download': True,
-            },
-        },
-        {
-            # Youtube Music Auto-generated description
-            # handle multiple artists on youtube music video
-            'url': 'https://www.youtube.com/watch?v=74qn0eJSjpA',
-            'info_dict': {
-                'id': '74qn0eJSjpA',
-                'ext': 'mp4',
-                'title': 'Eastside',
-                'description': 'md5:290516bb73dcbfab0dcc4efe6c3de5f2',
-                'upload_date': '20180710',
-                'uploader': 'Benny Blanco - Topic',
-                'uploader_id': 'UCzqz_ksRu_WkIzmivMdIS7A',
-                'artist': 'benny blanco, Halsey, Khalid',
-                'track': 'Eastside',
-                'album': 'Eastside',
-                'release_date': '20180713',
-                'release_year': 2018,
-            },
-            'params': {
-                'skip_download': True,
-            },
-        },
-        {
-            # Youtube Music Auto-generated description
-            # handle youtube music video with release_year and no release_date
-            'url': 'https://www.youtube.com/watch?v=-hcAI0g-f5M',
-            'info_dict': {
-                'id': '-hcAI0g-f5M',
-                'ext': 'mp4',
-                'title': 'Put It On Me',
-                'description': 'md5:f6422397c07c4c907c6638e1fee380a5',
-                'upload_date': '20180426',
-                'uploader': 'Matt Maeson - Topic',
-                'uploader_id': 'UCnEkIGqtGcQMLk73Kp-Q5LQ',
-                'artist': 'Matt Maeson',
-                'track': 'Put It On Me',
-                'album': 'The Hearse',
-                'release_date': None,
-                'release_year': 2018,
-            },
-            'params': {
-                'skip_download': True,
-            },
-        },
         {
             'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
             'only_matching': True,

From 1737ea69b978f7ebe0f52c526d3dda5ba5c196b6 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 15:55:07 +0100
Subject: [PATCH 156/340] [cnbc] fix extraction

---
 youtube_dl/extractor/cnbc.py | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/youtube_dl/extractor/cnbc.py b/youtube_dl/extractor/cnbc.py
index 6889b0f40..7b9f4536a 100644
--- a/youtube_dl/extractor/cnbc.py
+++ b/youtube_dl/extractor/cnbc.py
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
 
 from .common import InfoExtractor
 from ..utils import smuggle_url
@@ -38,7 +39,7 @@ class CNBCIE(InfoExtractor):
 
 
 class CNBCVideoIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?cnbc\.com/video/(?:[^/]+/)+(?P<id>[^./?#&]+)'
+    _VALID_URL = r'https?://(?:www\.)?cnbc\.com(?P<path>/video/(?:[^/]+/)+(?P<id>[^./?#&]+)\.html)'
     _TEST = {
         'url': 'https://www.cnbc.com/video/2018/07/19/trump-i-dont-necessarily-agree-with-raising-rates.html',
         'info_dict': {
@@ -56,11 +57,15 @@ class CNBCVideoIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
-        video_id = self._search_regex(
-            r'content_id["\']\s*:\s*["\'](\d+)', webpage, display_id,
-            'video id')
+        path, display_id = re.match(self._VALID_URL, url).groups()
+        video_id = self._download_json(
+            'https://webql-redesign.cnbcfm.com/graphql', display_id, query={
+                'query': '''{
+  page(path: "%s") {
+    vcpsId
+  }
+}''' % path,
+            })['data']['page']['vcpsId']
         return self.url_result(
-            'http://video.cnbc.com/gallery/?video=%s' % video_id,
+            'http://video.cnbc.com/gallery/?video=%d' % video_id,
             CNBCIE.ie_key())
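
For context, the reworked CNBCVideoIE no longer scrapes content_id out of the page markup; it resolves the numeric id by sending the article path to CNBC's GraphQL endpoint and reading page.vcpsId from the response. A minimal Python 3 sketch of that lookup, assuming the endpoint and query shape shown in the diff above (the helper name is hypothetical and error handling is omitted):

    import json
    import urllib.parse
    import urllib.request

    def cnbc_vcps_id(path):
        # 'path' is the /video/.../....html part of the page URL, as captured
        # by the new _VALID_URL's 'path' group.
        query = '{\n  page(path: "%s") {\n    vcpsId\n  }\n}' % path
        url = ('https://webql-redesign.cnbcfm.com/graphql?'
               + urllib.parse.urlencode({'query': query}))
        with urllib.request.urlopen(url) as resp:
            return json.load(resp)['data']['page']['vcpsId']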

From efc589b86578ad98025aa0a9ccfa5db3195c7deb Mon Sep 17 00:00:00 2001
From: Edward Betts <edward@4angle.com>
Date: Mon, 16 Nov 2020 15:08:20 +0000
Subject: [PATCH 157/340] [devscripts/make_lazy_extractors] Correct a spelling
 mistake (#26991)

---
 devscripts/make_lazy_extractors.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/devscripts/make_lazy_extractors.py b/devscripts/make_lazy_extractors.py
index 0a1762dbc..878ae72b1 100644
--- a/devscripts/make_lazy_extractors.py
+++ b/devscripts/make_lazy_extractors.py
@@ -61,7 +61,7 @@ def build_lazy_ie(ie, name):
     return s
 
 
-# find the correct sorting and add the required base classes so that sublcasses
+# find the correct sorting and add the required base classes so that subclasses
 # can be correctly created
 classes = _ALL_CLASSES[:-1]
 ordered_cls = []

From 2ea9c97432a5342f70ed87d440cb1ec97a21cbde Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 16:18:37 +0100
Subject: [PATCH 158/340] [nbc] fix NBCNews/Today/MSNBC extraction

---
 youtube_dl/extractor/nbc.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py
index 6f3cb3003..ea5f5a315 100644
--- a/youtube_dl/extractor/nbc.py
+++ b/youtube_dl/extractor/nbc.py
@@ -10,7 +10,6 @@ from .adobepass import AdobePassIE
 from ..compat import compat_urllib_parse_unquote
 from ..utils import (
     int_or_none,
-    js_to_json,
     parse_duration,
     smuggle_url,
     try_get,
@@ -394,8 +393,8 @@ class NBCNewsIE(ThePlatformIE):
         webpage = self._download_webpage(url, video_id)
 
         data = self._parse_json(self._search_regex(
-            r'window\.__data\s*=\s*({.+});', webpage,
-            'bootstrap json'), video_id, js_to_json)
+            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
+            webpage, 'bootstrap json'), video_id)['props']['initialState']
         video_data = try_get(data, lambda x: x['video']['current'], dict)
         if not video_data:
             video_data = data['article']['content'][0]['primaryMedia']['video']
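
The fix here swaps the legacy window.__data bootstrap object (which needed js_to_json) for the Next.js __NEXT_DATA__ script tag, with the relevant data now sitting under props.initialState. A standalone sketch of that parsing step, assuming the page HTML has already been fetched (the helper name is made up for illustration):

    import json
    import re

    def nbc_initial_state(webpage):
        # Same regex as in the diff above; note it relies on the JSON sitting
        # on a single line, since re.DOTALL is not set.
        mobj = re.search(
            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>', webpage)
        if not mobj:
            raise ValueError('__NEXT_DATA__ script tag not found')
        return json.loads(mobj.group(1))['props']['initialState']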

From 650aec4a984c3286b015de9870bef4e51bf6303f Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 16:21:48 +0100
Subject: [PATCH 159/340] [usanetwork] fix extraction

---
 youtube_dl/extractor/usanetwork.py | 82 ++++++------------------------
 1 file changed, 16 insertions(+), 66 deletions(-)

diff --git a/youtube_dl/extractor/usanetwork.py b/youtube_dl/extractor/usanetwork.py
index 54c7495cc..e3784e55f 100644
--- a/youtube_dl/extractor/usanetwork.py
+++ b/youtube_dl/extractor/usanetwork.py
@@ -1,74 +1,24 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-from .adobepass import AdobePassIE
-from ..utils import (
-    NO_DEFAULT,
-    smuggle_url,
-    update_url_query,
-)
+from .nbc import NBCIE
 
 
-class USANetworkIE(AdobePassIE):
-    _VALID_URL = r'https?://(?:www\.)?usanetwork\.com/(?:[^/]+/videos|movies)/(?P<id>[^/?#]+)'
-    _TEST = {
-        'url': 'http://www.usanetwork.com/mrrobot/videos/hpe-cybersecurity',
-        'md5': '33c0d2ba381571b414024440d08d57fd',
+class USANetworkIE(NBCIE):
+    _VALID_URL = r'https?(?P<permalink>://(?:www\.)?usanetwork\.com/[^/]+/video/[^/]+/(?P<id>\d+))'
+    _TESTS = [{
+        'url': 'https://www.usanetwork.com/peacock-trailers/video/intelligence-trailer/4185302',
         'info_dict': {
-            'id': '3086229',
+            'id': '4185302',
             'ext': 'mp4',
-            'title': 'HPE Cybersecurity',
-            'description': 'The more we digitize our world, the more vulnerable we are.',
-            'upload_date': '20160818',
-            'timestamp': 1471535460,
-            'uploader': 'NBCU-USA',
+            'title': 'Intelligence (Trailer)',
+            'description': 'A maverick NSA agent enlists the help of a junior systems analyst in a workplace power grab.',
+            'upload_date': '20200715',
+            'timestamp': 1594785600,
+            'uploader': 'NBCU-MPAT',
         },
-    }
-
-    def _real_extract(self, url):
-        display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
-
-        def _x(name, default=NO_DEFAULT):
-            return self._search_regex(
-                r'data-%s\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % name,
-                webpage, name, default=default, group='value')
-
-        video_id = _x('mpx-guid')
-        title = _x('episode-title')
-        mpx_account_id = _x('mpx-account-id', '2304992029')
-
-        query = {
-            'mbr': 'true',
-        }
-        if _x('is-full-episode', None) == '1':
-            query['manifest'] = 'm3u'
-
-        if _x('is-entitlement', None) == '1':
-            adobe_pass = {}
-            drupal_settings = self._search_regex(
-                r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);',
-                webpage, 'drupal settings', fatal=False)
-            if drupal_settings:
-                drupal_settings = self._parse_json(drupal_settings, video_id, fatal=False)
-                if drupal_settings:
-                    adobe_pass = drupal_settings.get('adobePass', {})
-            resource = self._get_mvpd_resource(
-                adobe_pass.get('adobePassResourceId', 'usa'),
-                title, video_id, _x('episode-rating', 'TV-14'))
-            query['auth'] = self._extract_mvpd_auth(
-                url, video_id, adobe_pass.get('adobePassRequestorId', 'usa'), resource)
-
-        info = self._search_json_ld(webpage, video_id, default={})
-        info.update({
-            '_type': 'url_transparent',
-            'url': smuggle_url(update_url_query(
-                'http://link.theplatform.com/s/HNK2IC/media/guid/%s/%s' % (mpx_account_id, video_id),
-                query), {'force_smil_url': True}),
-            'id': video_id,
-            'title': title,
-            'series': _x('show-title', None),
-            'episode': title,
-            'ie_key': 'ThePlatform',
-        })
-        return info
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+    }]
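
With the Adobe Pass scraping gone, USANetworkIE is now just NBCIE with a different _VALID_URL; like NBCIE's own pattern, it captures everything after the scheme as a 'permalink' group that the parent extractor reassembles into a full URL. A quick illustration of how the new pattern splits a URL, using the test URL from the diff above (the constant name is mine):

    import re

    USANETWORK_VALID_URL = (
        r'https?(?P<permalink>://(?:www\.)?usanetwork\.com'
        r'/[^/]+/video/[^/]+/(?P<id>\d+))')

    url = ('https://www.usanetwork.com'
           '/peacock-trailers/video/intelligence-trailer/4185302')
    mobj = re.match(USANETWORK_VALID_URL, url)
    print(mobj.group('id'))         # -> 4185302
    print(mobj.group('permalink'))  # -> ://www.usanetwork.com/peacock-trailers/video/intelligence-trailer/4185302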

From 059fa9aa8119154d4b92ff31f7f65d68675b83be Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 16:35:08 +0100
Subject: [PATCH 160/340] [vlive] fix extraction

---
 youtube_dl/extractor/vlive.py | 333 +++++++++++-----------------------
 1 file changed, 105 insertions(+), 228 deletions(-)

diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py
index f79531e6f..df1dc78dd 100644
--- a/youtube_dl/extractor/vlive.py
+++ b/youtube_dl/extractor/vlive.py
@@ -1,25 +1,30 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-import time
 import itertools
+import json
 
-from .common import InfoExtractor
 from .naver import NaverBaseIE
-from ..compat import compat_str
+from ..compat import (
+    compat_HTTPError,
+    compat_str,
+)
 from ..utils import (
     ExtractorError,
+    int_or_none,
     merge_dicts,
-    remove_start,
     try_get,
     urlencode_postdata,
 )
 
 
-class VLiveIE(NaverBaseIE):
+class VLiveBaseIE(NaverBaseIE):
+    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
+
+
+class VLiveIE(VLiveBaseIE):
     IE_NAME = 'vlive'
-    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/(?:video|embed)/(?P<id>[0-9]+)'
     _NETRC_MACHINE = 'vlive'
     _TESTS = [{
         'url': 'http://www.vlive.tv/video/1326',
@@ -27,7 +32,7 @@ class VLiveIE(NaverBaseIE):
         'info_dict': {
             'id': '1326',
             'ext': 'mp4',
-            'title': "[V LIVE] Girl's Day's Broadcast",
+            'title': "Girl's Day's Broadcast",
             'creator': "Girl's Day",
             'view_count': int,
             'uploader_id': 'muploader_a',
@@ -37,7 +42,7 @@ class VLiveIE(NaverBaseIE):
         'info_dict': {
             'id': '16937',
             'ext': 'mp4',
-            'title': '[V LIVE] 첸백시 걍방',
+            'title': '첸백시 걍방',
             'creator': 'EXO',
             'view_count': int,
             'subtitles': 'mincount:12',
@@ -58,12 +63,11 @@ class VLiveIE(NaverBaseIE):
             'subtitles': 'mincount:10',
         },
         'skip': 'This video is only available for CH+ subscribers',
+    }, {
+        'url': 'https://www.vlive.tv/embed/1326',
+        'only_matching': True,
     }]
 
-    @classmethod
-    def suitable(cls, url):
-        return False if VLivePlaylistIE.suitable(url) else super(VLiveIE, cls).suitable(url)
-
     def _real_initialize(self):
         self._login()
 
@@ -95,173 +99,122 @@ class VLiveIE(NaverBaseIE):
         if not is_logged_in():
             raise ExtractorError('Unable to log in', expected=True)
 
+    def _call_api(self, path_template, video_id, fields=None):
+        query = {'appId': self._APP_ID}
+        if fields:
+            query['fields'] = fields
+        return self._download_json(
+            'https://www.vlive.tv/globalv-web/vam-web/' + path_template % video_id, video_id,
+            'Downloading %s JSON metadata' % path_template.split('/')[-1].split('-')[0],
+            headers={'Referer': 'https://www.vlive.tv/'}, query=query)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        webpage = self._download_webpage(
-            'https://www.vlive.tv/video/%s' % video_id, video_id)
+        try:
+            post = self._call_api(
+                'post/v1.0/officialVideoPost-%s', video_id,
+                'author{nickname},channel{channelCode,channelName},officialVideo{commentCount,exposeStatus,likeCount,playCount,playTime,status,title,type,vodId}')
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+                self.raise_login_required(json.loads(e.cause.read().decode())['message'])
+            raise
 
-        VIDEO_PARAMS_RE = r'\bvlive\.video\.init\(([^)]+)'
-        VIDEO_PARAMS_FIELD = 'video params'
+        video = post['officialVideo']
 
-        params = self._parse_json(self._search_regex(
-            VIDEO_PARAMS_RE, webpage, VIDEO_PARAMS_FIELD, default=''), video_id,
-            transform_source=lambda s: '[' + s + ']', fatal=False)
+        def get_common_fields():
+            channel = post.get('channel') or {}
+            return {
+                'title': video.get('title'),
+                'creator': post.get('author', {}).get('nickname'),
+                'channel': channel.get('channelName'),
+                'channel_id': channel.get('channelCode'),
+                'duration': int_or_none(video.get('playTime')),
+                'view_count': int_or_none(video.get('playCount')),
+                'like_count': int_or_none(video.get('likeCount')),
+                'comment_count': int_or_none(video.get('commentCount')),
+            }
 
-        if not params or len(params) < 7:
-            params = self._search_regex(
-                VIDEO_PARAMS_RE, webpage, VIDEO_PARAMS_FIELD)
-            params = [p.strip(r'"') for p in re.split(r'\s*,\s*', params)]
-
-        status, long_video_id, key = params[2], params[5], params[6]
-        status = remove_start(status, 'PRODUCT_')
-
-        if status in ('LIVE_ON_AIR', 'BIG_EVENT_ON_AIR'):
-            return self._live(video_id, webpage)
-        elif status in ('VOD_ON_AIR', 'BIG_EVENT_INTRO'):
-            return self._replay(video_id, webpage, long_video_id, key)
-
-        if status == 'LIVE_END':
-            raise ExtractorError('Uploading for replay. Please wait...',
-                                 expected=True)
-        elif status == 'COMING_SOON':
-            raise ExtractorError('Coming soon!', expected=True)
-        elif status == 'CANCELED':
-            raise ExtractorError('We are sorry, '
-                                 'but the live broadcast has been canceled.',
-                                 expected=True)
-        elif status == 'ONLY_APP':
-            raise ExtractorError('Unsupported video type', expected=True)
-        else:
-            raise ExtractorError('Unknown status %s' % status)
-
-    def _get_common_fields(self, webpage):
-        title = self._og_search_title(webpage)
-        creator = self._html_search_regex(
-            r'<div[^>]+class="info_area"[^>]*>\s*(?:<em[^>]*>.*?</em\s*>\s*)?<a\s+[^>]*>([^<]+)',
-            webpage, 'creator', fatal=False)
-        thumbnail = self._og_search_thumbnail(webpage)
-        return {
-            'title': title,
-            'creator': creator,
-            'thumbnail': thumbnail,
-        }
-
-    def _live(self, video_id, webpage):
-        init_page = self._download_init_page(video_id)
-
-        live_params = self._search_regex(
-            r'"liveStreamInfo"\s*:\s*(".*"),',
-            init_page, 'live stream info')
-        live_params = self._parse_json(live_params, video_id)
-        live_params = self._parse_json(live_params, video_id)
-
-        formats = []
-        for vid in live_params.get('resolutions', []):
-            formats.extend(self._extract_m3u8_formats(
-                vid['cdnUrl'], video_id, 'mp4',
-                m3u8_id=vid.get('name'),
-                fatal=False, live=True))
-        self._sort_formats(formats)
-
-        info = self._get_common_fields(webpage)
-        info.update({
-            'title': self._live_title(info['title']),
-            'id': video_id,
-            'formats': formats,
-            'is_live': True,
-        })
-        return info
-
-    def _replay(self, video_id, webpage, long_video_id, key):
-        if '' in (long_video_id, key):
-            init_page = self._download_init_page(video_id)
-            video_info = self._parse_json(self._search_regex(
-                (r'(?s)oVideoStatus\s*=\s*({.+?})\s*</script',
-                 r'(?s)oVideoStatus\s*=\s*({.+})'), init_page, 'video info'),
-                video_id)
-            if video_info.get('status') == 'NEED_CHANNEL_PLUS':
-                self.raise_login_required(
-                    'This video is only available for CH+ subscribers')
-            long_video_id, key = video_info['vid'], video_info['inkey']
-
-        return merge_dicts(
-            self._get_common_fields(webpage),
-            self._extract_video_info(video_id, long_video_id, key))
-
-    def _download_init_page(self, video_id):
-        return self._download_webpage(
-            'https://www.vlive.tv/video/init/view',
-            video_id, note='Downloading live webpage',
-            data=urlencode_postdata({'videoSeq': video_id}),
-            headers={
-                'Referer': 'https://www.vlive.tv/video/%s' % video_id,
-                'Content-Type': 'application/x-www-form-urlencoded'
-            })
+        video_type = video.get('type')
+        if video_type == 'VOD':
+            inkey = self._call_api('video/v1.0/vod/%s/inkey', video_id)['inkey']
+            vod_id = video['vodId']
+            return merge_dicts(
+                get_common_fields(),
+                self._extract_video_info(video_id, vod_id, inkey))
+        elif video_type == 'LIVE':
+            status = video.get('status')
+            if status == 'ON_AIR':
+                stream_url = self._call_api(
+                    'old/v3/live/%s/playInfo',
+                    video_id)['result']['adaptiveStreamUrl']
+                formats = self._extract_m3u8_formats(stream_url, video_id, 'mp4')
+                info = get_common_fields()
+                info.update({
+                    'title': self._live_title(video['title']),
+                    'id': video_id,
+                    'formats': formats,
+                    'is_live': True,
+                })
+                return info
+            elif status == 'ENDED':
+                raise ExtractorError(
+                    'Uploading for replay. Please wait...', expected=True)
+            elif status == 'RESERVED':
+                raise ExtractorError('Coming soon!', expected=True)
+            elif video.get('exposeStatus') == 'CANCEL':
+                raise ExtractorError(
+                    'We are sorry, but the live broadcast has been canceled.',
+                    expected=True)
+            else:
+                raise ExtractorError('Unknown status ' + status)
 
 
-class VLiveChannelIE(InfoExtractor):
+class VLiveChannelIE(VLiveBaseIE):
     IE_NAME = 'vlive:channel'
-    _VALID_URL = r'https?://channels\.vlive\.tv/(?P<id>[0-9A-Z]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:channels\.vlive\.tv|(?:(?:www|m)\.)?vlive\.tv/channel)/(?P<id>[0-9A-Z]+)'
+    _TESTS = [{
         'url': 'http://channels.vlive.tv/FCD4B',
         'info_dict': {
             'id': 'FCD4B',
             'title': 'MAMAMOO',
         },
         'playlist_mincount': 110
-    }
-    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'
+    }, {
+        'url': 'https://www.vlive.tv/channel/FCD4B',
+        'only_matching': True,
+    }]
+
+    def _call_api(self, path, channel_key_suffix, channel_value, note, query):
+        q = {
+            'app_id': self._APP_ID,
+            'channel' + channel_key_suffix: channel_value,
+        }
+        q.update(query)
+        return self._download_json(
+            'http://api.vfan.vlive.tv/vproxy/channelplus/' + path,
+            channel_value, note='Downloading ' + note, query=q)['result']
 
     def _real_extract(self, url):
         channel_code = self._match_id(url)
 
-        webpage = self._download_webpage(
-            'http://channels.vlive.tv/%s/video' % channel_code, channel_code)
+        channel_seq = self._call_api(
+            'decodeChannelCode', 'Code', channel_code,
+            'decode channel code', {})['channelSeq']
 
-        app_id = None
-
-        app_js_url = self._search_regex(
-            r'<script[^>]+src=(["\'])(?P<url>http.+?/app\.js.*?)\1',
-            webpage, 'app js', default=None, group='url')
-
-        if app_js_url:
-            app_js = self._download_webpage(
-                app_js_url, channel_code, 'Downloading app JS', fatal=False)
-            if app_js:
-                app_id = self._search_regex(
-                    r'Global\.VFAN_APP_ID\s*=\s*[\'"]([^\'"]+)[\'"]',
-                    app_js, 'app id', default=None)
-
-        app_id = app_id or self._APP_ID
-
-        channel_info = self._download_json(
-            'http://api.vfan.vlive.tv/vproxy/channelplus/decodeChannelCode',
-            channel_code, note='Downloading decode channel code',
-            query={
-                'app_id': app_id,
-                'channelCode': channel_code,
-                '_': int(time.time())
-            })
-
-        channel_seq = channel_info['result']['channelSeq']
         channel_name = None
         entries = []
 
         for page_num in itertools.count(1):
-            video_list = self._download_json(
-                'http://api.vfan.vlive.tv/vproxy/channelplus/getChannelVideoList',
-                channel_code, note='Downloading channel list page #%d' % page_num,
-                query={
-                    'app_id': app_id,
-                    'channelSeq': channel_seq,
+            video_list = self._call_api(
+                'getChannelVideoList', 'Seq', channel_seq,
+                'channel list page #%d' % page_num, {
                     # Large values of maxNumOfRows (~300 or above) may cause
                     # empty responses (see [1]), e.g. this happens for [2] that
                     # has more than 300 videos.
                     # 1. https://github.com/ytdl-org/youtube-dl/issues/13830
                     # 2. http://channels.vlive.tv/EDBF.
                     'maxNumOfRows': 100,
-                    '_': int(time.time()),
                     'pageNo': page_num
                 }
             )
@@ -269,11 +222,11 @@ class VLiveChannelIE(InfoExtractor):
             if not channel_name:
                 channel_name = try_get(
                     video_list,
-                    lambda x: x['result']['channelInfo']['channelName'],
+                    lambda x: x['channelInfo']['channelName'],
                     compat_str)
 
             videos = try_get(
-                video_list, lambda x: x['result']['videoList'], list)
+                video_list, lambda x: x['videoList'], list)
             if not videos:
                 break
 
@@ -289,79 +242,3 @@ class VLiveChannelIE(InfoExtractor):
 
         return self.playlist_result(
             entries, channel_code, channel_name)
-
-
-class VLivePlaylistIE(InfoExtractor):
-    IE_NAME = 'vlive:playlist'
-    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<video_id>[0-9]+)/playlist/(?P<id>[0-9]+)'
-    _VIDEO_URL_TEMPLATE = 'http://www.vlive.tv/video/%s'
-    _TESTS = [{
-        # regular working playlist
-        'url': 'https://www.vlive.tv/video/117956/playlist/117963',
-        'info_dict': {
-            'id': '117963',
-            'title': '아이돌룸(IDOL ROOM) 41회 - (여자)아이들'
-        },
-        'playlist_mincount': 10
-    }, {
-        # playlist with no playlistVideoSeqs
-        'url': 'http://www.vlive.tv/video/22867/playlist/22912',
-        'info_dict': {
-            'id': '22867',
-            'ext': 'mp4',
-            'title': '[V LIVE] Valentine Day Message from MINA',
-            'creator': 'TWICE',
-            'view_count': int
-        },
-        'params': {
-            'skip_download': True,
-        }
-    }]
-
-    def _build_video_result(self, video_id, message):
-        self.to_screen(message)
-        return self.url_result(
-            self._VIDEO_URL_TEMPLATE % video_id,
-            ie=VLiveIE.ie_key(), video_id=video_id)
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id, playlist_id = mobj.group('video_id', 'id')
-
-        if self._downloader.params.get('noplaylist'):
-            return self._build_video_result(
-                video_id,
-                'Downloading just video %s because of --no-playlist'
-                % video_id)
-
-        self.to_screen(
-            'Downloading playlist %s - add --no-playlist to just download video'
-            % playlist_id)
-
-        webpage = self._download_webpage(
-            'http://www.vlive.tv/video/%s/playlist/%s'
-            % (video_id, playlist_id), playlist_id)
-
-        raw_item_ids = self._search_regex(
-            r'playlistVideoSeqs\s*=\s*(\[[^]]+\])', webpage,
-            'playlist video seqs', default=None, fatal=False)
-
-        if not raw_item_ids:
-            return self._build_video_result(
-                video_id,
-                'Downloading just video %s because no playlist was found'
-                % video_id)
-
-        item_ids = self._parse_json(raw_item_ids, playlist_id)
-
-        entries = [
-            self.url_result(
-                self._VIDEO_URL_TEMPLATE % item_id, ie=VLiveIE.ie_key(),
-                video_id=compat_str(item_id))
-            for item_id in item_ids]
-
-        playlist_name = self._html_search_regex(
-            r'<div[^>]+class="[^"]*multicam_playlist[^>]*>\s*<h3[^>]+>([^<]+)',
-            webpage, 'playlist title', fatal=False)
-
-        return self.playlist_result(entries, playlist_id, playlist_name)

From f22fa82d7ffed28e0a1dd7b8370a699ee5d894d6 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 16:36:56 +0100
Subject: [PATCH 161/340] [extractors] Remove VLivePlaylistIE import

---
 youtube_dl/extractor/extractors.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 9d7fecfe8..302ce6be4 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1357,7 +1357,6 @@ from .vk import (
 from .vlive import (
     VLiveIE,
     VLiveChannelIE,
-    VLivePlaylistIE
 )
 from .vodlocker import VodlockerIE
 from .vodpl import VODPlIE

From fe13087cd152c7eb98ea418bad0419a15c767c08 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 17:17:52 +0100
Subject: [PATCH 162/340] [rai] fix RaiPlay extraction

---
 youtube_dl/extractor/rai.py | 61 ++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 28 deletions(-)

diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
index 207a6c247..bee2d53f5 100644
--- a/youtube_dl/extractor/rai.py
+++ b/youtube_dl/extractor/rai.py
@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import re
@@ -16,7 +17,6 @@ from ..utils import (
     int_or_none,
     parse_duration,
     strip_or_none,
-    try_get,
     unescapeHTML,
     unified_strdate,
     unified_timestamp,
@@ -141,6 +141,7 @@ class RaiPlayIE(RaiBaseIE):
             'series': 'La Casa Bianca',
             'season': '2016',
         },
+        'skip': 'This content is not available',
     }, {
         'url': 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
         'md5': '8970abf8caf8aef4696e7b1f2adfc696',
@@ -148,14 +149,12 @@ class RaiPlayIE(RaiBaseIE):
             'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
             'ext': 'mp4',
             'title': 'Report del 07/04/2014',
-            'alt_title': 'S2013/14 - Puntata del 07/04/2014',
-            'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
+            'alt_title': 'St 2013/14 - Espresso nel caffè - 07/04/2014',
+            'description': 'md5:d730c168a58f4bb35600fc2f881ec04e',
             'thumbnail': r're:^https?://.*\.jpg$',
-            'uploader': 'Rai 5',
-            'creator': 'Rai 5',
+            'uploader': 'Rai Gulp',
             'duration': 6160,
             'series': 'Report',
-            'season_number': 5,
             'season': '2013/14',
         },
         'params': {
@@ -167,48 +166,51 @@ class RaiPlayIE(RaiBaseIE):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        url, video_id = mobj.group('url', 'id')
+        url, video_id = re.match(self._VALID_URL, url).groups()
 
         media = self._download_json(
-            '%s?json' % url, video_id, 'Downloading video JSON')
+            url.replace('.html', '.json'), video_id, 'Downloading video JSON')
 
         title = media['name']
 
         video = media['video']
 
-        relinker_info = self._extract_relinker_info(video['contentUrl'], video_id)
+        relinker_info = self._extract_relinker_info(video['content_url'], video_id)
         self._sort_formats(relinker_info['formats'])
 
         thumbnails = []
-        if 'images' in media:
-            for _, value in media.get('images').items():
-                if value:
-                    thumbnails.append({
-                        'url': value.replace('[RESOLUTION]', '600x400')
-                    })
+        for _, value in media.get('images', {}).items():
+            if value:
+                thumbnails.append({
+                    'url': urljoin(url, value),
+                })
 
-        timestamp = unified_timestamp(try_get(
-            media, lambda x: x['availabilities'][0]['start'], compat_str))
+        date_published = media.get('date_published')
+        time_published = media.get('time_published')
+        if date_published and time_published:
+            date_published += ' ' + time_published
 
         subtitles = self._extract_subtitles(url, video.get('subtitles'))
 
+        program_info = media.get('program_info') or {}
+        season = media.get('season')
+
         info = {
             'id': video_id,
             'title': self._live_title(title) if relinker_info.get(
                 'is_live') else title,
-            'alt_title': media.get('subtitle'),
+            'alt_title': strip_or_none(media.get('subtitle')),
             'description': media.get('description'),
             'uploader': strip_or_none(media.get('channel')),
-            'creator': strip_or_none(media.get('editor')),
+            'creator': strip_or_none(media.get('editor') or None),
             'duration': parse_duration(video.get('duration')),
-            'timestamp': timestamp,
+            'timestamp': unified_timestamp(date_published),
             'thumbnails': thumbnails,
-            'series': try_get(
-                media, lambda x: x['isPartOf']['name'], compat_str),
-            'season_number': int_or_none(try_get(
-                media, lambda x: x['isPartOf']['numeroStagioni'])),
-            'season': media.get('stagione') or None,
+            'series': program_info.get('name'),
+            'season_number': int_or_none(season),
+            'season': season if (season and not season.isdigit()) else None,
+            'episode': media.get('episode_title'),
+            'episode_number': int_or_none(media.get('episode')),
             'subtitles': subtitles,
         }
 
@@ -300,7 +302,8 @@ class RaiIE(RaiBaseIE):
             'thumbnail': r're:^https?://.*\.jpg$',
             'duration': 1758,
             'upload_date': '20140612',
-        }
+        },
+        'skip': 'This content is available only in Italy',
     }, {
         # with ContentItem in many metas
         'url': 'http://www.rainews.it/dl/rainews/media/Weekend-al-cinema-da-Hollywood-arriva-il-thriller-di-Tate-Taylor-La-ragazza-del-treno-1632c009-c843-4836-bb65-80c33084a64b.html',
@@ -316,7 +319,7 @@ class RaiIE(RaiBaseIE):
     }, {
         # with ContentItem in og:url
         'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-efb17665-691c-45d5-a60c-5301333cbb0c.html',
-        'md5': '11959b4e44fa74de47011b5799490adf',
+        'md5': '6865dd00cf0bbf5772fdd89d59bd768a',
         'info_dict': {
             'id': 'efb17665-691c-45d5-a60c-5301333cbb0c',
             'ext': 'mp4',
@@ -338,6 +341,7 @@ class RaiIE(RaiBaseIE):
             'thumbnail': r're:^https?://.*\.jpg$',
             'upload_date': '20141221',
         },
+        'skip': 'This content is not available',
     }, {
         # initEdizione('ContentItem-...'
         'url': 'http://www.tg1.rai.it/dl/tg1/2010/edizioni/ContentSet-9b6e0cba-4bef-4aef-8cf0-9f7f665b7dfb-tg1.html?item=undefined',
@@ -360,6 +364,7 @@ class RaiIE(RaiBaseIE):
         'params': {
             'skip_download': True,
         },
+        'skip': 'This content is available only in Italy',
     }, {
         # HLS live stream with ContentItem in og:url
         'url': 'http://www.rainews.it/dl/rainews/live/ContentItem-3156f2f2-dc70-4953-8e2f-70d7489d4ce9.html',

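On the timestamp change above: the RaiPlay JSON now exposes the publish date and time as two separate fields, which the patch joins before parsing. A minimal standalone sketch, assuming unified_timestamp from youtube_dl.utils and invented field values:

    from youtube_dl.utils import unified_timestamp

    media = {'date_published': '2014-04-07', 'time_published': '21:30:00'}  # invented values

    date_published = media.get('date_published')
    time_published = media.get('time_published')
    if date_published and time_published:
        date_published += ' ' + time_published

    print(unified_timestamp(date_published))  # 1396906200 (seconds since epoch, UTC)
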
From 3f1748b9445e9d9367d29221c4b7bf9b88895e4e Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 17:26:09 +0100
Subject: [PATCH 163/340] [bandcamp] fix extraction

---
 youtube_dl/extractor/bandcamp.py | 149 ++++++++++++-------------------
 1 file changed, 58 insertions(+), 91 deletions(-)

diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
index f14b407dc..731c7c25c 100644
--- a/youtube_dl/extractor/bandcamp.py
+++ b/youtube_dl/extractor/bandcamp.py
@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import random
@@ -5,10 +6,7 @@ import re
 import time
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urlparse,
-)
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -17,30 +15,32 @@ from ..utils import (
     parse_filesize,
     str_or_none,
     try_get,
-    unescapeHTML,
     update_url_query,
     unified_strdate,
     unified_timestamp,
     url_or_none,
+    urljoin,
 )
 
 
 class BandcampIE(InfoExtractor):
-    _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<title>[^/?#&]+)'
+    _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
         'md5': 'c557841d5e50261777a6585648adf439',
         'info_dict': {
             'id': '1812978515',
             'ext': 'mp3',
-            'title': "youtube-dl  \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
+            'title': "youtube-dl  \"'/\\ä↭ - youtube-dl  \"'/\\ä↭ - youtube-dl test song \"'/\\ä↭",
             'duration': 9.8485,
+            'uploader': 'youtube-dl  "\'/\\ä↭',
+            'upload_date': '20121129',
+            'timestamp': 1354224127,
         },
         '_skip': 'There is a limit of 200 free downloads / month for the test song'
     }, {
         # free download
         'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
-        'md5': '853e35bf34aa1d6fe2615ae612564b36',
         'info_dict': {
             'id': '2650410135',
             'ext': 'aiff',
@@ -79,11 +79,16 @@ class BandcampIE(InfoExtractor):
         },
     }]
 
+    def _extract_data_attr(self, webpage, video_id, attr='tralbum', fatal=True):
+        return self._parse_json(self._html_search_regex(
+            r'data-%s=(["\'])({.+?})\1' % attr, webpage,
+            attr + ' data', group=2), video_id, fatal=fatal)
+
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        title = mobj.group('title')
+        title = self._match_id(url)
         webpage = self._download_webpage(url, title)
-        thumbnail = self._html_search_meta('og:image', webpage, default=None)
+        tralbum = self._extract_data_attr(webpage, title)
+        thumbnail = self._og_search_thumbnail(webpage)
 
         track_id = None
         track = None
@@ -91,10 +96,7 @@ class BandcampIE(InfoExtractor):
         duration = None
 
         formats = []
-        track_info = self._parse_json(
-            self._search_regex(
-                r'trackinfo\s*:\s*\[\s*({.+?})\s*\]\s*,\s*?\n',
-                webpage, 'track info', default='{}'), title)
+        track_info = try_get(tralbum, lambda x: x['trackinfo'][0], dict)
         if track_info:
             file_ = track_info.get('file')
             if isinstance(file_, dict):
@@ -111,37 +113,25 @@ class BandcampIE(InfoExtractor):
                         'abr': int_or_none(abr_str),
                     })
             track = track_info.get('title')
-            track_id = str_or_none(track_info.get('track_id') or track_info.get('id'))
+            track_id = str_or_none(
+                track_info.get('track_id') or track_info.get('id'))
             track_number = int_or_none(track_info.get('track_num'))
             duration = float_or_none(track_info.get('duration'))
 
-        def extract(key):
-            return self._search_regex(
-                r'\b%s\s*["\']?\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % key,
-                webpage, key, default=None, group='value')
-
-        artist = extract('artist')
-        album = extract('album_title')
+        embed = self._extract_data_attr(webpage, title, 'embed', False)
+        current = tralbum.get('current') or {}
+        artist = embed.get('artist') or current.get('artist') or tralbum.get('artist')
         timestamp = unified_timestamp(
-            extract('publish_date') or extract('album_publish_date'))
-        release_date = unified_strdate(extract('album_release_date'))
+            current.get('publish_date') or tralbum.get('album_publish_date'))
 
-        download_link = self._search_regex(
-            r'freeDownloadPage\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
-            'download link', default=None, group='url')
+        download_link = tralbum.get('freeDownloadPage')
         if download_link:
-            track_id = self._search_regex(
-                r'(?ms)var TralbumData = .*?[{,]\s*id: (?P<id>\d+),?$',
-                webpage, 'track id')
+            track_id = compat_str(tralbum['id'])
 
             download_webpage = self._download_webpage(
                 download_link, track_id, 'Downloading free downloads page')
 
-            blob = self._parse_json(
-                self._search_regex(
-                    r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
-                    'blob', group='blob'),
-                track_id, transform_source=unescapeHTML)
+            blob = self._extract_data_attr(download_webpage, track_id, 'blob')
 
             info = try_get(
                 blob, (lambda x: x['digital_items'][0],
@@ -207,20 +197,20 @@ class BandcampIE(InfoExtractor):
             'thumbnail': thumbnail,
             'uploader': artist,
             'timestamp': timestamp,
-            'release_date': release_date,
+            'release_date': unified_strdate(tralbum.get('album_release_date')),
             'duration': duration,
             'track': track,
             'track_number': track_number,
             'track_id': track_id,
             'artist': artist,
-            'album': album,
+            'album': embed.get('album_title'),
             'formats': formats,
         }
 
 
-class BandcampAlbumIE(InfoExtractor):
+class BandcampAlbumIE(BandcampIE):
     IE_NAME = 'Bandcamp:album'
-    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^/?#&]+))?'
+    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<id>[^/?#&]+))?'
 
     _TESTS = [{
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -230,7 +220,10 @@ class BandcampAlbumIE(InfoExtractor):
                 'info_dict': {
                     'id': '1353101989',
                     'ext': 'mp3',
-                    'title': 'Intro',
+                    'title': 'Blazo - Intro',
+                    'timestamp': 1311756226,
+                    'upload_date': '20110727',
+                    'uploader': 'Blazo',
                 }
             },
             {
@@ -238,7 +231,10 @@ class BandcampAlbumIE(InfoExtractor):
                 'info_dict': {
                     'id': '38097443',
                     'ext': 'mp3',
-                    'title': 'Kero One - Keep It Alive (Blazo remix)',
+                    'title': 'Blazo - Kero One - Keep It Alive (Blazo remix)',
+                    'timestamp': 1311757238,
+                    'upload_date': '20110727',
+                    'uploader': 'Blazo',
                 }
             },
         ],
@@ -294,41 +290,31 @@ class BandcampAlbumIE(InfoExtractor):
                 else super(BandcampAlbumIE, cls).suitable(url))
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        uploader_id = mobj.group('subdomain')
-        album_id = mobj.group('album_id')
+        uploader_id, album_id = re.match(self._VALID_URL, url).groups()
         playlist_id = album_id or uploader_id
         webpage = self._download_webpage(url, playlist_id)
-        track_elements = re.findall(
-            r'(?s)<div[^>]*>(.*?<a[^>]+href="([^"]+?)"[^>]+itemprop="url"[^>]*>.*?)</div>', webpage)
-        if not track_elements:
+        tralbum = self._extract_data_attr(webpage, playlist_id)
+        track_info = tralbum.get('trackinfo')
+        if not track_info:
             raise ExtractorError('The page doesn\'t contain any tracks')
         # Only tracks with duration info have songs
         entries = [
             self.url_result(
-                compat_urlparse.urljoin(url, t_path),
-                ie=BandcampIE.ie_key(),
-                video_title=self._search_regex(
-                    r'<span\b[^>]+\bitemprop=["\']name["\'][^>]*>([^<]+)',
-                    elem_content, 'track title', fatal=False))
-            for elem_content, t_path in track_elements
-            if self._html_search_meta('duration', elem_content, default=None)]
+                urljoin(url, t['title_link']), BandcampIE.ie_key(),
+                str_or_none(t.get('track_id') or t.get('id')), t.get('title'))
+            for t in track_info
+            if t.get('duration')]
 
-        title = self._html_search_regex(
-            r'album_title\s*:\s*"((?:\\.|[^"\\])+?)"',
-            webpage, 'title', fatal=False)
-        if title:
-            title = title.replace(r'\"', '"')
         return {
             '_type': 'playlist',
             'uploader_id': uploader_id,
             'id': playlist_id,
-            'title': title,
+            'title': try_get(tralbum, lambda x: x['current']['title'], compat_str),
             'entries': entries,
         }
 
 
-class BandcampWeeklyIE(InfoExtractor):
+class BandcampWeeklyIE(BandcampIE):
     IE_NAME = 'Bandcamp:weekly'
     _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
     _TESTS = [{
@@ -343,29 +329,23 @@ class BandcampWeeklyIE(InfoExtractor):
             'release_date': '20170404',
             'series': 'Bandcamp Weekly',
             'episode': 'Magic Moments',
-            'episode_number': 208,
             'episode_id': '224',
-        }
+        },
+        'params': {
+            'format': 'opus-lo',
+        },
     }, {
         'url': 'https://bandcamp.com/?blah/blah@&show=228',
         'only_matching': True
     }]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
+        show_id = self._match_id(url)
+        webpage = self._download_webpage(url, show_id)
 
-        blob = self._parse_json(
-            self._search_regex(
-                r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage,
-                'blob', group='blob'),
-            video_id, transform_source=unescapeHTML)
+        blob = self._extract_data_attr(webpage, show_id, 'blob')
 
-        show = blob['bcw_show']
-
-        # This is desired because any invalid show id redirects to `bandcamp.com`
-        # which happens to expose the latest Bandcamp Weekly episode.
-        show_id = int_or_none(show.get('show_id')) or int_or_none(video_id)
+        show = blob['bcw_data'][show_id]
 
         formats = []
         for format_id, format_url in show['audio_stream'].items():
@@ -390,20 +370,8 @@ class BandcampWeeklyIE(InfoExtractor):
         if subtitle:
             title += ' - %s' % subtitle
 
-        episode_number = None
-        seq = blob.get('bcw_seq')
-
-        if seq and isinstance(seq, list):
-            try:
-                episode_number = next(
-                    int_or_none(e.get('episode_number'))
-                    for e in seq
-                    if isinstance(e, dict) and int_or_none(e.get('id')) == show_id)
-            except StopIteration:
-                pass
-
         return {
-            'id': video_id,
+            'id': show_id,
             'title': title,
             'description': show.get('desc') or show.get('short_desc'),
             'duration': float_or_none(show.get('audio_duration')),
@@ -411,7 +379,6 @@ class BandcampWeeklyIE(InfoExtractor):
             'release_date': unified_strdate(show.get('published_date')),
             'series': 'Bandcamp Weekly',
             'episode': show.get('subtitle'),
-            'episode_number': episode_number,
-            'episode_id': compat_str(video_id),
+            'episode_id': show_id,
             'formats': formats
         }

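The core of the Bandcamp fix above is that track metadata now lives in a data-tralbum HTML attribute rather than in inline JavaScript. A rough standalone sketch of what the new _extract_data_attr helper does, with the regex taken from the patch and a made-up page fragment (the real helper also HTML-unescapes the attribute via _html_search_regex):

    import json
    import re

    # made-up fragment of a Bandcamp track page
    webpage = """<script data-tralbum='{"id": 1812978515, "trackinfo": [{"title": "Intro", "duration": 9.8485}]}'></script>"""

    tralbum = json.loads(re.search(
        r'data-tralbum=(["\'])({.+?})\1', webpage).group(2))
    print(tralbum['trackinfo'][0]['title'])  # Intro
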
From 9448a203126105d6462299bddbe3a6a32bc017fd Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 18:57:33 +0100
Subject: [PATCH 164/340] [condenast] fix extraction and extract subtitles

---
 youtube_dl/extractor/condenast.py | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py
index ed278fefc..d5e77af32 100644
--- a/youtube_dl/extractor/condenast.py
+++ b/youtube_dl/extractor/condenast.py
@@ -16,6 +16,8 @@ from ..utils import (
     mimetype2ext,
     orderedSet,
     parse_iso8601,
+    strip_or_none,
+    try_get,
 )
 
 
@@ -82,6 +84,7 @@ class CondeNastIE(InfoExtractor):
             'uploader': 'gq',
             'upload_date': '20170321',
             'timestamp': 1490126427,
+            'description': 'How much grimmer would things be if these people were competent?',
         },
     }, {
         # JS embed
@@ -93,7 +96,7 @@ class CondeNastIE(InfoExtractor):
             'title': '3D printed TSA Travel Sentry keys really do open TSA locks',
             'uploader': 'arstechnica',
             'upload_date': '20150916',
-            'timestamp': 1442434955,
+            'timestamp': 1442434920,
         }
     }, {
         'url': 'https://player.cnevids.com/inline/video/59138decb57ac36b83000005.js?target=js-cne-player',
@@ -196,6 +199,13 @@ class CondeNastIE(InfoExtractor):
             })
         self._sort_formats(formats)
 
+        subtitles = {}
+        for t, caption in video_info.get('captions', {}).items():
+            caption_url = caption.get('src')
+            if not (t in ('vtt', 'srt', 'tml') and caption_url):
+                continue
+            subtitles.setdefault('en', []).append({'url': caption_url})
+
         return {
             'id': video_id,
             'formats': formats,
@@ -208,6 +218,7 @@ class CondeNastIE(InfoExtractor):
             'season': video_info.get('season_title'),
             'timestamp': parse_iso8601(video_info.get('premiere_date')),
             'categories': video_info.get('categories'),
+            'subtitles': subtitles,
         }
 
     def _real_extract(self, url):
@@ -225,8 +236,16 @@ class CondeNastIE(InfoExtractor):
         if url_type == 'series':
             return self._extract_series(url, webpage)
         else:
-            params = self._extract_video_params(webpage, display_id)
-            info = self._search_json_ld(
-                webpage, display_id, fatal=False)
+            video = try_get(self._parse_json(self._search_regex(
+                r'__PRELOADED_STATE__\s*=\s*({.+?});', webpage,
+                'preload state', '{}'), display_id),
+                lambda x: x['transformed']['video'])
+            if video:
+                params = {'videoId': video['id']}
+                info = {'description': strip_or_none(video.get('description'))}
+            else:
+                params = self._extract_video_params(webpage, display_id)
+                info = self._search_json_ld(
+                    webpage, display_id, fatal=False)
             info.update(self._extract_video(params))
             return info

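For context on the fallback order above: newer Condé Nast pages ship the video parameters in a __PRELOADED_STATE__ blob, and the old embed-parameter scraping is only used when that blob is missing. A small sketch with an invented page fragment (regex as in the patch):

    import json
    import re

    # invented page fragment
    webpage = 'window.__PRELOADED_STATE__ = {"transformed": {"video": {"id": "59138decb57ac36b83000005", "description": "An example clip."}}};'

    m = re.search(r'__PRELOADED_STATE__\s*=\s*({.+?});', webpage)
    video = json.loads(m.group(1))['transformed']['video'] if m else None
    params = {'videoId': video['id']} if video else None
    print(params)  # {'videoId': '59138decb57ac36b83000005'}
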
From ec2a2ab44132e8000cf0a0a81b793c3ea5fc1903 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 19:04:55 +0100
Subject: [PATCH 165/340] [lbry] Add new extractor

---
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/lbry.py       | 85 ++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)
 create mode 100644 youtube_dl/extractor/lbry.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 302ce6be4..b2baf8057 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -529,6 +529,7 @@ from .laola1tv import (
     EHFTVIE,
     ITTFIE,
 )
+from .lbry import LBRYIE
 from .lci import LCIIE
 from .lcp import (
     LcpPlayIE,
diff --git a/youtube_dl/extractor/lbry.py b/youtube_dl/extractor/lbry.py
new file mode 100644
index 000000000..587deac90
--- /dev/null
+++ b/youtube_dl/extractor/lbry.py
@@ -0,0 +1,85 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    determine_ext,
+    ExtractorError,
+    int_or_none,
+    mimetype2ext,
+    try_get,
+)
+
+
+class LBRYIE(InfoExtractor):
+    IE_NAME = 'lbry.tv'
+    _VALID_URL = r'https?://(?:www\.)?lbry\.tv/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
+    _TESTS = [{
+        # Video
+        'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
+        'md5': '65bd7ec1f6744ada55da8e4c48a2edf9',
+        'info_dict': {
+            'id': '17f983b61f53091fb8ea58a9c56804e4ff8cff4d',
+            'ext': 'mp4',
+            'title': 'First day in LBRY? Start HERE!',
+            'description': 'md5:f6cb5c704b332d37f5119313c2c98f51',
+            'timestamp': 1595694354,
+            'upload_date': '20200725',
+        }
+    }, {
+        # Audio
+        'url': 'https://lbry.tv/@LBRYFoundation:0/Episode-1:e',
+        'md5': 'c94017d3eba9b49ce085a8fad6b98d00',
+        'info_dict': {
+            'id': 'e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
+            'ext': 'mp3',
+            'title': 'The LBRY Foundation Community Podcast Episode 1 - Introduction, Streaming on LBRY, Transcoding',
+            'description': 'md5:661ac4f1db09f31728931d7b88807a61',
+            'timestamp': 1591312601,
+            'upload_date': '20200604',
+        }
+    }]
+
+    def _call_api_proxy(self, method, display_id, params):
+        return self._download_json(
+            'https://api.lbry.tv/api/v1/proxy', display_id,
+            headers={'Content-Type': 'application/json-rpc'},
+            data=json.dumps({
+                'method': method,
+                'params': params,
+            }).encode())['result']
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url).replace(':', '#')
+        uri = 'lbry://' + display_id
+        result = self._call_api_proxy(
+            'resolve', display_id, {'urls': [uri]})[uri]
+        result_value = result['value']
+        if result_value.get('stream_type') not in ('video', 'audio'):
+            raise ExtractorError('Unsupported URL', expected=True)
+        streaming_url = self._call_api_proxy(
+            'get', display_id, {'uri': uri})['streaming_url']
+        source = result_value.get('source') or {}
+        media = result_value.get('video') or result_value.get('audio') or {}
+        signing_channel = result_value.get('signing_channel') or {}
+
+        return {
+            'id': result['claim_id'],
+            'title': result_value['title'],
+            'thumbnail': try_get(result_value, lambda x: x['thumbnail']['url'], compat_str),
+            'description': result_value.get('description'),
+            'license': result_value.get('license'),
+            'timestamp': int_or_none(result.get('timestamp')),
+            'tags': result_value.get('tags'),
+            'width': int_or_none(media.get('width')),
+            'height': int_or_none(media.get('height')),
+            'duration': int_or_none(media.get('duration')),
+            'channel': signing_channel.get('name'),
+            'channel_id': signing_channel.get('claim_id'),
+            'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
+            'filesize': int_or_none(source.get('size')),
+            'url': streaming_url,
+        }

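As a rough illustration of the API flow the new extractor follows (endpoint, method names and payload shape taken from the patch; the claim URL is only an example and no error handling is shown), a standalone Python 3 sketch:

    import json
    from urllib.request import Request, urlopen

    API = 'https://api.lbry.tv/api/v1/proxy'

    def call_api_proxy(method, params):
        req = Request(
            API, data=json.dumps({'method': method, 'params': params}).encode(),
            headers={'Content-Type': 'application/json-rpc'})
        return json.loads(urlopen(req).read())['result']

    # ':' from the page URL becomes '#' in the lbry:// URI
    uri = 'lbry://@Mantega#1/First-day-LBRY#1'
    claim = call_api_proxy('resolve', {'urls': [uri]})[uri]
    streaming_url = call_api_proxy('get', {'uri': uri})['streaming_url']
    print(claim['value']['title'], streaming_url)
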
From 6d3bdcf2177ce75f8f95731186a4794412b9776d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 19:17:10 +0100
Subject: [PATCH 166/340] [lrt] fix extraction

---
 youtube_dl/extractor/lrt.py | 91 +++++++++++++++----------------------
 1 file changed, 36 insertions(+), 55 deletions(-)

diff --git a/youtube_dl/extractor/lrt.py b/youtube_dl/extractor/lrt.py
index f5c997ef4..a89434adb 100644
--- a/youtube_dl/extractor/lrt.py
+++ b/youtube_dl/extractor/lrt.py
@@ -5,28 +5,26 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
-    int_or_none,
-    parse_duration,
-    remove_end,
+    clean_html,
+    merge_dicts,
 )
 
 
 class LRTIE(InfoExtractor):
     IE_NAME = 'lrt.lt'
-    _VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://(?:www\.)?lrt\.lt(?P<path>/mediateka/irasas/(?P<id>[0-9]+))'
     _TESTS = [{
         # m3u8 download
-        'url': 'http://www.lrt.lt/mediateka/irasas/54391/',
-        'md5': 'fe44cf7e4ab3198055f2c598fc175cb0',
+        'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
+        'md5': '85cb2bb530f31d91a9c65b479516ade4',
         'info_dict': {
-            'id': '54391',
+            'id': '2000127261',
             'ext': 'mp4',
-            'title': 'Septynios Kauno dienos',
-            'description': 'md5:24d84534c7dc76581e59f5689462411a',
-            'duration': 1783,
-            'view_count': int,
-            'like_count': int,
+            'title': 'Greita ir gardu: Sicilijos įkvėpta klasikinių makaronų su baklažanais vakarienė',
+            'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
+            'duration': 3035,
+            'timestamp': 1604079000,
+            'upload_date': '20201030',
         },
     }, {
         # direct mp3 download
@@ -43,52 +41,35 @@ class LRTIE(InfoExtractor):
         },
     }]
 
+    def _extract_js_var(self, webpage, var_name, default):
+        return self._search_regex(
+            r'%s\s*=\s*(["\'])((?:(?!\1).)+)\1' % var_name,
+            webpage, var_name.replace('_', ' '), default, group=2)
+
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        path, video_id = re.match(self._VALID_URL, url).groups()
         webpage = self._download_webpage(url, video_id)
 
-        title = remove_end(self._og_search_title(webpage), ' - LRT')
+        media_url = self._extract_js_var(webpage, 'main_url', path)
+        media = self._download_json(self._extract_js_var(
+            webpage, 'media_info_url',
+            'https://www.lrt.lt/servisai/stream_url/vod/media_info/'),
+            video_id, query={'url': media_url})
+        jw_data = self._parse_jwplayer_data(
+            media['playlist_item'], video_id, base_url=url)
 
-        formats = []
-        for _, file_url in re.findall(
-                r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage):
-            ext = determine_ext(file_url)
-            if ext not in ('m3u8', 'mp3'):
+        json_ld_data = self._search_json_ld(webpage, video_id)
+
+        tags = []
+        for tag in media.get('tags', []):
+            tag_name = tag.get('name')
+            if not tag_name:
                 continue
-            # mp3 served as m3u8 produces stuttered media file
-            if ext == 'm3u8' and '.mp3' in file_url:
-                continue
-            if ext == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(
-                    file_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                    fatal=False))
-            elif ext == 'mp3':
-                formats.append({
-                    'url': file_url,
-                    'vcodec': 'none',
-                })
-        self._sort_formats(formats)
+            tags.append(tag_name)
 
-        thumbnail = self._og_search_thumbnail(webpage)
-        description = self._og_search_description(webpage)
-        duration = parse_duration(self._search_regex(
-            r'var\s+record_len\s*=\s*(["\'])(?P<duration>[0-9]+:[0-9]+:[0-9]+)\1',
-            webpage, 'duration', default=None, group='duration'))
-
-        view_count = int_or_none(self._html_search_regex(
-            r'<div[^>]+class=(["\']).*?record-desc-seen.*?\1[^>]*>(?P<count>.+?)</div>',
-            webpage, 'view count', fatal=False, group='count'))
-        like_count = int_or_none(self._search_regex(
-            r'<span[^>]+id=(["\'])flikesCount.*?\1>(?P<count>\d+)<',
-            webpage, 'like count', fatal=False, group='count'))
-
-        return {
-            'id': video_id,
-            'title': title,
-            'formats': formats,
-            'thumbnail': thumbnail,
-            'description': description,
-            'duration': duration,
-            'view_count': view_count,
-            'like_count': like_count,
+        clean_info = {
+            'description': clean_html(media.get('content')),
+            'tags': tags,
         }
+
+        return merge_dicts(clean_info, jw_data, json_ld_data)

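The rewrite above pivots on two JavaScript variables embedded in the page, main_url and media_info_url, which _extract_js_var pulls out with a quoted-string regex. A small standalone sketch of that helper against a made-up fragment:

    import re

    webpage = "var media_info_url = 'https://www.lrt.lt/servisai/stream_url/vod/media_info/';"  # made-up fragment

    def extract_js_var(webpage, var_name, default):
        m = re.search(r'%s\s*=\s*(["\'])((?:(?!\1).)+)\1' % var_name, webpage)
        return m.group(2) if m else default

    print(extract_js_var(webpage, 'media_info_url', None))
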
From fe07e788bf7718d429d6fc7e4bcb0c761ffd2cfc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 01:30:43 +0700
Subject: [PATCH 167/340] [utils] Skip ! prefixed code in js_to_json

---
 test/test_utils.py  | 22 ++++++++++++++++++++++
 youtube_dl/utils.py |  5 +++--
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/test/test_utils.py b/test/test_utils.py
index c2d1e4fb1..925a21d34 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -937,6 +937,28 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(d['x'], 1)
         self.assertEqual(d['y'], 'a')
 
+        # Just drop ! prefix for now though this results in a wrong value
+        on = js_to_json('''{
+            a: !0,
+            b: !1,
+            c: !!0,
+            d: !!42.42,
+            e: !!![],
+            f: !"abc",
+            g: !"",
+            !42: 42
+        }''')
+        self.assertEqual(json.loads(on), {
+            'a': 0,
+            'b': 1,
+            'c': 0,
+            'd': 42.42,
+            'e': [],
+            'f': "abc",
+            'g': "",
+            '42': 42
+        })
+
         on = js_to_json('["abc", "def",]')
         self.assertEqual(json.loads(on), ['abc', 'def'])
 
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 737e2810e..321f903ab 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -4078,7 +4078,7 @@ def js_to_json(code):
         v = m.group(0)
         if v in ('true', 'false', 'null'):
             return v
-        elif v.startswith('/*') or v.startswith('//') or v == ',':
+        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
             return ""
 
         if v[0] in ("'", '"'):
@@ -4103,7 +4103,8 @@ def js_to_json(code):
         {comment}|,(?={skip}[\]}}])|
         (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
         \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
-        [0-9]+(?={skip}:)
+        [0-9]+(?={skip}:)|
+        !+
         '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
 
 

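A quick illustration of the behaviour the new test pins down: js_to_json now drops a leading run of !, so JS-style booleans such as !0 come through as 0 rather than true. A minimal sketch assuming the patched youtube_dl.utils:

    import json
    from youtube_dl.utils import js_to_json

    print(json.loads(js_to_json('{a: !0, b: !1, f: !"abc"}')))
    # {'a': 0, 'b': 1, 'f': 'abc'} - the logical sense of ! is lost, but parsing no longer fails
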
From 2e7fa18bb96dac31bda754f9f7f0ab21a6513166 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 01:32:48 +0700
Subject: [PATCH 168/340] [xtube] Fix extraction (closes #26996)

---
 youtube_dl/extractor/xtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py
index 01b253dcb..18969058f 100644
--- a/youtube_dl/extractor/xtube.py
+++ b/youtube_dl/extractor/xtube.py
@@ -90,7 +90,7 @@ class XTubeIE(InfoExtractor):
         title, thumbnail, duration = [None] * 3
 
         config = self._parse_json(self._search_regex(
-            r'playerConf\s*=\s*({.+?})\s*,\s*\n', webpage, 'config',
+            r'playerConf\s*=\s*({.+?})\s*,\s*(?:\n|loaderConf)', webpage, 'config',
             default='{}'), video_id, transform_source=js_to_json, fatal=False)
         if config:
             config = config.get('mainRoll')

From a80b23c373202b35a2b00ba2455d0e2fcf8d366f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 02:57:26 +0700
Subject: [PATCH 169/340] [servus] Fix extraction (closes #26872, closes
 #26967, closes #26983, closes #27000)

---
 youtube_dl/extractor/servus.py | 106 ++++++++++++++++++++++++++++-----
 1 file changed, 91 insertions(+), 15 deletions(-)

diff --git a/youtube_dl/extractor/servus.py b/youtube_dl/extractor/servus.py
index 9401bf2cf..206bc1801 100644
--- a/youtube_dl/extractor/servus.py
+++ b/youtube_dl/extractor/servus.py
@@ -1,9 +1,15 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    float_or_none,
+    int_or_none,
+    unified_timestamp,
+    urlencode_postdata,
+    url_or_none,
+)
 
 
 class ServusIE(InfoExtractor):
@@ -19,13 +25,22 @@ class ServusIE(InfoExtractor):
     _TESTS = [{
         # new URL schema
         'url': 'https://www.servustv.com/videos/aa-1t6vbu5pw1w12/',
-        'md5': '3e1dd16775aa8d5cbef23628cfffc1f4',
+        'md5': '60474d4c21f3eb148838f215c37f02b9',
         'info_dict': {
             'id': 'AA-1T6VBU5PW1W12',
             'ext': 'mp4',
             'title': 'Die Grünen aus Sicht des Volkes',
+            'alt_title': 'Talk im Hangar-7 Voxpops Gruene',
             'description': 'md5:1247204d85783afe3682644398ff2ec4',
             'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 62.442,
+            'timestamp': 1605193976,
+            'upload_date': '20201112',
+            'series': 'Talk im Hangar-7',
+            'season': 'Season 9',
+            'season_number': 9,
+            'episode': 'Episode 31 - September 14',
+            'episode_number': 31,
         }
     }, {
         # old URL schema
@@ -44,26 +59,87 @@ class ServusIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url).upper()
-        webpage = self._download_webpage(url, video_id)
 
-        title = self._search_regex(
-            (r'videoLabel\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
-             r'<h\d+[^>]+\bclass=["\']heading--(?:one|two)["\'][^>]*>(?P<title>[^<]+)'),
-            webpage, 'title', default=None,
-            group='title') or self._og_search_title(webpage)
-        title = re.sub(r'\s*-\s*Servus TV\s*$', '', title)
-        description = self._og_search_description(webpage)
-        thumbnail = self._og_search_thumbnail(webpage)
+        token = self._download_json(
+            'https://auth.redbullmediahouse.com/token', video_id,
+            'Downloading token', data=urlencode_postdata({
+                'grant_type': 'client_credentials',
+            }), headers={
+                'Authorization': 'Basic SVgtMjJYNEhBNFdEM1cxMTpEdDRVSkFLd2ZOMG5IMjB1NGFBWTBmUFpDNlpoQ1EzNA==',
+            })
+        access_token = token['access_token']
+        token_type = token.get('token_type', 'Bearer')
 
-        formats = self._extract_m3u8_formats(
-            'https://stv.rbmbtnx.net/api/v1/manifests/%s.m3u8' % video_id,
-            video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
+        video = self._download_json(
+            'https://sparkle-api.liiift.io/api/v1/stv/channels/international/assets/%s' % video_id,
+            video_id, 'Downloading video JSON', headers={
+                'Authorization': '%s %s' % (token_type, access_token),
+            })
+
+        formats = []
+        thumbnail = None
+        for resource in video['resources']:
+            if not isinstance(resource, dict):
+                continue
+            format_url = url_or_none(resource.get('url'))
+            if not format_url:
+                continue
+            extension = resource.get('extension')
+            type_ = resource.get('type')
+            if extension == 'jpg' or type_ == 'reference_keyframe':
+                thumbnail = format_url
+                continue
+            ext = determine_ext(format_url)
+            if type_ == 'dash' or ext == 'mpd':
+                formats.extend(self._extract_mpd_formats(
+                    format_url, video_id, mpd_id='dash', fatal=False))
+            elif type_ == 'hls' or ext == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls', fatal=False))
+            elif extension == 'mp4' or ext == 'mp4':
+                formats.append({
+                    'url': format_url,
+                    'format_id': type_,
+                    'width': int_or_none(resource.get('width')),
+                    'height': int_or_none(resource.get('height')),
+                })
         self._sort_formats(formats)
 
+        attrs = {}
+        for attribute in video['attributes']:
+            if not isinstance(attribute, dict):
+                continue
+            key = attribute.get('fieldKey')
+            value = attribute.get('fieldValue')
+            if not key or not value:
+                continue
+            attrs[key] = value
+
+        title = attrs.get('title_stv') or video_id
+        alt_title = attrs.get('title')
+        description = attrs.get('long_description') or attrs.get('short_description')
+        series = attrs.get('label')
+        season = attrs.get('season')
+        episode = attrs.get('chapter')
+        duration = float_or_none(attrs.get('duration'), scale=1000)
+        season_number = int_or_none(self._search_regex(
+            r'Season (\d+)', season or '', 'season number', default=None))
+        episode_number = int_or_none(self._search_regex(
+            r'Episode (\d+)', episode or '', 'episode number', default=None))
+
         return {
             'id': video_id,
             'title': title,
+            'alt_title': alt_title,
             'description': description,
             'thumbnail': thumbnail,
+            'duration': duration,
+            'timestamp': unified_timestamp(video.get('lastPublished')),
+            'series': series,
+            'season': season,
+            'season_number': season_number,
+            'episode': episode,
+            'episode_number': episode_number,
             'formats': formats,
         }

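One detail worth noting in the new extraction above: the Sparkle API returns metadata as a flat list of fieldKey/fieldValue pairs, which the patch folds into a dict before pulling season and episode numbers out of display strings. A short sketch with values borrowed from the test case:

    import re

    # sample of video['attributes'] as returned by the Sparkle API
    attributes = [
        {'fieldKey': 'title_stv', 'fieldValue': 'Die Grünen aus Sicht des Volkes'},
        {'fieldKey': 'season', 'fieldValue': 'Season 9'},
        {'fieldKey': 'chapter', 'fieldValue': 'Episode 31 - September 14'},
    ]

    attrs = {}
    for attribute in attributes:
        key, value = attribute.get('fieldKey'), attribute.get('fieldValue')
        if key and value:
            attrs[key] = value

    season_number = int(re.search(r'Season (\d+)', attrs['season']).group(1))
    episode_number = int(re.search(r'Episode (\d+)', attrs['chapter']).group(1))
    print(season_number, episode_number)  # 9 31
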
From f4093b34f6ae8f8f3603b20270232950d52933ae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 03:02:07 +0700
Subject: [PATCH 170/340] [servus] Add support for pm-wissen.com (closes
 #25869)

---
 youtube_dl/extractor/servus.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/servus.py b/youtube_dl/extractor/servus.py
index 206bc1801..1610ddc2c 100644
--- a/youtube_dl/extractor/servus.py
+++ b/youtube_dl/extractor/servus.py
@@ -18,7 +18,7 @@ class ServusIE(InfoExtractor):
                         (?:www\.)?
                         (?:
                             servus\.com/(?:(?:at|de)/p/[^/]+|tv/videos)|
-                            servustv\.com/videos
+                            (?:servustv|pm-wissen)\.com/videos
                         )
                         /(?P<id>[aA]{2}-\w+|\d+-\d+)
                     '''
@@ -55,6 +55,9 @@ class ServusIE(InfoExtractor):
     }, {
         'url': 'https://www.servus.com/tv/videos/1380889096408-1235196658/',
         'only_matching': True,
+    }, {
+        'url': 'https://www.pm-wissen.com/videos/aa-24mus4g2w2112/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From 11f3471c4be16d0f848c72a4b4915f5f81d4f337 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 03:24:23 +0700
Subject: [PATCH 171/340] [ndr:embed:base] Extract subtitles (closes #25447,
 closes #26106)

---
 youtube_dl/extractor/ndr.py | 38 +++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/youtube_dl/extractor/ndr.py b/youtube_dl/extractor/ndr.py
index 2447c812e..ddd828d92 100644
--- a/youtube_dl/extractor/ndr.py
+++ b/youtube_dl/extractor/ndr.py
@@ -81,6 +81,29 @@ class NDRIE(NDRBaseIE):
         'params': {
             'skip_download': True,
         },
+    }, {
+        # with subtitles
+        'url': 'https://www.ndr.de/fernsehen/sendungen/extra_3/extra-3-Satiremagazin-mit-Christian-Ehring,sendung1091858.html',
+        'info_dict': {
+            'id': 'extra18674',
+            'display_id': 'extra-3-Satiremagazin-mit-Christian-Ehring',
+            'ext': 'mp4',
+            'title': 'Extra 3 vom 11.11.2020 mit Christian Ehring',
+            'description': 'md5:42ee53990a715eaaf4dc7f13a3bd56c6',
+            'uploader': 'ndrtv',
+            'upload_date': '20201113',
+            'duration': 1749,
+            'subtitles': {
+                'de': [{
+                    'ext': 'ttml',
+                    'url': r're:^https://www\.ndr\.de.+',
+                }],
+            },
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'expected_warnings': ['Unable to download f4m manifest'],
     }, {
         'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html',
         'only_matching': True,
@@ -239,6 +262,20 @@ class NDREmbedBaseIE(InfoExtractor):
                 'preference': quality_key(thumbnail.get('quality')),
             })
 
+        subtitles = {}
+        tracks = config.get('tracks')
+        if tracks and isinstance(tracks, list):
+            for track in tracks:
+                if not isinstance(track, dict):
+                    continue
+                track_url = urljoin(url, track.get('src'))
+                if not track_url:
+                    continue
+                subtitles.setdefault(track.get('srclang') or 'de', []).append({
+                    'url': track_url,
+                    'ext': 'ttml',
+                })
+
         return {
             'id': video_id,
             'title': title,
@@ -248,6 +285,7 @@ class NDREmbedBaseIE(InfoExtractor):
             'duration': duration,
             'thumbnails': thumbnails,
             'formats': formats,
+            'subtitles': subtitles,
         }
 
 

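For reference, the subtitles structure the new code builds, shown standalone (track values are invented; urljoin is the helper from youtube_dl.utils, as in the patch):

    from youtube_dl.utils import urljoin

    url = 'https://www.ndr.de/fernsehen/some-page.html'               # page the player config came from
    tracks = [{'src': '/captions/extra18674.ttml', 'srclang': 'de'}]  # invented config['tracks']

    subtitles = {}
    for track in tracks:
        track_url = urljoin(url, track.get('src'))
        if not track_url:
            continue
        subtitles.setdefault(track.get('srclang') or 'de', []).append({
            'url': track_url,
            'ext': 'ttml',
        })
    print(subtitles)
    # {'de': [{'url': 'https://www.ndr.de/captions/extra18674.ttml', 'ext': 'ttml'}]}
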
From 91dcde8a381d1c442e5b56fa1d3652fdd3f4496d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 16 Nov 2020 21:27:51 +0100
Subject: [PATCH 172/340] [lrt] fix extraction with empty tags (closes #20264)

---
 youtube_dl/extractor/lrt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/lrt.py b/youtube_dl/extractor/lrt.py
index a89434adb..89d549858 100644
--- a/youtube_dl/extractor/lrt.py
+++ b/youtube_dl/extractor/lrt.py
@@ -61,7 +61,7 @@ class LRTIE(InfoExtractor):
         json_ld_data = self._search_json_ld(webpage, video_id)
 
         tags = []
-        for tag in media.get('tags', []):
+        for tag in (media.get('tags') or []):
             tag_name = tag.get('name')
             if not tag_name:
                 continue

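The change above matters because dict.get(key, default) only falls back when the key is absent, not when the API returns "tags": null. A tiny standalone illustration:

    media = {'tags': None}            # what the API returns for a video without tags

    print(media.get('tags', []))      # None - the default is ignored, iterating would raise TypeError
    print(media.get('tags') or [])    # []   - falsy values are replaced, the loop stays safe
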
From 6699b6ce41ca04d1d8782b943dfacfa41e7102eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 03:40:02 +0700
Subject: [PATCH 173/340] [youtube:tab] Fix extraction with cookies provided
 (closes #27005)

---
 youtube_dl/extractor/youtube.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 248682a41..22af03832 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2723,7 +2723,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                 'itct': ctp,
             }
 
-    def _entries(self, tab):
+    def _entries(self, tab, identity_token):
         continuation = None
         slr_contents = tab['sectionListRenderer']['contents']
         for slr_content in slr_contents:
@@ -2768,16 +2768,20 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             if not continuation:
                 continuation = self._extract_continuation(is_renderer)
 
+        headers = {
+            'x-youtube-client-name': '1',
+            'x-youtube-client-version': '2.20201112.04.01',
+        }
+        if identity_token:
+            headers['x-youtube-identity-token'] = identity_token
+
         for page_num in itertools.count(1):
             if not continuation:
                 break
             browse = self._download_json(
                 'https://www.youtube.com/browse_ajax', None,
                 'Downloading page %d' % page_num,
-                headers={
-                    'x-youtube-client-name': '1',
-                    'x-youtube-client-version': '2.20201030.01.00',
-                }, query=continuation, fatal=False)
+                headers=headers, query=continuation, fatal=False)
             if not browse:
                 break
             response = try_get(browse, lambda x: x[1]['response'], dict)
@@ -2848,8 +2852,11 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         title = channel_title or channel_id
         if tab_title:
             title += ' - %s' % tab_title
+        identity_token = self._search_regex(
+            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
+            'identity token', default=None)
         return self.playlist_result(
-            self._entries(selected_tab['content']),
+            self._entries(selected_tab['content'], identity_token),
             playlist_id=channel_external_id or channel_id,
             playlist_title=title)
 

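To make the cookie fix above concrete: for a logged-in session YouTube only accepts the browse_ajax continuation requests if the page's ID_TOKEN is echoed back as a header. A minimal sketch with an invented token (regex and header names from the patch):

    import re

    webpage = '... "ID_TOKEN":"QUFFLUhqbEXAMPLE" ...'   # invented fragment of a logged-in page

    identity_token = re.search(
        r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage).group(1)

    headers = {
        'x-youtube-client-name': '1',
        'x-youtube-client-version': '2.20201112.04.01',
    }
    if identity_token:
        headers['x-youtube-identity-token'] = identity_token
    print(headers['x-youtube-identity-token'])  # QUFFLUhqbEXAMPLE
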
From e3cad6bd99f32ef5f146d803b60be182d34a83b8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 03:50:12 +0700
Subject: [PATCH 174/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 1ef7ea7b6..65aa524a1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,27 @@
+version <unreleased>
+
+Core
+* [utils] Skip ! prefixed code in js_to_json
+
+Extractors
+* [youtube:tab] Fix extraction with cookies provided (#27005)
+* [lrt] Fix extraction with empty tags (#20264)
++ [ndr:embed:base] Extract subtitles (#25447, #26106)
++ [servus] Add support for pm-wissen.com (#25869)
+* [servus] Fix extraction (#26872, #26967, #26983, #27000)
+* [xtube] Fix extraction (#26996)
+* [lrt] Fix extraction
++ [lbry] Add support for lbry.tv
++ [condenast] Extract subtitles
+* [condenast] Fix extraction
+* [bandcamp] Fix extraction (#26681, #26684)
+* [rai] Fix RaiPlay extraction (#26064, #26096)
+* [vlive] Fix extraction
+* [usanetwork] Fix extraction
+* [nbc] Fix NBCNews/Today/MSNBC extraction
+* [cnbc] Fix extraction
+
+
 version 2020.11.12
 
 Extractors

From b92e95aa0159f114f1066fbbefed128d8c850c60 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 03:59:54 +0700
Subject: [PATCH 175/340] release 2020.11.17

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 2 +-
 youtube_dl/version.py                            | 2 +-
 8 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index a10c9fd83..80baffa2a 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.12
+ [debug] youtube-dl version 2020.11.17
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 9cc120d3e..ee4215296 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 29bd5f5ac..4c3834fa5 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index cc33a993f..3ad3e7409 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.12
+ [debug] youtube-dl version 2020.11.17
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index cd577ecef..aabbfe83c 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.12. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.12**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 65aa524a1..254d0ef1b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.17
 
 Core
 * [utils] Skip ! prefixed code in js_to_json
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 0c77d017e..86b5ad726 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -417,6 +417,7 @@
  - **la7.it**
  - **laola1tv**
  - **laola1tv:embed**
+ - **lbry.tv**
  - **LCI**
  - **Lcp**
  - **LcpPlay**
@@ -1042,7 +1043,6 @@
  - **vk:wallpost**
  - **vlive**
  - **vlive:channel**
- - **vlive:playlist**
  - **Vodlocker**
  - **VODPl**
  - **VODPlatform**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 04cd207ab..ed18392a0 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.12'
+__version__ = '2020.11.17'

From 7d509c613ba66492ede723188d4254bb1427f4a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 04:28:35 +0700
Subject: [PATCH 176/340] [youtube] Fix chapters extraction (closes #26005)

---
 youtube_dl/extractor/youtube.py | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 22af03832..1a395b6e1 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1465,21 +1465,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
     def _extract_chapters_from_json(self, webpage, video_id, duration):
         if not webpage:
             return
-        player = self._parse_json(
-            self._search_regex(
-                r'RELATED_PLAYER_ARGS["\']\s*:\s*({.+})\s*,?\s*\n', webpage,
-                'player args', default='{}'),
-            video_id, fatal=False)
-        if not player or not isinstance(player, dict):
-            return
-        watch_next_response = player.get('watch_next_response')
-        if not isinstance(watch_next_response, compat_str):
-            return
-        response = self._parse_json(watch_next_response, video_id, fatal=False)
-        if not response or not isinstance(response, dict):
+        data = self._extract_yt_initial_data(video_id, webpage)
+        if not data or not isinstance(data, dict):
             return
         chapters_list = try_get(
-            response,
+            data,
             lambda x: x['playerOverlays']
                        ['playerOverlayRenderer']
                        ['decoratedPlayerBarRenderer']
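
The hunk above swaps the brittle RELATED_PLAYER_ARGS scraping for the shared ytInitialData helper and then walks the nested renderer tree defensively. A minimal standalone sketch of that pattern follows; the regex and the simplified try_get are illustrative assumptions, not the actual youtube-dl helpers.

    import json
    import re


    def try_get(src, getter, expected_type=None):
        # Swallow the usual traversal errors and optionally check the type,
        # in the spirit of youtube_dl.utils.try_get.
        try:
            value = getter(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            return None
        if expected_type is None or isinstance(value, expected_type):
            return value
        return None


    def extract_initial_data(webpage):
        # ytInitialData is embedded as a JSON literal; the markup around it
        # varies, so this pattern is only an approximation.
        mobj = re.search(r'ytInitialData\s*=\s*({.+?})\s*;', webpage)
        return json.loads(mobj.group(1)) if mobj else None


    data = extract_initial_data(
        'var ytInitialData = {"playerOverlays": {"playerOverlayRenderer": {}}};')
    print(try_get(data, lambda x: x['playerOverlays']['playerOverlayRenderer'], dict))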

From f8c749f12c3b88fc97b4d2a2d9934483589b50a3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 17 Nov 2020 07:01:41 +0700
Subject: [PATCH 177/340] [youtube:tab] Fix playlist title extraction (closes
 #27015)

---
 youtube_dl/extractor/youtube.py | 36 +++++++++++++++++++--------------
 1 file changed, 21 insertions(+), 15 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 1a395b6e1..9333e48e4 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2825,30 +2825,36 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             raise ExtractorError('Unable to find selected tab')
 
     def _real_extract(self, url):
-        channel_id = self._match_id(url)
+        item_id = self._match_id(url)
         url = compat_urlparse.urlunparse(
             compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
-        webpage = self._download_webpage(url, channel_id)
-        data = self._extract_yt_initial_data(channel_id, webpage)
+        webpage = self._download_webpage(url, item_id)
+        data = self._extract_yt_initial_data(item_id, webpage)
         tabs = data['contents']['twoColumnBrowseResultsRenderer']['tabs']
         selected_tab = self._extract_selected_tab(tabs)
-        channel_title = try_get(
-            data, lambda x: x['metadata']['channelMetadataRenderer']['title'],
-            compat_str)
-        channel_external_id = try_get(
-            data, lambda x: x['metadata']['channelMetadataRenderer']['externalId'],
-            compat_str)
-        tab_title = selected_tab.get('title')
-        title = channel_title or channel_id
-        if tab_title:
-            title += ' - %s' % tab_title
+        renderer = try_get(
+            data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
+        if renderer:
+            channel_title = renderer.get('title') or item_id
+            tab_title = selected_tab.get('title')
+            title = channel_title or item_id
+            if tab_title:
+                title += ' - %s' % tab_title
+            description = renderer.get('description')
+            playlist_id = renderer.get('externalId')
+        renderer = try_get(
+            data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
+        if renderer:
+            title = renderer.get('title')
+            description = None
+            playlist_id = item_id
         identity_token = self._search_regex(
             r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
             'identity token', default=None)
         return self.playlist_result(
             self._entries(selected_tab['content'], identity_token),
-            playlist_id=channel_external_id or channel_id,
-            playlist_title=title)
+            playlist_id=playlist_id, playlist_title=title,
+            playlist_description=description)
 
 
 class YoutubePlaylistIE(InfoExtractor):
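
The change above reads the title from channelMetadataRenderer when the page is a channel tab and from playlistMetadataRenderer when it is a playlist. A standalone sketch of that fallback, with explicit defaults so nothing is left unbound; the sample data is made up.

    def build_playlist_metadata(data, item_id, selected_tab):
        metadata = data.get('metadata') or {}
        # Defaults, in case neither renderer is present in the page data.
        title, description, playlist_id = item_id, None, item_id
        channel = metadata.get('channelMetadataRenderer')
        if isinstance(channel, dict):
            title = channel.get('title') or item_id
            tab_title = selected_tab.get('title')
            if tab_title:
                title = '%s - %s' % (title, tab_title)
            description = channel.get('description')
            playlist_id = channel.get('externalId') or item_id
        playlist = metadata.get('playlistMetadataRenderer')
        if isinstance(playlist, dict):
            # Playlist pages carry their own title; the URL id is the playlist id.
            title, description, playlist_id = playlist.get('title'), None, item_id
        return title, description, playlist_id


    print(build_playlist_metadata(
        {'metadata': {'channelMetadataRenderer': {
            'title': 'lex will', 'externalId': 'UCKfVa3S1e4PHvxWcwyMMg8w'}}},
        'UCKfVa3S1e4PHvxWcwyMMg8w', {'title': 'Videos'}))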

From 5b867c15a8443c79e1521053c19db3ae5f679625 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 17 Nov 2020 13:11:35 +0100
Subject: [PATCH 178/340] [urplay] fix extraction (closes #26828)

---
 youtube_dl/extractor/urplay.py | 77 ++++++++++++++++++++++------------
 1 file changed, 51 insertions(+), 26 deletions(-)

diff --git a/youtube_dl/extractor/urplay.py b/youtube_dl/extractor/urplay.py
index 6030b7cb5..10b817760 100644
--- a/youtube_dl/extractor/urplay.py
+++ b/youtube_dl/extractor/urplay.py
@@ -2,7 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import unified_timestamp
+from ..utils import (
+    dict_get,
+    int_or_none,
+    unified_timestamp,
+)
 
 
 class URPlayIE(InfoExtractor):
@@ -15,8 +19,8 @@ class URPlayIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'UR Samtiden - Livet, universum och rymdens märkliga musik : Om vetenskap, kritiskt tänkande och motstånd',
             'description': 'md5:5344508a52aa78c1ced6c1b8b9e44e9a',
-            'timestamp': 1513512768,
-            'upload_date': '20171217',
+            'timestamp': 1513292400,
+            'upload_date': '20171214',
         },
     }, {
         'url': 'https://urskola.se/Produkter/190031-Tripp-Trapp-Trad-Sovkudde',
@@ -25,7 +29,7 @@ class URPlayIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Tripp, Trapp, Träd : Sovkudde',
             'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1',
-            'timestamp': 1440093600,
+            'timestamp': 1440086400,
             'upload_date': '20150820',
         },
     }, {
@@ -35,37 +39,58 @@ class URPlayIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
+        url = url.replace('skola.se/Produkter', 'play.se/program')
         webpage = self._download_webpage(url, video_id)
-        urplayer_data = self._parse_json(self._search_regex(
-            r'urPlayer\.init\(({.+?})\);', webpage, 'urplayer data'), video_id)
-        host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect']
+        urplayer_data = self._parse_json(self._html_search_regex(
+            r'data-react-class="components/Player/Player"[^>]+data-react-props="({.+?})"',
+            webpage, 'urplayer data'), video_id)['currentProduct']
+        episode = urplayer_data['title']
+        raw_streaming_info = urplayer_data['streamingInfo']['raw']
+        host = self._download_json(
+            'http://streaming-loadbalancer.ur.se/loadbalancer.json',
+            video_id)['redirect']
 
         formats = []
-        for quality_attr, quality, preference in (('', 'sd', 0), ('_hd', 'hd', 1)):
-            file_http = urplayer_data.get('file_http' + quality_attr) or urplayer_data.get('file_http_sub' + quality_attr)
+        for k, v in raw_streaming_info.items():
+            if not (k in ('sd', 'hd') and isinstance(v, dict)):
+                continue
+            file_http = v.get('location')
             if file_http:
                 formats.extend(self._extract_wowza_formats(
-                    'http://%s/%splaylist.m3u8' % (host, file_http), video_id, skip_protocols=['rtmp', 'rtsp']))
+                    'http://%s/%splaylist.m3u8' % (host, file_http),
+                    video_id, skip_protocols=['f4m', 'rtmp', 'rtsp']))
         self._sort_formats(formats)
 
-        subtitles = {}
-        for subtitle in urplayer_data.get('subtitles', []):
-            subtitle_url = subtitle.get('file')
-            kind = subtitle.get('kind')
-            if not subtitle_url or (kind and kind != 'captions'):
-                continue
-            subtitles.setdefault(subtitle.get('label', 'Svenska'), []).append({
-                'url': subtitle_url,
-            })
+        image = urplayer_data.get('image') or {}
+        thumbnails = []
+        for k, v in image.items():
+            t = {
+                'id': k,
+                'url': v,
+            }
+            wh = k.split('x')
+            if len(wh) == 2:
+                t.update({
+                    'width': int_or_none(wh[0]),
+                    'height': int_or_none(wh[1]),
+                })
+            thumbnails.append(t)
+
+        series = urplayer_data.get('series') or {}
+        series_title = dict_get(series, ('seriesTitle', 'title')) or dict_get(urplayer_data, ('seriesTitle', 'mainTitle'))
 
         return {
             'id': video_id,
-            'title': urplayer_data['title'],
-            'description': self._og_search_description(webpage),
-            'thumbnail': urplayer_data.get('image'),
-            'timestamp': unified_timestamp(self._html_search_meta(('uploadDate', 'schema:uploadDate'), webpage, 'timestamp')),
-            'series': urplayer_data.get('series_title'),
-            'subtitles': subtitles,
+            'title': '%s : %s' % (series_title, episode) if series_title else episode,
+            'description': urplayer_data.get('description'),
+            'thumbnails': thumbnails,
+            'timestamp': unified_timestamp(urplayer_data.get('publishedAt')),
+            'series': series_title,
             'formats': formats,
+            'duration': int_or_none(urplayer_data.get('duration')),
+            'categories': urplayer_data.get('categories'),
+            'tags': urplayer_data.get('keywords'),
+            'season': series.get('label'),
+            'episode': episode,
+            'episode_number': int_or_none(urplayer_data.get('episodeNumber')),
         }
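
The rewritten extractor above builds thumbnails from an image dict keyed by 'WIDTHxHEIGHT'. A small standalone sketch of that key parsing; the sample URLs are made up.

    def build_thumbnails(image):
        thumbnails = []
        for key, url in (image or {}).items():
            thumb = {'id': key, 'url': url}
            parts = key.split('x')
            # Only keys of the form WIDTHxHEIGHT yield dimensions.
            if len(parts) == 2 and all(p.isdigit() for p in parts):
                thumb['width'], thumb['height'] = int(parts[0]), int(parts[1])
            thumbnails.append(thumb)
        return thumbnails


    print(build_thumbnails({
        '640x360': 'https://example.com/640.jpg',
        'main': 'https://example.com/main.jpg',
    }))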

From cb2b9a22a5a53dd63f26db7509f4438a19261e36 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 17 Nov 2020 14:46:02 +0100
Subject: [PATCH 179/340] [bandcamp] extract playlist_description (closes
 #22684)

---
 youtube_dl/extractor/bandcamp.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
index 731c7c25c..69e673a26 100644
--- a/youtube_dl/extractor/bandcamp.py
+++ b/youtube_dl/extractor/bandcamp.py
@@ -270,6 +270,7 @@ class BandcampAlbumIE(BandcampIE):
             'title': '"Entropy" EP',
             'uploader_id': 'jstrecords',
             'id': 'entropy-ep',
+            'description': 'md5:0ff22959c943622972596062f2f366a5',
         },
         'playlist_mincount': 3,
     }, {
@@ -279,6 +280,7 @@ class BandcampAlbumIE(BandcampIE):
             'id': 'we-are-the-plague',
             'title': 'WE ARE THE PLAGUE',
             'uploader_id': 'insulters',
+            'description': 'md5:b3cf845ee41b2b1141dc7bde9237255f',
         },
         'playlist_count': 2,
     }]
@@ -305,11 +307,14 @@ class BandcampAlbumIE(BandcampIE):
             for t in track_info
             if t.get('duration')]
 
+        current = tralbum.get('current') or {}
+
         return {
             '_type': 'playlist',
             'uploader_id': uploader_id,
             'id': playlist_id,
-            'title': try_get(tralbum, lambda x: x['current']['title'], compat_str),
+            'title': current.get('title'),
+            'description': current.get('about'),
             'entries': entries,
         }
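
The hunk above replaces a try_get lookup with the plain "get(...) or {}" idiom, which keeps attribute access safe when 'current' is missing or null in the page JSON. A tiny sketch with made-up data:

    tralbum = {'current': {'title': '"Entropy" EP', 'about': 'liner notes'}}
    current = tralbum.get('current') or {}
    # If 'current' were absent or None, both lookups would simply return None.
    print(current.get('title'), current.get('about'))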
 

From aa613ef7e1efe9f799a1209659f8d9d01e3de221 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 17 Nov 2020 19:13:38 +0100
Subject: [PATCH 180/340] [malltv] fix extraction (closes #27035)

---
 youtube_dl/extractor/malltv.py | 60 ++++++++++++++++++++++++++--------
 1 file changed, 46 insertions(+), 14 deletions(-)

diff --git a/youtube_dl/extractor/malltv.py b/youtube_dl/extractor/malltv.py
index 6f4fd927f..fadfd9338 100644
--- a/youtube_dl/extractor/malltv.py
+++ b/youtube_dl/extractor/malltv.py
@@ -1,10 +1,16 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import merge_dicts
+from ..utils import (
+    clean_html,
+    dict_get,
+    float_or_none,
+    int_or_none,
+    merge_dicts,
+    parse_duration,
+    try_get,
+)
 
 
 class MallTVIE(InfoExtractor):
@@ -17,7 +23,7 @@ class MallTVIE(InfoExtractor):
             'display_id': '18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
             'ext': 'mp4',
             'title': '18 miliard pro neziskovky. Opravdu jsou sportovci nebo Člověk v tísni pijavice?',
-            'description': 'md5:25fc0ec42a72ba602b602c683fa29deb',
+            'description': 'md5:db7d5744a4bd4043d9d98324aa72ab35',
             'duration': 216,
             'timestamp': 1538870400,
             'upload_date': '20181007',
@@ -37,20 +43,46 @@ class MallTVIE(InfoExtractor):
         webpage = self._download_webpage(
             url, display_id, headers=self.geo_verification_headers())
 
-        SOURCE_RE = r'(<source[^>]+\bsrc=(?:(["\'])(?:(?!\2).)+|[^\s]+)/(?P<id>[\da-z]+)/index)\b'
+        video = self._parse_json(self._search_regex(
+            r'videoObject\s*=\s*JSON\.parse\(JSON\.stringify\(({.+?})\)\);',
+            webpage, 'video object'), display_id)
+        video_source = video['VideoSource']
         video_id = self._search_regex(
-            SOURCE_RE, webpage, 'video id', group='id')
+            r'/([\da-z]+)/index\b', video_source, 'video id')
 
-        media = self._parse_html5_media_entries(
-            url, re.sub(SOURCE_RE, r'\1.m3u8', webpage), video_id,
-            m3u8_id='hls', m3u8_entry_protocol='m3u8_native')[0]
+        formats = self._extract_m3u8_formats(
+            video_source + '.m3u8', video_id, 'mp4', 'm3u8_native')
+        self._sort_formats(formats)
+
+        subtitles = {}
+        for s in (video.get('Subtitles') or {}):
+            s_url = s.get('Url')
+            if not s_url:
+                continue
+            subtitles.setdefault(s.get('Language') or 'cz', []).append({
+                'url': s_url,
+            })
+
+        entity_counts = video.get('EntityCounts') or {}
+
+        def get_count(k):
+            v = entity_counts.get(k + 's') or {}
+            return int_or_none(dict_get(v, ('Count', 'StrCount')))
 
         info = self._search_json_ld(webpage, video_id, default={})
 
-        return merge_dicts(media, info, {
+        return merge_dicts({
             'id': video_id,
             'display_id': display_id,
-            'title': self._og_search_title(webpage, default=None) or display_id,
-            'description': self._og_search_description(webpage, default=None),
-            'thumbnail': self._og_search_thumbnail(webpage, default=None),
-        })
+            'title': video.get('Title'),
+            'description': clean_html(video.get('Description')),
+            'thumbnail': video.get('ThumbnailUrl'),
+            'formats': formats,
+            'subtitles': subtitles,
+            'duration': int_or_none(video.get('DurationSeconds')) or parse_duration(video.get('Duration')),
+            'view_count': get_count('View'),
+            'like_count': get_count('Like'),
+            'dislike_count': get_count('Dislike'),
+            'average_rating': float_or_none(try_get(video, lambda x: x['EntityRating']['AvarageRate'])),
+            'comment_count': get_count('Comment'),
+        }, info)
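
The new get_count helper above tolerates counters stored either as an integer 'Count' or a stringified 'StrCount'. A standalone sketch with simplified stand-ins for int_or_none and dict_get; the sample counters are made up.

    def int_or_none(v):
        try:
            return int(v)
        except (TypeError, ValueError):
            return None


    def dict_get(d, keys):
        # First non-None value among the given keys.
        for key in keys:
            value = d.get(key)
            if value is not None:
                return value
        return None


    entity_counts = {'Views': {'StrCount': '1234'}, 'Likes': {'Count': 56}}


    def get_count(kind):
        counts = entity_counts.get(kind + 's') or {}
        return int_or_none(dict_get(counts, ('Count', 'StrCount')))


    print(get_count('View'), get_count('Like'), get_count('Dislike'))  # 1234 56 None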

From 284f8306dffb3dbeeca3f99ef4c32ed4fcd571c3 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 17 Nov 2020 20:32:50 +0100
Subject: [PATCH 181/340] [youtube:tab] fix view_count extraction (closes
 #27051)

---
 youtube_dl/extractor/youtube.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 9333e48e4..4089e2aba 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2543,8 +2543,8 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             renderer, lambda x: x['lengthText']['simpleText'], compat_str))
         view_count_text = try_get(
             renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
-        view_count = int_or_none(self._search_regex(
-            r'^(\d+)', re.sub(r'\s', '', view_count_text),
+        view_count = str_to_int(self._search_regex(
+            r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
             'view count', default=None))
         uploader = try_get(
             renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
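
The switch above from int_or_none to str_to_int matters because the view count text uses thousands separators ('1,234,567 views'), which plain int() rejects. A simplified sketch of the idea, not the exact helper:

    import re


    def str_to_int(value):
        # Drop digit separators before converting.
        if value is None:
            return None
        digits = re.sub(r'[,\.\s]+', '', str(value))
        return int(digits) if digits.isdigit() else None


    view_count_text = '1,234,567 views'
    mobj = re.search(r'^([\d,]+)', re.sub(r'\s', '', view_count_text))
    print(str_to_int(mobj.group(1)) if mobj else None)  # 1234567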

From 2864179293ba16189544b28356647886960a48fe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 18 Nov 2020 03:32:42 +0700
Subject: [PATCH 182/340] [youtube] Improve extraction

+ Add support for --no-playlist (closes #27009)
* Improve playlist and mix extraction (closes #26390, closes #26509, closes #26534, closes #27011)
+ Extract playlist uploader data
* Update tests
---
 test/test_all_urls.py           |   9 +-
 youtube_dl/extractor/youtube.py | 229 +++++++++++++++++++-------------
 2 files changed, 142 insertions(+), 96 deletions(-)

diff --git a/test/test_all_urls.py b/test/test_all_urls.py
index 348744028..56a08bed8 100644
--- a/test/test_all_urls.py
+++ b/test/test_all_urls.py
@@ -31,16 +31,17 @@ class TestAllURLsMatching(unittest.TestCase):
 
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
+        assertTab = lambda url: self.assertMatch(url, ['youtube:tab'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
-        # assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        # assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
+        assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
-        # assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
+        assertTab('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 4089e2aba..79f87aa85 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -45,6 +45,7 @@ from ..utils import (
     unescapeHTML,
     unified_strdate,
     unsmuggle_url,
+    update_url_query,
     uppercase_escape,
     url_or_none,
     urlencode_postdata,
@@ -65,7 +66,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     # If True it will raise an error if no login info is provided
     _LOGIN_REQUIRED = False
 
-    _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
+    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'
 
     _YOUTUBE_CLIENT_HEADERS = {
         'x-youtube-client-name': '1',
@@ -974,10 +975,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'url': 'sJL6WA-aGkQ',
             'only_matching': True,
         },
-        {
-            'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
-            'only_matching': True,
-        },
         {
             'url': 'https://invidio.us/watch?v=BaW_jenozKc',
             'only_matching': True,
@@ -2351,7 +2348,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
 class YoutubeTabIE(YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com tab'
-    _VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|playlist\?.*?\blist=)(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|(?:playlist|watch)\?.*?\blist=)(?P<id>[^/?#&]+)'
     IE_NAME = 'youtube:tab'
 
     _TESTS = [{
@@ -2361,6 +2358,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCqj7Cz7revf5maW9g5pgNcg',
             'title': 'Игорь Клейнер - Playlists',
+            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
         },
     }, {
         # playlists, multipage, different order
@@ -2369,14 +2367,16 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCqj7Cz7revf5maW9g5pgNcg',
             'title': 'Игорь Клейнер - Playlists',
+            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
         },
     }, {
         # playlists, singlepage
         'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
         'playlist_mincount': 4,
         'info_dict': {
-            'id': 'ThirstForScience',
-            'title': 'ThirstForScience',
+            'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
+            'title': 'ThirstForScience - Playlists',
+            'description': 'md5:609399d937ea957b0f53cbffb747a14c',
         }
     }, {
         'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
@@ -2407,6 +2407,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Home',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 2,
     }, {
@@ -2415,6 +2416,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Videos',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 975,
     }, {
@@ -2423,6 +2425,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Videos',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 199,
     }, {
@@ -2431,6 +2434,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Playlists',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 17,
     }, {
@@ -2439,6 +2443,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Community',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 18,
     }, {
@@ -2447,6 +2452,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Channels',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 138,
     }, {
@@ -2465,7 +2471,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'title': '29C3: Not my department',
             'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
             'uploader': 'Christiaan008',
-            'uploader_id': 'ChRiStIaAn008',
+            'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
         },
         'playlist_count': 96,
     }, {
@@ -2475,7 +2481,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'title': 'Uploads from Cauchemar',
             'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
             'uploader': 'Cauchemar',
-            'uploader_id': 'Cauchemar89',
+            'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
         },
         'playlist_mincount': 1123,
     }, {
@@ -2489,7 +2495,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'title': 'Uploads from Interstellar Movie',
             'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
             'uploader': 'Interstellar Movie',
-            'uploader_id': 'InterstellarMovie1',
+            'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
         },
         'playlist_mincount': 21,
     }, {
@@ -2498,13 +2504,43 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'info_dict': {
             'title': 'Data Analysis with Dr Mike Pound',
             'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
-            'uploader_id': 'Computerphile',
+            'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
             'uploader': 'Computerphile',
         },
         'playlist_mincount': 11,
     }, {
         'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
         'only_matching': True,
+    }, {
+        # Playlist URL that does not actually serve a playlist
+        'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
+        'info_dict': {
+            'id': 'FqZTN594JQw',
+            'ext': 'webm',
+            'title': "Smiley's People 01 detective, Adventure Series, Action",
+            'uploader': 'STREEM',
+            'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
+            'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
+            'upload_date': '20150526',
+            'license': 'Standard YouTube License',
+            'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
+            'categories': ['People & Blogs'],
+            'tags': list,
+            'view_count': int,
+            'like_count': int,
+            'dislike_count': int,
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'skip': 'This video is not available.',
+        'add_ie': [YoutubeIE.ie_key()],
+    }, {
+        'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
+        'only_matching': True,
     }]
 
     @classmethod
@@ -2535,7 +2571,9 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
     def _extract_video(self, renderer):
         video_id = renderer.get('videoId')
         title = try_get(
-            renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+            renderer,
+            (lambda x: x['title']['runs'][0]['text'],
+             lambda x: x['title']['simpleText']), compat_str)
         description = try_get(
             renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
             compat_str)
@@ -2615,7 +2653,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         for content in video_list_renderer['contents']:
             if not isinstance(content, dict):
                 continue
-            renderer = content.get('playlistVideoRenderer')
+            renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
             if not isinstance(renderer, dict):
                 continue
             video_id = renderer.get('videoId')
@@ -2715,7 +2753,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
 
     def _entries(self, tab, identity_token):
         continuation = None
-        slr_contents = tab['sectionListRenderer']['contents']
+        slr_contents = try_get(tab, lambda x: x['sectionListRenderer']['contents'], list) or []
         for slr_content in slr_contents:
             if not isinstance(slr_content, dict):
                 continue
@@ -2824,13 +2862,30 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         else:
             raise ExtractorError('Unable to find selected tab')
 
-    def _real_extract(self, url):
-        item_id = self._match_id(url)
-        url = compat_urlparse.urlunparse(
-            compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
-        webpage = self._download_webpage(url, item_id)
-        data = self._extract_yt_initial_data(item_id, webpage)
-        tabs = data['contents']['twoColumnBrowseResultsRenderer']['tabs']
+    @staticmethod
+    def _extract_uploader(data):
+        uploader = {}
+        sidebar_renderer = try_get(
+            data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
+        if sidebar_renderer:
+            for item in sidebar_renderer:
+                if not isinstance(item, dict):
+                    continue
+                renderer = item.get('playlistSidebarSecondaryInfoRenderer')
+                if not isinstance(renderer, dict):
+                    continue
+                owner = try_get(
+                    renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
+                if owner:
+                    uploader['uploader'] = owner.get('text')
+                    uploader['uploader_id'] = try_get(
+                        owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
+                    uploader['uploader_url'] = urljoin(
+                        'https://www.youtube.com/',
+                        try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
+        return uploader
+
+    def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
         selected_tab = self._extract_selected_tab(tabs)
         renderer = try_get(
             data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
@@ -2848,42 +2903,69 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             title = renderer.get('title')
             description = None
             playlist_id = item_id
-        identity_token = self._search_regex(
-            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
-            'identity token', default=None)
-        return self.playlist_result(
+        playlist = self.playlist_result(
             self._entries(selected_tab['content'], identity_token),
             playlist_id=playlist_id, playlist_title=title,
             playlist_description=description)
+        playlist.update(self._extract_uploader(data))
+        return playlist
+
+    def _extract_from_playlist(self, item_id, data, playlist):
+        title = playlist.get('title') or try_get(
+            data, lambda x: x['titleText']['simpleText'], compat_str)
+        playlist_id = playlist.get('playlistId') or item_id
+        return self.playlist_result(
+            self._playlist_entries(playlist), playlist_id=playlist_id,
+            playlist_title=title)
+
+    def _real_extract(self, url):
+        item_id = self._match_id(url)
+        url = compat_urlparse.urlunparse(
+            compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
+        # Handle both video/playlist URLs
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        video_id = qs.get('v', [None])[0]
+        playlist_id = qs.get('list', [None])[0]
+        if video_id and playlist_id:
+            if self._downloader.params.get('noplaylist'):
+                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
+                return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+        webpage = self._download_webpage(url, item_id)
+        identity_token = self._search_regex(
+            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
+            'identity token', default=None)
+        data = self._extract_yt_initial_data(item_id, webpage)
+        tabs = try_get(
+            data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
+        if tabs:
+            return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
+        playlist = try_get(
+            data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
+        if playlist:
+            return self._extract_from_playlist(item_id, data, playlist)
+        # Fallback to video extraction if no playlist alike page is recognized
+        if video_id:
+            return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+        # Failed to recognize
+        raise ExtractorError('Unable to recognize tab page')
 
 
 class YoutubePlaylistIE(InfoExtractor):
     IE_DESC = 'YouTube.com playlists'
-    _VALID_URL = r"""(?x)(?:
+    _VALID_URL = r'''(?x)(?:
                         (?:https?://)?
                         (?:\w+\.)?
                         (?:
                             (?:
                                 youtube(?:kids)?\.com|
-                                invidio\.us
+                                invidio\.us|
+                                youtu\.be
                             )
-                            /
-                            (?:
-                               (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
-                               \? (?:.*?[&;])*? (?:p|a|list)=
-                            |  p/
-                            )|
-                            youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
-                        )
-                        (
-                            (?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
-                            # Top tracks, they can also include dots
-                            |(?:MC)[\w\.]*
-                        )
-                        .*
-                     |
-                        (%(playlist_id)s)
-                     )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
+                            /.*?\?.*?\blist=
+                        )?
+                        (?P<id>%(playlist_id)s)
+                     )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
     IE_NAME = 'youtube:playlist'
     _TESTS = [{
         'note': 'issue #673',
@@ -2892,7 +2974,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'title': '[OLD]Team Fortress 2 (Class-based LP)',
             'id': 'PLBB231211A4F62143',
             'uploader': 'Wickydoo',
-            'uploader_id': 'Wickydoo',
+            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
         },
         'playlist_mincount': 29,
     }, {
@@ -2920,41 +3002,8 @@ class YoutubePlaylistIE(InfoExtractor):
             'title': '2018 Chinese New Singles (11/6 updated)',
             'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
             'uploader': 'LBK',
-            'uploader_id': 'sdragonfang',
+            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
         }
-    }, {
-        'note': 'Embedded SWF player',
-        'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
-        'playlist_count': 4,
-        'info_dict': {
-            'title': 'JODA7',
-            'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
-        },
-        'skip': 'This playlist does not exist',
-    }, {
-        # Playlist URL that does not actually serve a playlist
-        'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
-        'info_dict': {
-            'id': 'FqZTN594JQw',
-            'ext': 'webm',
-            'title': "Smiley's People 01 detective, Adventure Series, Action",
-            'uploader': 'STREEM',
-            'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
-            'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
-            'upload_date': '20150526',
-            'license': 'Standard YouTube License',
-            'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
-            'categories': ['People & Blogs'],
-            'tags': list,
-            'view_count': int,
-            'like_count': int,
-            'dislike_count': int,
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'skip': 'This video is not available.',
-        'add_ie': [YoutubeIE.ie_key()],
     }, {
         'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
         'info_dict': {
@@ -2985,9 +3034,6 @@ class YoutubePlaylistIE(InfoExtractor):
         # music album playlist
         'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
         'only_matching': True,
-    }, {
-        'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
-        'only_matching': True,
     }]
 
     @classmethod
@@ -2996,13 +3042,12 @@ class YoutubePlaylistIE(InfoExtractor):
             YoutubePlaylistIE, cls).suitable(url)
 
     def _real_extract(self, url):
-        # Extract playlist id
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
-        playlist_id = mobj.group(1) or mobj.group(2)
+        playlist_id = self._match_id(url)
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        if not qs:
+            qs = {'list': playlist_id}
         return self.url_result(
-            'https://www.youtube.com/playlist?list=%s' % playlist_id,
+            update_url_query('https://www.youtube.com/playlist', qs),
             ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
 
 
@@ -3250,13 +3295,13 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
 class YoutubeWatchLaterIE(InfoExtractor):
     IE_NAME = 'youtube:watchlater'
     IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
+    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/watch_later|:ytwatchlater'
 
     _TESTS = [{
-        'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
+        'url': 'https://www.youtube.com/feed/watch_later',
         'only_matching': True,
     }, {
-        'url': 'https://www.youtube.com/feed/watch_later',
+        'url': ':ytwatchlater',
         'only_matching': True,
     }]
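
The --no-playlist support added above hinges on reading both v= and list= from the URL query and only falling back to single-video extraction when asked to, or when no playlist-like page is recognized. A standalone sketch of that decision:

    try:
        from urllib.parse import parse_qs, urlparse  # Python 3
    except ImportError:  # Python 2, which youtube-dl still supported at the time
        from urlparse import parse_qs, urlparse


    def decide_target(url, noplaylist=False):
        qs = parse_qs(urlparse(url).query)
        video_id = qs.get('v', [None])[0]
        playlist_id = qs.get('list', [None])[0]
        if video_id and playlist_id and noplaylist:
            return 'video', video_id
        if playlist_id:
            return 'playlist', playlist_id
        return 'video', video_id


    url = 'https://www.youtube.com/watch?v=BaW_jenozKc&list=PL4023E734DA416012'
    print(decide_target(url))                   # ('playlist', 'PL4023E734DA416012')
    print(decide_target(url, noplaylist=True))  # ('video', 'BaW_jenozKc')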
 

From 2d7a29081c7419e4148cd0829b4f68a608c78496 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 18 Nov 2020 04:09:02 +0700
Subject: [PATCH 183/340] [spiegel] Fix extraction (closes #24206, closes
 #24767)

Code picked from PR #24767 since the original repo is not available due to a takedown.
---
 youtube_dl/extractor/extractors.py |   3 +-
 youtube_dl/extractor/spiegel.py    | 161 +++++------------------------
 youtube_dl/extractor/spiegeltv.py  |  17 ---
 3 files changed, 29 insertions(+), 152 deletions(-)
 delete mode 100644 youtube_dl/extractor/spiegeltv.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index b2baf8057..11ef47261 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1054,8 +1054,7 @@ from .spankbang import (
     SpankBangPlaylistIE,
 )
 from .spankwire import SpankwireIE
-from .spiegel import SpiegelIE, SpiegelArticleIE
-from .spiegeltv import SpiegeltvIE
+from .spiegel import SpiegelIE
 from .spike import (
     BellatorIE,
     ParamountNetworkIE,
diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
index 4df7f4ddc..2da32b9b2 100644
--- a/youtube_dl/extractor/spiegel.py
+++ b/youtube_dl/extractor/spiegel.py
@@ -1,159 +1,54 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from .nexx import (
-    NexxIE,
-    NexxEmbedIE,
-)
-from .spiegeltv import SpiegeltvIE
-from ..compat import compat_urlparse
-from ..utils import (
-    parse_duration,
-    strip_or_none,
-    unified_timestamp,
-)
+from .jwplatform import JWPlatformIE
 
 
 class SpiegelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$'
+    _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
+    _VALID_URL = r'https?://(?:www\.)?(?:spiegel|manager-magazin)\.de(?:/[^/]+)+/[^/]*-(?P<id>[0-9]+|%s)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$' % _UUID_RE
     _TESTS = [{
         'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
-        'md5': 'b57399839d055fccfeb9a0455c439868',
+        'md5': '50c7948883ec85a3e431a0a44b7ad1d6',
         'info_dict': {
-            'id': '563747',
+            'id': 'II0BUyxY',
+            'display_id': '1259285',
             'ext': 'mp4',
-            'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv',
+            'title': 'Vulkan Tungurahua in Ecuador ist wieder aktiv - DER SPIEGEL - Wissenschaft',
             'description': 'md5:8029d8310232196eb235d27575a8b9f4',
-            'duration': 49,
+            'duration': 48.0,
             'upload_date': '20130311',
-            'timestamp': 1362994320,
+            'timestamp': 1362997920,
         },
     }, {
         'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
-        'md5': '5b6c2f4add9d62912ed5fc78a1faed80',
-        'info_dict': {
-            'id': '580988',
-            'ext': 'mp4',
-            'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers',
-            'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
-            'duration': 983,
-            'upload_date': '20131115',
-            'timestamp': 1384546642,
-        },
-    }, {
-        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
-        'md5': '97b91083a672d72976faa8433430afb9',
-        'info_dict': {
-            'id': '601883',
-            'ext': 'mp4',
-            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
-            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
-            'upload_date': '20140904',
-            'timestamp': 1409834160,
-        }
-    }, {
-        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html',
         'only_matching': True,
     }, {
-        # nexx video
+        'url': 'https://www.spiegel.de/video/eifel-zoo-aufregung-um-ausgebrochene-raubtiere-video-99018031.html',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.spiegel.de/panorama/urteile-im-goldmuenzenprozess-haftstrafen-fuer-clanmitglieder-a-aae8df48-43c1-4c61-867d-23f0a2d254b7',
+        'only_matching': True,
+    }, {
         'url': 'http://www.spiegel.de/video/spiegel-tv-magazin-ueber-guellekrise-in-schleswig-holstein-video-99012776.html',
         'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        metadata_url = 'http://www.spiegel.de/video/metadata/video-%s.json' % video_id
-        handle = self._request_webpage(metadata_url, video_id)
-
-        # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
-        if SpiegeltvIE.suitable(handle.geturl()):
-            return self.url_result(handle.geturl(), 'Spiegeltv')
-
-        video_data = self._parse_json(self._webpage_read_content(
-            handle, metadata_url, video_id), video_id)
-        title = video_data['title']
-        nexx_id = video_data['nexxOmniaId']
-        domain_id = video_data.get('nexxOmniaDomain') or '748'
-
-        return {
-            '_type': 'url_transparent',
-            'id': video_id,
-            'url': 'nexx:%s:%s' % (domain_id, nexx_id),
-            'title': title,
-            'description': strip_or_none(video_data.get('teaser')),
-            'duration': parse_duration(video_data.get('duration')),
-            'timestamp': unified_timestamp(video_data.get('datum')),
-            'ie_key': NexxIE.ie_key(),
-        }
-
-
-class SpiegelArticleIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
-    IE_NAME = 'Spiegel:Article'
-    IE_DESC = 'Articles on spiegel.de'
-    _TESTS = [{
+    }, {
         'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
-        'info_dict': {
-            'id': '1516455',
-            'ext': 'mp4',
-            'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
-            'description': 're:^Patrick Kämnitz gehört.{100,}',
-            'upload_date': '20140825',
-        },
-    }, {
-        'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
-        'info_dict': {
-
-        },
-        'playlist_count': 6,
-    }, {
-        # Nexx iFrame embed
-        'url': 'http://www.spiegel.de/sptv/spiegeltv/spiegel-tv-ueber-schnellste-katapult-achterbahn-der-welt-taron-a-1137884.html',
-        'info_dict': {
-            'id': '161464',
-            'ext': 'mp4',
-            'title': 'Nervenkitzel Achterbahn',
-            'alt_title': 'Karussellbauer in Deutschland',
-            'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc',
-            'release_year': 2005,
-            'creator': 'SPIEGEL TV',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'duration': 2761,
-            'timestamp': 1394021479,
-            'upload_date': '20140305',
-        },
-        'params': {
-            'format': 'bestvideo',
-            'skip_download': True,
-        },
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-
-        # Single video on top of the page
-        video_link = self._search_regex(
-            r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
-            'video page URL', default=None)
-        if video_link:
-            video_url = compat_urlparse.urljoin(
-                self.http_scheme() + '//spiegel.de/', video_link)
-            return self.url_result(video_url)
-
-        # Multiple embedded videos
-        embeds = re.findall(
-            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
-            webpage)
-        entries = [
-            self.url_result(compat_urlparse.urljoin(
-                self.http_scheme() + '//spiegel.de/', embed_path))
-            for embed_path in embeds]
-        if embeds:
-            return self.playlist_result(entries)
-
-        return self.playlist_from_matches(
-            NexxEmbedIE._extract_urls(webpage), ie=NexxEmbedIE.ie_key())
+        media_id = self._html_search_regex(
+            r'(&#34;|["\'])mediaId\1\s*:\s*(&#34;|["\'])(?P<id>(?:(?!\2).)+)\2',
+            webpage, 'media id', group='id')
+        return {
+            '_type': 'url_transparent',
+            'id': video_id,
+            'display_id': video_id,
+            'url': 'jwplatform:%s' % media_id,
+            'title': self._og_search_title(webpage, default=None),
+            'ie_key': JWPlatformIE.ie_key(),
+        }
diff --git a/youtube_dl/extractor/spiegeltv.py b/youtube_dl/extractor/spiegeltv.py
deleted file mode 100644
index 6ccf4c342..000000000
--- a/youtube_dl/extractor/spiegeltv.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-from .nexx import NexxIE
-
-
-class SpiegeltvIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/videos/(?P<id>\d+)'
-    _TEST = {
-        'url': 'http://www.spiegel.tv/videos/161681-flug-mh370/',
-        'only_matching': True,
-    }
-
-    def _real_extract(self, url):
-        return self.url_result(
-            'https://api.nexx.cloud/v3/748/videos/byid/%s'
-            % self._match_id(url), ie=NexxIE.ie_key())
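
The rewritten Spiegel extractor above no longer builds formats itself: it scrapes the page's mediaId and hands off to the JWPlatform extractor through a url_transparent result. A rough standalone illustration of that hand-off, reusing the regex from the diff against a made-up page fragment (the fragment and media id are purely illustrative), might look like:

    import re

    def extract_media_id(webpage):
        # mediaId may be quoted with ", ' or the HTML entity &#34; hence the backreferences
        return re.search(
            r'(&#34;|["\'])mediaId\1\s*:\s*(&#34;|["\'])(?P<id>(?:(?!\2).)+)\2',
            webpage).group('id')

    page = '"mediaId":"abc123XYZ"'  # illustrative fragment, not a real Spiegel page
    print({
        '_type': 'url_transparent',  # keep our own id/title, let JWPlatform fill in formats
        'url': 'jwplatform:%s' % extract_media_id(page),
        'ie_key': 'JWPlatform',
    })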

From bb2b89e077bf3d105b89d48dc523318a87763e4d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 18 Nov 2020 04:11:58 +0700
Subject: [PATCH 184/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 254d0ef1b..6864bbef3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+version <unreleased>
+
+Extractors
+* [spiegel] Fix extraction (#24206, #24767)
+* [youtube] Improve extraction
+    + Add support for --no-playlist (#27009)
+    * Improve playlist and mix extraction (#26390, #26509, #26534, #27011)
+    + Extract playlist uploader data
+* [youtube:tab] Fix view count extraction (#27051)
+* [malltv] Fix extraction (#27035)
++ [bandcamp] Extract playlist description (#22684)
+* [urplay] Fix extraction (#26828)
+* [youtube:tab] Fix playlist title extraction (#27015)
+* [youtube] Fix chapters extraction (#26005)
+
+
 version 2020.11.17
 
 Core

From 9360936f26cba46c0ccfb668fc463cda3226e151 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 18 Nov 2020 04:15:30 +0700
Subject: [PATCH 185/340] release 2020.11.18

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 2 --
 youtube_dl/version.py                            | 2 +-
 8 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 80baffa2a..64a2eb736 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.17
+ [debug] youtube-dl version 2020.11.18
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index ee4215296..c0c789673 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 4c3834fa5..c90dbf30c 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 3ad3e7409..4c9d295de 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.17
+ [debug] youtube-dl version 2020.11.18
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index aabbfe83c..a6040daee 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.17. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.17**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 6864bbef3..4d404a56e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.18
 
 Extractors
 * [spiegel] Fix extraction (#24206, #24767)
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 86b5ad726..9f0cd6ff6 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -824,8 +824,6 @@
  - **SpankBangPlaylist**
  - **Spankwire**
  - **Spiegel**
- - **Spiegel:Article**: Articles on spiegel.de
- - **Spiegeltv**
  - **sport.francetvinfo.fr**
  - **Sport5**
  - **SportBox**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index ed18392a0..d4c6b936f 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.17'
+__version__ = '2020.11.18'

From 8a6c5b080600465e08f5504f147c9042211c123b Mon Sep 17 00:00:00 2001
From: gdzx <6490707+gdzx@users.noreply.github.com>
Date: Tue, 17 Nov 2020 23:06:19 +0100
Subject: [PATCH 186/340] [francetv] Add fallback video url extraction (#27047)

Fall back to another API endpoint when no video formats are found.

Closes ytdl-org#22561
---
 youtube_dl/extractor/francetv.py | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index 81b468c7d..d776cf1dd 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -128,17 +128,37 @@ class FranceTVIE(InfoExtractor):
 
         is_live = None
 
-        formats = []
+        videos = []
+
         for video in info['videos']:
             if video['statut'] != 'ONLINE':
                 continue
+            if not video['url']:
+                continue
+            videos.append(video)
+
+        if not videos:
+            for device_type in ['desktop', 'mobile']:
+                fallback_info = self._download_json(
+                    'https://player.webservices.francetelevisions.fr/v1/videos/%s' % video_id,
+                    video_id, 'Downloading fallback %s video JSON' % device_type, query={
+                        'device_type': device_type,
+                        'browser': 'chrome',
+                    }, fatal=False)
+
+                if fallback_info and fallback_info.get('video'):
+                    videos.append(fallback_info['video'])
+
+        formats = []
+        for video in videos:
             video_url = video['url']
             if not video_url:
                 continue
             if is_live is None:
-                is_live = (try_get(
-                    video, lambda x: x['plages_ouverture'][0]['direct'],
-                    bool) is True) or '/live.francetv.fr/' in video_url
+                is_live = ((try_get(
+                    video, lambda x: x['plages_ouverture'][0]['direct'], bool) is True)
+                    or video.get('is_live') is True
+                    or '/live.francetv.fr/' in video_url)
             format_id = video['format']
             ext = determine_ext(video_url)
             if ext == 'f4m':
@@ -154,6 +174,9 @@ class FranceTVIE(InfoExtractor):
                     sign(video_url, format_id), video_id, 'mp4',
                     entry_protocol='m3u8_native', m3u8_id=format_id,
                     fatal=False))
+            elif ext == 'mpd':
+                formats.extend(self._extract_mpd_formats(
+                    sign(video_url, format_id), video_id, mpd_id=format_id, fatal=False))
             elif video_url.startswith('rtmp'):
                 formats.append({
                     'url': video_url,
@@ -166,6 +189,7 @@ class FranceTVIE(InfoExtractor):
                         'url': video_url,
                         'format_id': format_id,
                     })
+
         self._sort_formats(formats)
 
         title = info['titre']
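
For reference, the fallback flow added here can be sketched outside the extractor roughly as follows (standalone Python using requests instead of _download_json; the endpoint and query parameters come from the diff, the error handling is simplified):

    import requests

    def fetch_fallback_videos(video_id):
        # Query the player API once per device type and collect whatever video objects come back
        videos = []
        for device_type in ('desktop', 'mobile'):
            resp = requests.get(
                'https://player.webservices.francetelevisions.fr/v1/videos/%s' % video_id,
                params={'device_type': device_type, 'browser': 'chrome'})
            if resp.ok:
                info = resp.json()
                if info.get('video'):
                    videos.append(info['video'])
        return videos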

From d65628ef037c5c927cf2990d91f30cc222724171 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 17 Nov 2020 23:16:04 +0100
Subject: [PATCH 187/340] [francetv] improve info extraction

---
 youtube_dl/extractor/francetv.py | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index d776cf1dd..92f0851e3 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -17,6 +17,7 @@ from ..utils import (
     parse_duration,
     try_get,
     url_or_none,
+    urljoin,
 )
 from .dailymotion import DailymotionIE
 
@@ -130,10 +131,10 @@ class FranceTVIE(InfoExtractor):
 
         videos = []
 
-        for video in info['videos']:
-            if video['statut'] != 'ONLINE':
+        for video in (info.get('videos') or []):
+            if video.get('statut') != 'ONLINE':
                 continue
-            if not video['url']:
+            if not video.get('url'):
                 continue
             videos.append(video)
 
@@ -151,15 +152,15 @@ class FranceTVIE(InfoExtractor):
 
         formats = []
         for video in videos:
-            video_url = video['url']
+            video_url = video.get('url')
             if not video_url:
                 continue
             if is_live is None:
-                is_live = ((try_get(
-                    video, lambda x: x['plages_ouverture'][0]['direct'], bool) is True)
+                is_live = (try_get(
+                    video, lambda x: x['plages_ouverture'][0]['direct'], bool) is True
                     or video.get('is_live') is True
                     or '/live.francetv.fr/' in video_url)
-            format_id = video['format']
+            format_id = video.get('format')
             ext = determine_ext(video_url)
             if ext == 'f4m':
                 if georestricted:
@@ -209,10 +210,10 @@ class FranceTVIE(InfoExtractor):
         return {
             'id': video_id,
             'title': self._live_title(title) if is_live else title,
-            'description': clean_html(info['synopsis']),
-            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
-            'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
-            'timestamp': int_or_none(info['diffusion']['timestamp']),
+            'description': clean_html(info.get('synopsis')),
+            'thumbnail': urljoin('http://pluzz.francetv.fr', info.get('image')),
+            'duration': int_or_none(info.get('real_duration')) or parse_duration(info.get('duree')),
+            'timestamp': int_or_none(try_get(info, lambda x: x['diffusion']['timestamp'])),
             'is_live': is_live,
             'formats': formats,
             'subtitles': subtitles,

From 5c3f7014efe75ae4c4e1c8aa18a062b8442c917a Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 18 Nov 2020 00:41:07 +0100
Subject: [PATCH 188/340] [lbry] add support for odysee.com domain (closes
 #26806)

---
 youtube_dl/extractor/lbry.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/lbry.py b/youtube_dl/extractor/lbry.py
index 587deac90..0a7ee919c 100644
--- a/youtube_dl/extractor/lbry.py
+++ b/youtube_dl/extractor/lbry.py
@@ -16,7 +16,7 @@ from ..utils import (
 
 class LBRYIE(InfoExtractor):
     IE_NAME = 'lbry.tv'
-    _VALID_URL = r'https?://(?:www\.)?lbry\.tv/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
+    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
     _TESTS = [{
         # Video
         'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
@@ -41,6 +41,9 @@ class LBRYIE(InfoExtractor):
             'timestamp': 1591312601,
             'upload_date': '20200604',
         }
+    }, {
+        'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
+        'only_matching': True,
     }]
 
     def _call_api_proxy(self, method, display_id, params):

From c7178f0f7af1d12cdaca5d1328adba5300c7436a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 18 Nov 2020 23:31:35 +0700
Subject: [PATCH 189/340] [extractor/common] Output error for invalid URLs in
 _is_valid_url (refs #21400, refs #24151, refs #25617, refs #25618, refs
 #25586, refs #26068, refs #27072)

---
 youtube_dl/extractor/common.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 021945a89..0c9089674 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1456,9 +1456,10 @@ class InfoExtractor(object):
         try:
             self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
             return True
-        except ExtractorError:
+        except ExtractorError as e:
             self.to_screen(
-                '%s: %s URL is invalid, skipping' % (video_id, item))
+                '%s: %s URL is invalid, skipping: %s'
+                % (video_id, item, error_to_compat_str(e.cause)))
             return False
 
     def http_scheme(self):
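
The gist of this change is that the skip message now carries the underlying cause. A minimal standalone sketch of that behaviour (the helper names are made up; only the message format follows the diff):

    def check_url(request, video_id, item='video'):
        try:
            request()  # stands in for self._request_webpage(url, video_id, ...)
            return True
        except Exception as e:
            # New behaviour: append the underlying cause to the skip message
            print('%s: %s URL is invalid, skipping: %s' % (video_id, item, getattr(e, 'cause', e)))
            return False

    def failing_request():
        raise IOError('HTTP Error 404: Not Found')

    check_url(failing_request, '12345')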

From 444a68e0ec928a6dd01170362e092e41d69e3781 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 18 Nov 2020 21:06:33 +0100
Subject: [PATCH 190/340] [mgtv] fix format extraction (closes #26415)

---
 youtube_dl/extractor/mgtv.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/mgtv.py b/youtube_dl/extractor/mgtv.py
index 71fc3ec56..cab3aa045 100644
--- a/youtube_dl/extractor/mgtv.py
+++ b/youtube_dl/extractor/mgtv.py
@@ -17,9 +17,8 @@ from ..utils import (
 
 
 class MGTVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html'
+    _VALID_URL = r'https?://(?:w(?:ww)?\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html'
     IE_DESC = '芒果TV'
-    _GEO_COUNTRIES = ['CN']
 
     _TESTS = [{
         'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html',
@@ -34,14 +33,18 @@ class MGTVIE(InfoExtractor):
     }, {
         'url': 'http://www.mgtv.com/b/301817/3826653.html',
         'only_matching': True,
+    }, {
+        'url': 'https://w.mgtv.com/b/301817/3826653.html',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        tk2 = base64.urlsafe_b64encode(b'did=%s|pno=1030|ver=0.3.0301|clit=%d' % (compat_str(uuid.uuid4()).encode(), time.time()))[::-1]
         try:
             api_data = self._download_json(
                 'https://pcweb.api.mgtv.com/player/video', video_id, query={
-                    'tk2': base64.urlsafe_b64encode(b'did=%s|pno=1030|ver=0.3.0301|clit=%d' % (compat_str(uuid.uuid4()).encode(), time.time()))[::-1],
+                    'tk2': tk2,
                     'video_id': video_id,
                 }, headers=self.geo_verification_headers())['data']
         except ExtractorError as e:
@@ -56,6 +59,7 @@ class MGTVIE(InfoExtractor):
         stream_data = self._download_json(
             'https://pcweb.api.mgtv.com/player/getSource', video_id, query={
                 'pm2': api_data['atc']['pm2'],
+                'tk2': tk2,
                 'video_id': video_id,
             }, headers=self.geo_verification_headers())['data']
         stream_domain = stream_data['stream_domain'][0]
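
The tk2 token is now computed once and sent with both API requests; it is simply the reversed, URL-safe base64 of a did/pno/ver/clit string, as a quick standalone reproduction of the construction above shows:

    import base64
    import time
    import uuid

    def build_tk2():
        # Same construction as in the patch: format the fields, base64-encode, then reverse
        raw = b'did=%s|pno=1030|ver=0.3.0301|clit=%d' % (str(uuid.uuid4()).encode(), time.time())
        return base64.urlsafe_b64encode(raw)[::-1]

    print(build_tk2())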

From 9b505185da1f7148e02174fce693583af68976d9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 19 Nov 2020 03:26:49 +0700
Subject: [PATCH 191/340] [arte] Extract m3u8 formats (closes #27061)

---
 youtube_dl/extractor/arte.py | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py
index 2bd3bfe8a..b80467548 100644
--- a/youtube_dl/extractor/arte.py
+++ b/youtube_dl/extractor/arte.py
@@ -11,6 +11,7 @@ from ..utils import (
     qualities,
     try_get,
     unified_strdate,
+    url_or_none,
 )
 
 # There are different sources of video in arte.tv, the extraction process
@@ -63,8 +64,13 @@ class ArteTVBaseIE(InfoExtractor):
         langcode = LANGS.get(lang, lang)
 
         formats = []
+        m3u8_formats = []
         for format_id, format_dict in vsr.items():
             f = dict(format_dict)
+            format_url = url_or_none(f.get('url'))
+            streamer = f.get('streamer')
+            if not format_url and not streamer:
+                continue
             versionCode = f.get('versionCode')
             l = re.escape(langcode)
 
@@ -107,6 +113,15 @@ class ArteTVBaseIE(InfoExtractor):
             else:
                 lang_pref = -1
 
+            media_type = f.get('mediaType')
+            if media_type == 'hls':
+                m3u8_formats = self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id=format_id, fatal=False)
+                for m3u8_format in m3u8_formats:
+                    m3u8_format['language_preference'] = lang_pref
+                continue
+
             format = {
                 'format_id': format_id,
                 'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
@@ -118,7 +133,7 @@ class ArteTVBaseIE(InfoExtractor):
                 'quality': qfunc(f.get('quality')),
             }
 
-            if f.get('mediaType') == 'rtmp':
+            if media_type == 'rtmp':
                 format['url'] = f['streamer']
                 format['play_path'] = 'mp4:' + f['url']
                 format['ext'] = 'flv'
@@ -128,6 +143,8 @@ class ArteTVBaseIE(InfoExtractor):
             formats.append(format)
 
         self._check_formats(formats, video_id)
+
+        formats.extend(m3u8_formats)
         self._sort_formats(formats)
 
         info_dict['formats'] = formats

From 91e954587f5b2b4760b73275ad0720933c0136a7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 19 Nov 2020 05:02:04 +0700
Subject: [PATCH 192/340] [arte] Rework extractors

* Reimplement embed and playlist extractors to delegate to the single entrypoint artetv extractor
  Be aware that the extractor key changes may break entries in existing download archives.
* Improve embeds detection (closes #27057)
- Remove obsolete code
---
 youtube_dl/extractor/arte.py       | 154 ++++++++++++++++++-----------
 youtube_dl/extractor/extractors.py |   2 +-
 youtube_dl/extractor/generic.py    |   9 +-
 3 files changed, 100 insertions(+), 65 deletions(-)

diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py
index b80467548..03abdbfaf 100644
--- a/youtube_dl/extractor/arte.py
+++ b/youtube_dl/extractor/arte.py
@@ -4,7 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
+from ..compat import (
+    compat_str,
+    compat_urlparse,
+)
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -14,14 +17,44 @@ from ..utils import (
     url_or_none,
 )
 
-# There are different sources of video in arte.tv, the extraction process
-# is different for each one. The videos usually expire in 7 days, so we can't
-# add tests.
-
 
 class ArteTVBaseIE(InfoExtractor):
-    def _extract_from_json_url(self, json_url, video_id, lang, title=None):
-        info = self._download_json(json_url, video_id)
+    _ARTE_LANGUAGES = 'fr|de|en|es|it|pl'
+    _API_BASE = 'https://api.arte.tv/api/player/v1'
+
+
+class ArteTVIE(ArteTVBaseIE):
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:
+                            (?:www\.)?arte\.tv/(?P<lang>%(langs)s)/videos|
+                            api\.arte\.tv/api/player/v\d+/config/(?P<lang_2>%(langs)s)
+                        )
+                        /(?P<id>\d{6}-\d{3}-[AF])
+                    ''' % {'langs': ArteTVBaseIE._ARTE_LANGUAGES}
+    _TESTS = [{
+        'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
+        'info_dict': {
+            'id': '088501-000-A',
+            'ext': 'mp4',
+            'title': 'Mexico: Stealing Petrol to Survive',
+            'upload_date': '20190628',
+        },
+    }, {
+        'url': 'https://www.arte.tv/pl/videos/100103-000-A/usa-dyskryminacja-na-porodowce/',
+        'only_matching': True,
+    }, {
+        'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        lang = mobj.group('lang') or mobj.group('lang_2')
+
+        info = self._download_json(
+            '%s/config/%s/%s' % (self._API_BASE, lang, video_id), video_id)
         player_info = info['videoJsonPlayer']
 
         vsr = try_get(player_info, lambda x: x['VSR'], dict)
@@ -38,18 +71,11 @@ class ArteTVBaseIE(InfoExtractor):
         if not upload_date_str:
             upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
 
-        title = (player_info.get('VTI') or title or player_info['VID']).strip()
+        title = (player_info.get('VTI') or player_info['VID']).strip()
         subtitle = player_info.get('VSU', '').strip()
         if subtitle:
             title += ' - %s' % subtitle
 
-        info_dict = {
-            'id': player_info['VID'],
-            'title': title,
-            'description': player_info.get('VDE'),
-            'upload_date': unified_strdate(upload_date_str),
-            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
-        }
         qfunc = qualities(['MQ', 'HQ', 'EQ', 'SQ'])
 
         LANGS = {
@@ -64,7 +90,6 @@ class ArteTVBaseIE(InfoExtractor):
         langcode = LANGS.get(lang, lang)
 
         formats = []
-        m3u8_formats = []
         for format_id, format_dict in vsr.items():
             f = dict(format_dict)
             format_url = url_or_none(f.get('url'))
@@ -120,6 +145,7 @@ class ArteTVBaseIE(InfoExtractor):
                     m3u8_id=format_id, fatal=False)
                 for m3u8_format in m3u8_formats:
                     m3u8_format['language_preference'] = lang_pref
+                formats.extend(m3u8_formats)
                 continue
 
             format = {
@@ -142,58 +168,50 @@ class ArteTVBaseIE(InfoExtractor):
 
             formats.append(format)
 
-        self._check_formats(formats, video_id)
-
-        formats.extend(m3u8_formats)
         self._sort_formats(formats)
 
-        info_dict['formats'] = formats
-        return info_dict
+        return {
+            'id': player_info.get('VID') or video_id,
+            'title': title,
+            'description': player_info.get('VDE'),
+            'upload_date': unified_strdate(upload_date_str),
+            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
+            'formats': formats,
+        }
 
 
-class ArteTVPlus7IE(ArteTVBaseIE):
-    IE_NAME = 'arte.tv:+7'
-    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>fr|de|en|es|it|pl)/videos/(?P<id>\d{6}-\d{3}-[AF])'
-
+class ArteTVEmbedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+'
     _TESTS = [{
-        'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
+        'url': 'https://www.arte.tv/player/v5/index.php?json_url=https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A&lang=de&autoplay=true&mute=0100605-013-A',
         'info_dict': {
-            'id': '088501-000-A',
+            'id': '100605-013-A',
             'ext': 'mp4',
-            'title': 'Mexico: Stealing Petrol to Survive',
-            'upload_date': '20190628',
+            'title': 'United we Stream November Lockdown Edition #13',
+            'description': 'md5:be40b667f45189632b78c1425c7c2ce1',
+            'upload_date': '20201116',
         },
+    }, {
+        'url': 'https://www.arte.tv/player/v3/index.php?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A',
+        'only_matching': True,
     }]
 
-    def _real_extract(self, url):
-        lang, video_id = re.match(self._VALID_URL, url).groups()
-        return self._extract_from_json_url(
-            'https://api.arte.tv/api/player/v1/config/%s/%s' % (lang, video_id),
-            video_id, lang)
-
-
-class ArteTVEmbedIE(ArteTVPlus7IE):
-    IE_NAME = 'arte.tv:embed'
-    _VALID_URL = r'''(?x)
-        https://www\.arte\.tv
-        /player/v3/index\.php\?json_url=
-        (?P<json_url>
-            https?://api\.arte\.tv/api/player/v1/config/
-            (?P<lang>[^/]+)/(?P<id>\d{6}-\d{3}-[AF])
-        )
-    '''
-
-    _TESTS = []
+    @staticmethod
+    def _extract_urls(webpage):
+        return [url for _, url in re.findall(
+            r'<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1',
+            webpage)]
 
     def _real_extract(self, url):
-        json_url, lang, video_id = re.match(self._VALID_URL, url).groups()
-        return self._extract_from_json_url(json_url, video_id, lang)
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        json_url = qs['json_url'][0]
+        video_id = ArteTVIE._match_id(json_url)
+        return self.url_result(
+            json_url, ie=ArteTVIE.ie_key(), video_id=video_id)
 
 
 class ArteTVPlaylistIE(ArteTVBaseIE):
-    IE_NAME = 'arte.tv:playlist'
-    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>fr|de|en|es|it|pl)/videos/(?P<id>RC-\d{6})'
-
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>%s)/videos/(?P<id>RC-\d{6})' % ArteTVBaseIE._ARTE_LANGUAGES
     _TESTS = [{
         'url': 'https://www.arte.tv/en/videos/RC-016954/earn-a-living/',
         'info_dict': {
@@ -202,17 +220,35 @@ class ArteTVPlaylistIE(ArteTVBaseIE):
             'description': 'md5:d322c55011514b3a7241f7fb80d494c2',
         },
         'playlist_mincount': 6,
+    }, {
+        'url': 'https://www.arte.tv/pl/videos/RC-014123/arte-reportage/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         lang, playlist_id = re.match(self._VALID_URL, url).groups()
         collection = self._download_json(
-            'https://api.arte.tv/api/player/v1/collectionData/%s/%s?source=videos'
-            % (lang, playlist_id), playlist_id)
+            '%s/collectionData/%s/%s?source=videos'
+            % (self._API_BASE, lang, playlist_id), playlist_id)
+        entries = []
+        for video in collection['videos']:
+            if not isinstance(video, dict):
+                continue
+            video_url = url_or_none(video.get('url')) or url_or_none(video.get('jsonUrl'))
+            if not video_url:
+                continue
+            video_id = video.get('programId')
+            entries.append({
+                '_type': 'url_transparent',
+                'url': video_url,
+                'id': video_id,
+                'title': video.get('title'),
+                'alt_title': video.get('subtitle'),
+                'thumbnail': url_or_none(try_get(video, lambda x: x['mainImage']['url'], compat_str)),
+                'duration': int_or_none(video.get('durationSeconds')),
+                'view_count': int_or_none(video.get('views')),
+                'ie_key': ArteTVIE.ie_key(),
+            })
         title = collection.get('title')
         description = collection.get('shortDescription') or collection.get('teaserText')
-        entries = [
-            self._extract_from_json_url(
-                video['jsonUrl'], video.get('programId') or playlist_id, lang)
-            for video in collection['videos'] if video.get('jsonUrl')]
         return self.playlist_result(entries, playlist_id, title, description)
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 11ef47261..088800eb9 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -58,7 +58,7 @@ from .ard import (
     ARDMediathekIE,
 )
 from .arte import (
-    ArteTVPlus7IE,
+    ArteTVIE,
     ArteTVEmbedIE,
     ArteTVPlaylistIE,
 )
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 355067a50..d08a8cca5 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -91,6 +91,7 @@ from .piksel import PikselIE
 from .videa import VideaIE
 from .twentymin import TwentyMinutenIE
 from .ustream import UstreamIE
+from .arte import ArteTVEmbedIE
 from .videopress import VideoPressIE
 from .rutube import RutubeIE
 from .limelight import LimelightBaseIE
@@ -2760,11 +2761,9 @@ class GenericIE(InfoExtractor):
             return self.url_result(ustream_url, UstreamIE.ie_key())
 
         # Look for embedded arte.tv player
-        mobj = re.search(
-            r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
-            webpage)
-        if mobj is not None:
-            return self.url_result(mobj.group('url'), 'ArteTVEmbed')
+        arte_urls = ArteTVEmbedIE._extract_urls(webpage)
+        if arte_urls:
+            return self.playlist_from_matches(arte_urls, video_id, video_title)
 
         # Look for embedded francetv player
         mobj = re.search(
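
The new ArteTVEmbedIE above no longer parses the player config itself: it lifts json_url from the embed URL's query string and defers to ArteTVIE. A rough Python 3 sketch of that step, using the only_matching test URL from the diff:

    import re
    from urllib.parse import parse_qs, urlparse

    embed_url = ('https://www.arte.tv/player/v3/index.php'
                 '?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A')

    json_url = parse_qs(urlparse(embed_url).query)['json_url'][0]
    video_id = re.search(r'\d{6}-\d{3}-[AF]', json_url).group(0)  # same id pattern as ArteTVIE._VALID_URL
    print(json_url, video_id)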

From b1347a5881fa09777408a77360f0b5ce7ae6450c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 19 Nov 2020 05:16:25 +0700
Subject: [PATCH 193/340] [youporn] Fix upload date extraction and make comment
 count optional (closes #26986)

---
 youtube_dl/extractor/youporn.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py
index e7fca22de..7b9feafeb 100644
--- a/youtube_dl/extractor/youporn.py
+++ b/youtube_dl/extractor/youporn.py
@@ -29,7 +29,6 @@ class YouPornIE(InfoExtractor):
             'upload_date': '20101217',
             'average_rating': int,
             'view_count': int,
-            'comment_count': int,
             'categories': list,
             'tags': list,
             'age_limit': 18,
@@ -48,7 +47,6 @@ class YouPornIE(InfoExtractor):
             'upload_date': '20110418',
             'average_rating': int,
             'view_count': int,
-            'comment_count': int,
             'categories': list,
             'tags': list,
             'age_limit': 18,
@@ -156,7 +154,8 @@ class YouPornIE(InfoExtractor):
             r'(?s)<div[^>]+class=["\']submitByLink["\'][^>]*>(.+?)</div>',
             webpage, 'uploader', fatal=False)
         upload_date = unified_strdate(self._html_search_regex(
-            [r'Date\s+[Aa]dded:\s*<span>([^<]+)',
+            [r'UPLOADED:\s*<span>([^<]+)',
+             r'Date\s+[Aa]dded:\s*<span>([^<]+)',
              r'(?s)<div[^>]+class=["\']videoInfo(?:Date|Time)["\'][^>]*>(.+?)</div>'],
             webpage, 'upload date', fatal=False))
 
@@ -171,7 +170,7 @@ class YouPornIE(InfoExtractor):
             webpage, 'view count', fatal=False, group='count'))
         comment_count = str_to_int(self._search_regex(
             r'>All [Cc]omments? \(([\d,.]+)\)',
-            webpage, 'comment count', fatal=False))
+            webpage, 'comment count', default=None))
 
         def extract_tag_box(regex, title):
             tag_box = self._search_regex(regex, webpage, title, default=None)
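
The upload date fix simply tries the new UPLOADED: markup before the older patterns; against a hypothetical page fragment the lookup behaves like this:

    import re

    patterns = [
        r'UPLOADED:\s*<span>([^<]+)',         # new layout
        r'Date\s+[Aa]dded:\s*<span>([^<]+)',  # old layout
    ]
    html = 'UPLOADED: <span>November 19, 2020</span>'  # illustrative fragment, not a real page
    for pattern in patterns:
        m = re.search(pattern, html)
        if m:
            print(m.group(1))
            break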

From 32152bab7a488f8b563444e8d8cddd6d09f8a54a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 19 Nov 2020 05:21:09 +0700
Subject: [PATCH 194/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 4d404a56e..a93fcbf7f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,23 @@
+version <unreleased>
+
+Core
+* [extractor/common] Output error for invalid URLs in _is_valid_url (#21400,
+  #24151, #25617, #25618, #25586, #26068, #27072)
+
+Extractors
+* [youporn] Fix upload date extraction
+* [youporn] Make comment count optional (#26986)
+* [arte] Rework extractors
+    * Reimplement embed and playlist extractors to delegate to the single
+      entrypoint artetv extractor
+    * Improve embeds detection (#27057)
++ [arte] Extract m3u8 formats (#27061)
+* [mgtv] Fix format extraction (#26415)
++ [lbry] Add support for odysee.com (#26806)
+* [francetv] Improve info extraction
++ [francetv] Add fallback video URL extraction (#27047)
+
+
 version 2020.11.18
 
 Extractors

From 039e715b3020710546b13976042bd18ab6e2df5b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 19 Nov 2020 05:22:27 +0700
Subject: [PATCH 195/340] release 2020.11.19

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 6 +++---
 youtube_dl/version.py                            | 2 +-
 8 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 64a2eb736..e2e5a15ec 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.18
+ [debug] youtube-dl version 2020.11.19
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index c0c789673..880a96835 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index c90dbf30c..25c5a7daf 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 4c9d295de..59716f962 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.18
+ [debug] youtube-dl version 2020.11.19
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index a6040daee..410abee90 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.18. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.18**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index a93fcbf7f..572e0360d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.19
 
 Core
 * [extractor/common] Output error for invalid URLs in _is_valid_url (#21400,
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 9f0cd6ff6..37425e7a1 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -58,9 +58,9 @@
  - **ARD:mediathek**
  - **ARDBetaMediathek**
  - **Arkena**
- - **arte.tv:+7**
- - **arte.tv:embed**
- - **arte.tv:playlist**
+ - **ArteTV**
+ - **ArteTVEmbed**
+ - **ArteTVPlaylist**
  - **AsianCrush**
  - **AsianCrushPlaylist**
  - **AtresPlayer**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index d4c6b936f..53eeb6ccf 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.18'
+__version__ = '2020.11.19'

From 4fe190df705d16c66fc3e7b2d798ff14ebbc9878 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 19 Nov 2020 11:54:54 +0100
Subject: [PATCH 196/340] [mtv] fix mgid extraction (closes #26841)

---
 youtube_dl/extractor/mtv.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index fedd5f46b..df1034fc5 100644
--- a/youtube_dl/extractor/mtv.py
+++ b/youtube_dl/extractor/mtv.py
@@ -349,6 +349,18 @@ class MTVIE(MTVServicesInfoExtractor):
         'only_matching': True,
     }]
 
+    @staticmethod
+    def extract_child_with_type(parent, t):
+        children = parent['children']
+        return next(c for c in children if c.get('type') == t)
+
+    def _extract_mgid(self, webpage):
+        data = self._parse_json(self._search_regex(
+            r'__DATA__\s*=\s*({.+?});', webpage, 'data'), None)
+        main_container = self.extract_child_with_type(data, 'MainContainer')
+        video_player = self.extract_child_with_type(main_container, 'VideoPlayer')
+        return video_player['props']['media']['video']['config']['uri']
+
 
 class MTVJapanIE(MTVServicesInfoExtractor):
     IE_NAME = 'mtvjapan'
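
The new mgid lookup parses the page's __DATA__ JSON and walks typed child nodes down to the player config. A self-contained sketch with a heavily simplified, made-up __DATA__ blob (the real structure is much larger) behaves the same way:

    import json
    import re

    def extract_child_with_type(parent, t):
        return next(c for c in parent['children'] if c.get('type') == t)

    # Hypothetical, trimmed-down page snippet with the same nesting the patch relies on
    webpage = ('window.__DATA__ = {"children": [{"type": "MainContainer", "children": '
               '[{"type": "VideoPlayer", "props": {"media": {"video": {"config": '
               '{"uri": "mgid:arc:video:example:1234"}}}}}]}]};')

    data = json.loads(re.search(r'__DATA__\s*=\s*({.+?});', webpage).group(1))
    main_container = extract_child_with_type(data, 'MainContainer')
    video_player = extract_child_with_type(main_container, 'VideoPlayer')
    print(video_player['props']['media']['video']['config']['uri'])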

From 2dbb45ae82836699486d434cfb6d902920bab66e Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 19 Nov 2020 13:12:58 +0100
Subject: [PATCH 197/340] [vimeo:album] fix extraction (closes #27079)

---
 youtube_dl/extractor/vimeo.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 421795b94..cfd04d50c 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -946,10 +946,13 @@ class VimeoAlbumIE(VimeoBaseInfoExtractor):
 
     def _real_extract(self, url):
         album_id = self._match_id(url)
-        webpage = self._download_webpage(url, album_id)
-        viewer = self._parse_json(self._search_regex(
-            r'bootstrap_data\s*=\s*({.+?})</script>',
-            webpage, 'bootstrap data'), album_id)['viewer']
+        viewer = self._download_json(
+            'https://vimeo.com/_rv/viewer', album_id, fatal=False)
+        if not viewer:
+            webpage = self._download_webpage(url, album_id)
+            viewer = self._parse_json(self._search_regex(
+                r'bootstrap_data\s*=\s*({.+?})</script>',
+                webpage, 'bootstrap data'), album_id)['viewer']
         jwt = viewer['jwt']
         album = self._download_json(
             'https://api.vimeo.com/albums/' + album_id,
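
The album extractor now asks the JSON endpoint for the viewer object and only falls back to scraping bootstrap_data when that request fails. Roughly, outside the extractor (standalone sketch using requests; the endpoint and regex are taken from the diff):

    import json
    import re

    import requests

    def get_viewer(album_url, album_id):
        # Preferred path: dedicated JSON endpoint
        resp = requests.get('https://vimeo.com/_rv/viewer')
        if resp.ok:
            return resp.json()
        # Fallback: scrape bootstrap_data from the album page
        page = requests.get(album_url).text
        return json.loads(re.search(
            r'bootstrap_data\s*=\s*({.+?})</script>', page).group(1))['viewer']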

From cf1a8668e8e47a56c834fb567d227787d7480d08 Mon Sep 17 00:00:00 2001
From: Joost Verdoorn <jpverdoorn@gmail.com>
Date: Thu, 19 Nov 2020 17:26:53 +0100
Subject: [PATCH 198/340] [Amara] Add new extractor (#20618)

---
 youtube_dl/extractor/amara.py      | 76 ++++++++++++++++++++++++++++++
 youtube_dl/extractor/extractors.py |  1 +
 2 files changed, 77 insertions(+)
 create mode 100644 youtube_dl/extractor/amara.py

diff --git a/youtube_dl/extractor/amara.py b/youtube_dl/extractor/amara.py
new file mode 100644
index 000000000..b222154bd
--- /dev/null
+++ b/youtube_dl/extractor/amara.py
@@ -0,0 +1,76 @@
+# coding: utf-8
+from __future__ import unicode_literals
+from .common import InfoExtractor
+
+
+class AmaraIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?amara\.org/(?:\w+/)?videos/(?P<id>\w+)'
+    _TESTS = [
+        {
+            'url': 'https://amara.org/en/videos/jVx79ZKGK1ky/info/why-jury-trials-are-becoming-less-common/?tab=video',
+            'md5': 'ea10daf2b6154b8c1ecf9922aca5e8ae',
+            'info_dict': {
+                'id': 'h6ZuVdvYnfE',
+                'ext': 'mp4',
+                'title': 'Why jury trials are becoming less common',
+                'description': 'md5:a61811c319943960b6ab1c23e0cbc2c1',
+                'thumbnail': r're:^https?://.*\.jpg$',
+                'subtitles': dict,
+                'upload_date': '20160813',
+                'uploader': 'PBS NewsHour',
+                'uploader_id': 'PBSNewsHour'
+            }
+        },
+        {
+            'url': 'https://amara.org/en/videos/kYkK1VUTWW5I/info/vimeo-at-ces-2011',
+            'md5': '99392c75fa05d432a8f11df03612195e',
+            'info_dict': {
+                'id': '18622084',
+                'ext': 'mov',
+                'title': 'Vimeo at CES 2011!',
+                'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+                'thumbnail': r're:^https?://.*\.jpg$',
+                'subtitles': dict,
+                'timestamp': 1294649110,
+                'upload_date': '20110110',
+                'uploader': 'Sam Morrill',
+                'uploader_id': 'sammorrill'
+            }
+        },
+        {
+            'url': 'https://amara.org/en/videos/s8KL7I3jLmh6/info/the-danger-of-a-single-story/',
+            'md5': 'd3970f08512738ee60c5807311ff5d3f',
+            'info_dict': {
+                'id': 'ChimamandaAdichie_2009G-transcript',
+                'ext': 'mp4',
+                'title': 'The danger of a single story',
+                'description': 'md5:d769b31139c3b8bb5be9177f62ea3f23',
+                'thumbnail': r're:^https?://.*\.jpg$',
+                'subtitles': dict,
+                'upload_date': '20131206'
+            }
+        }
+    ]
+
+    def get_subtitles_for_language(self, language):
+        return [{
+            'ext': type,
+            'url': language['subtitles_uri'].replace('format=json', 'format=' + type)
+        } for type in ['vtt', 'srt', 'json']]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        meta = self._download_json('https://amara.org/api/videos/%s/' % video_id, video_id, query={'format': 'json'})
+
+        video_url = meta.get('all_urls')[0]
+        subtitles = dict([(language['code'], self.get_subtitles_for_language(language)) for language in meta.get('languages', []) if language['published']])
+
+        return {
+            '_type': 'url_transparent',
+            'url': video_url,
+            'id': video_id,
+            'subtitles': subtitles,
+            'title': meta['title'],
+            'description': meta.get('description'),
+            'thumbnail': meta.get('thumbnail')
+        }
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 088800eb9..183050e07 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -36,6 +36,7 @@ from .afreecatv import AfreecaTVIE
 from .airmozilla import AirMozillaIE
 from .aljazeera import AlJazeeraIE
 from .alphaporno import AlphaPornoIE
+from .amara import AmaraIE
 from .amcnetworks import AMCNetworksIE
 from .americastestkitchen import AmericasTestKitchenIE
 from .animeondemand import AnimeOnDemandIE

From 2cf8003638ef76a0f76541229ecab1adf739a3ae Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 19 Nov 2020 17:29:30 +0100
Subject: [PATCH 199/340] [amara] improve extraction

---
 youtube_dl/extractor/amara.py | 143 ++++++++++++++++++++--------------
 1 file changed, 85 insertions(+), 58 deletions(-)

diff --git a/youtube_dl/extractor/amara.py b/youtube_dl/extractor/amara.py
index b222154bd..61d469574 100644
--- a/youtube_dl/extractor/amara.py
+++ b/youtube_dl/extractor/amara.py
@@ -1,76 +1,103 @@
 # coding: utf-8
 from __future__ import unicode_literals
+
 from .common import InfoExtractor
+from .youtube import YoutubeIE
+from .vimeo import VimeoIE
+from ..utils import (
+    int_or_none,
+    parse_iso8601,
+    update_url_query,
+)
 
 
 class AmaraIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?amara\.org/(?:\w+/)?videos/(?P<id>\w+)'
-    _TESTS = [
-        {
-            'url': 'https://amara.org/en/videos/jVx79ZKGK1ky/info/why-jury-trials-are-becoming-less-common/?tab=video',
-            'md5': 'ea10daf2b6154b8c1ecf9922aca5e8ae',
-            'info_dict': {
-                'id': 'h6ZuVdvYnfE',
-                'ext': 'mp4',
-                'title': 'Why jury trials are becoming less common',
-                'description': 'md5:a61811c319943960b6ab1c23e0cbc2c1',
-                'thumbnail': r're:^https?://.*\.jpg$',
-                'subtitles': dict,
-                'upload_date': '20160813',
-                'uploader': 'PBS NewsHour',
-                'uploader_id': 'PBSNewsHour'
-            }
-        },
-        {
-            'url': 'https://amara.org/en/videos/kYkK1VUTWW5I/info/vimeo-at-ces-2011',
-            'md5': '99392c75fa05d432a8f11df03612195e',
-            'info_dict': {
-                'id': '18622084',
-                'ext': 'mov',
-                'title': 'Vimeo at CES 2011!',
-                'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
-                'thumbnail': r're:^https?://.*\.jpg$',
-                'subtitles': dict,
-                'timestamp': 1294649110,
-                'upload_date': '20110110',
-                'uploader': 'Sam Morrill',
-                'uploader_id': 'sammorrill'
-            }
-        },
-        {
-            'url': 'https://amara.org/en/videos/s8KL7I3jLmh6/info/the-danger-of-a-single-story/',
-            'md5': 'd3970f08512738ee60c5807311ff5d3f',
-            'info_dict': {
-                'id': 'ChimamandaAdichie_2009G-transcript',
-                'ext': 'mp4',
-                'title': 'The danger of a single story',
-                'description': 'md5:d769b31139c3b8bb5be9177f62ea3f23',
-                'thumbnail': r're:^https?://.*\.jpg$',
-                'subtitles': dict,
-                'upload_date': '20131206'
-            }
+    _TESTS = [{
+        # Youtube
+        'url': 'https://amara.org/en/videos/jVx79ZKGK1ky/info/why-jury-trials-are-becoming-less-common/?tab=video',
+        'md5': 'ea10daf2b6154b8c1ecf9922aca5e8ae',
+        'info_dict': {
+            'id': 'h6ZuVdvYnfE',
+            'ext': 'mp4',
+            'title': 'Why jury trials are becoming less common',
+            'description': 'md5:a61811c319943960b6ab1c23e0cbc2c1',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'subtitles': dict,
+            'upload_date': '20160813',
+            'uploader': 'PBS NewsHour',
+            'uploader_id': 'PBSNewsHour',
+            'timestamp': 1549639570,
         }
-    ]
-
-    def get_subtitles_for_language(self, language):
-        return [{
-            'ext': type,
-            'url': language['subtitles_uri'].replace('format=json', 'format=' + type)
-        } for type in ['vtt', 'srt', 'json']]
+    }, {
+        # Vimeo
+        'url': 'https://amara.org/en/videos/kYkK1VUTWW5I/info/vimeo-at-ces-2011',
+        'md5': '99392c75fa05d432a8f11df03612195e',
+        'info_dict': {
+            'id': '18622084',
+            'ext': 'mov',
+            'title': 'Vimeo at CES 2011!',
+            'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'subtitles': dict,
+            'timestamp': 1294763658,
+            'upload_date': '20110111',
+            'uploader': 'Sam Morrill',
+            'uploader_id': 'sammorrill'
+        }
+    }, {
+        # Direct Link
+        'url': 'https://amara.org/en/videos/s8KL7I3jLmh6/info/the-danger-of-a-single-story/',
+        'md5': 'd3970f08512738ee60c5807311ff5d3f',
+        'info_dict': {
+            'id': 's8KL7I3jLmh6',
+            'ext': 'mp4',
+            'title': 'The danger of a single story',
+            'description': 'md5:d769b31139c3b8bb5be9177f62ea3f23',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'subtitles': dict,
+            'upload_date': '20091007',
+            'timestamp': 1254942511,
+        }
+    }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        meta = self._download_json('https://amara.org/api/videos/%s/' % video_id, video_id, query={'format': 'json'})
+        meta = self._download_json(
+            'https://amara.org/api/videos/%s/' % video_id,
+            video_id, query={'format': 'json'})
+        title = meta['title']
+        video_url = meta['all_urls'][0]
 
-        video_url = meta.get('all_urls')[0]
-        subtitles = dict([(language['code'], self.get_subtitles_for_language(language)) for language in meta.get('languages', []) if language['published']])
+        subtitles = {}
+        for language in (meta.get('languages') or []):
+            subtitles_uri = language.get('subtitles_uri')
+            if not (subtitles_uri and language.get('published')):
+                continue
+            subtitle = subtitles.setdefault(language.get('code') or 'en', [])
+            for f in ('json', 'srt', 'vtt'):
+                subtitle.append({
+                    'ext': f,
+                    'url': update_url_query(subtitles_uri, {'format': f}),
+                })
 
-        return {
-            '_type': 'url_transparent',
+        info = {
             'url': video_url,
             'id': video_id,
             'subtitles': subtitles,
-            'title': meta['title'],
+            'title': title,
             'description': meta.get('description'),
-            'thumbnail': meta.get('thumbnail')
+            'thumbnail': meta.get('thumbnail'),
+            'duration': int_or_none(meta.get('duration')),
+            'timestamp': parse_iso8601(meta.get('created')),
         }
+
+        for ie in (YoutubeIE, VimeoIE):
+            if ie.suitable(video_url):
+                info.update({
+                    '_type': 'url_transparent',
+                    'ie_key': ie.ie_key(),
+                })
+                break
+
+        return info
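
For reference, the per-language subtitle mapping introduced above can be reproduced in isolation. The sketch below is only an approximation: the `meta` payload is hypothetical (it merely mirrors the field names the extractor reads) and `update_url_query` is re-implemented with the standard library instead of being imported from youtube_dl.utils.

    from urllib.parse import parse_qs, urlencode, urlparse, urlunparse


    def update_url_query(url, query):
        # simplified stand-in for youtube_dl.utils.update_url_query
        parsed = urlparse(url)
        qs = parse_qs(parsed.query)
        qs.update({k: [v] for k, v in query.items()})
        return urlunparse(parsed._replace(query=urlencode(qs, doseq=True)))


    # hypothetical API response; only 'published' languages contribute subtitles
    meta = {
        'languages': [
            {'code': 'en', 'published': True,
             'subtitles_uri': 'https://amara.org/api/videos/abc/languages/en/subtitles/?format=json'},
            {'code': 'fr', 'published': False,
             'subtitles_uri': 'https://amara.org/api/videos/abc/languages/fr/subtitles/?format=json'},
        ],
    }

    subtitles = {}
    for language in (meta.get('languages') or []):
        subtitles_uri = language.get('subtitles_uri')
        if not (subtitles_uri and language.get('published')):
            continue  # unpublished languages are skipped, as in the patch
        subtitle = subtitles.setdefault(language.get('code') or 'en', [])
        for f in ('json', 'srt', 'vtt'):
            subtitle.append({
                'ext': f,
                'url': update_url_query(subtitles_uri, {'format': f}),
            })

    print(subtitles)  # only 'en' remains, with json/srt/vtt variants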

From 25a35cb38a472731d9f487309f6bcff94ee4918c Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 19 Nov 2020 20:01:24 +0100
Subject: [PATCH 200/340] [googledrive] fix format extraction (closes #26979)

---
 youtube_dl/extractor/googledrive.py | 58 +++++++++++------------------
 1 file changed, 21 insertions(+), 37 deletions(-)

diff --git a/youtube_dl/extractor/googledrive.py b/youtube_dl/extractor/googledrive.py
index f2cc57e44..de8c80e36 100644
--- a/youtube_dl/extractor/googledrive.py
+++ b/youtube_dl/extractor/googledrive.py
@@ -3,11 +3,13 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from ..compat import compat_parse_qs
 from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
     lowercase_escape,
+    try_get,
     update_url_query,
 )
 
@@ -38,21 +40,10 @@ class GoogleDriveIE(InfoExtractor):
         # video can't be watched anonymously due to view count limit reached,
         # but can be downloaded (see https://github.com/ytdl-org/youtube-dl/issues/14046)
         'url': 'https://drive.google.com/file/d/0B-vUyvmDLdWDcEt4WjBqcmI2XzQ/view',
-        'md5': 'bfbd670d03a470bb1e6d4a257adec12e',
-        'info_dict': {
-            'id': '0B-vUyvmDLdWDcEt4WjBqcmI2XzQ',
-            'ext': 'mp4',
-            'title': 'Annabelle Creation (2017)- Z.V1 [TH].MP4',
-        }
+        'only_matching': True,
     }, {
         # video id is longer than 28 characters
         'url': 'https://drive.google.com/file/d/1ENcQ_jeCuj7y19s66_Ou9dRP4GKGsodiDQ/edit',
-        'info_dict': {
-            'id': '1ENcQ_jeCuj7y19s66_Ou9dRP4GKGsodiDQ',
-            'ext': 'mp4',
-            'title': 'Andreea Banica feat Smiley - Hooky Song (Official Video).mp4',
-            'duration': 189,
-        },
         'only_matching': True,
     }, {
         'url': 'https://drive.google.com/open?id=0B2fjwgkl1A_CX083Tkowdmt6d28',
@@ -171,23 +162,21 @@ class GoogleDriveIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(
-            'http://docs.google.com/file/d/%s' % video_id, video_id)
+        video_info = compat_parse_qs(self._download_webpage(
+            'https://drive.google.com/get_video_info',
+            video_id, query={'docid': video_id}))
 
-        title = self._search_regex(
-            r'"title"\s*,\s*"([^"]+)', webpage, 'title',
-            default=None) or self._og_search_title(webpage)
-        duration = int_or_none(self._search_regex(
-            r'"length_seconds"\s*,\s*"([^"]+)', webpage, 'length seconds',
-            default=None))
+        def get_value(key):
+            return try_get(video_info, lambda x: x[key][0])
+
+        reason = get_value('reason')
+        title = get_value('title')
+        if not title and reason:
+            raise ExtractorError(reason, expected=True)
 
         formats = []
-        fmt_stream_map = self._search_regex(
-            r'"fmt_stream_map"\s*,\s*"([^"]+)', webpage,
-            'fmt stream map', default='').split(',')
-        fmt_list = self._search_regex(
-            r'"fmt_list"\s*,\s*"([^"]+)', webpage,
-            'fmt_list', default='').split(',')
+        fmt_stream_map = (get_value('fmt_stream_map') or '').split(',')
+        fmt_list = (get_value('fmt_list') or '').split(',')
         if fmt_stream_map and fmt_list:
             resolutions = {}
             for fmt in fmt_list:
@@ -257,19 +246,14 @@ class GoogleDriveIE(InfoExtractor):
                         if urlh and urlh.headers.get('Content-Disposition'):
                             add_source_format(urlh)
 
-        if not formats:
-            reason = self._search_regex(
-                r'"reason"\s*,\s*"([^"]+)', webpage, 'reason', default=None)
-            if reason:
-                raise ExtractorError(reason, expected=True)
+        if not formats and reason:
+            raise ExtractorError(reason, expected=True)
 
         self._sort_formats(formats)
 
-        hl = self._search_regex(
-            r'"hl"\s*,\s*"([^"]+)', webpage, 'hl', default=None)
+        hl = get_value('hl')
         subtitles_id = None
-        ttsurl = self._search_regex(
-            r'"ttsurl"\s*,\s*"([^"]+)', webpage, 'ttsurl', default=None)
+        ttsurl = get_value('ttsurl')
         if ttsurl:
             # the video Id for subtitles will be the last value in the ttsurl
             # query string
@@ -279,8 +263,8 @@ class GoogleDriveIE(InfoExtractor):
         return {
             'id': video_id,
             'title': title,
-            'thumbnail': self._og_search_thumbnail(webpage, default=None),
-            'duration': duration,
+            'thumbnail': 'https://drive.google.com/thumbnail?id=' + video_id,
+            'duration': int_or_none(get_value('length_seconds')),
             'formats': formats,
             'subtitles': self.extract_subtitles(video_id, subtitles_id, hl),
             'automatic_captions': self.extract_automatic_captions(
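
The new code path reads everything from the URL-encoded get_video_info response instead of scraping the watch page. A rough, self-contained sketch of that parsing against a made-up payload (real responses carry the same keys among many others):

    from urllib.parse import parse_qs

    # made-up payload; real responses are URL-encoded key/value pairs with these keys
    sample_response = (
        'title=Example.mp4&length_seconds=189'
        '&fmt_list=18/640x360,22/1280x720'
        '&fmt_stream_map=18|https://redirector.example/video?itag%3D18,'
        '22|https://redirector.example/video?itag%3D22'
    )
    video_info = parse_qs(sample_response)


    def get_value(key):
        values = video_info.get(key)
        return values[0] if values else None


    resolutions = {}
    for fmt in (get_value('fmt_list') or '').split(','):
        itag, resolution = fmt.split('/', 1)        # e.g. '22/1280x720'
        resolutions[itag] = resolution

    for stream in (get_value('fmt_stream_map') or '').split(','):
        itag, stream_url = stream.split('|', 1)     # e.g. '22|https://...'
        print(itag, resolutions.get(itag), stream_url)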

From daa25d414284747980a9ad32e138a2ae388fcd0c Mon Sep 17 00:00:00 2001
From: beefchop <32330393+beefchop@users.noreply.github.com>
Date: Fri, 20 Nov 2020 07:38:09 +1100
Subject: [PATCH 201/340] [viki] fix stream extraction from mpd (#27092)

Co-authored-by: beefchop <beefchop@users.noreply.github.com>
---
 youtube_dl/extractor/viki.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index b0dcdc0e6..48ab7b944 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -296,6 +296,9 @@ class VikiIE(VikiBaseIE):
                         if f.get('acodec') == 'none' and f.get('vcodec') != 'none':
                             f['acodec'] = None
                     formats.extend(m3u8_formats)
+                elif format_id == 'mpd':
+                    formats.extend(self._extract_mpd_formats(
+                        format_url, video_id, 'mpd-%s' % protocol, fatal=False))
                 elif format_url.startswith('rtmp'):
                     mobj = re.search(
                         r'^(?P<url>rtmp://[^/]+/(?P<app>.+?))/(?P<playpath>mp4:.+)$',

From 59e583f7e8530ca92776c866897d895c072e2a82 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 19 Nov 2020 22:45:46 +0100
Subject: [PATCH 202/340] [viki] improve format extraction

---
 youtube_dl/extractor/viki.py | 142 ++++++++++++++++++++---------------
 1 file changed, 83 insertions(+), 59 deletions(-)

diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index 48ab7b944..a003b7af8 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import base64
 import hashlib
 import hmac
 import itertools
@@ -9,6 +10,10 @@ import re
 import time
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+    compat_urllib_parse_urlparse,
+)
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -165,19 +170,20 @@ class VikiIE(VikiBaseIE):
     }, {
         # episode
         'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
-        'md5': '5fa476a902e902783ac7a4d615cdbc7a',
+        'md5': '94e0e34fd58f169f40c184f232356cfe',
         'info_dict': {
             'id': '44699v',
             'ext': 'mp4',
             'title': 'Boys Over Flowers - Episode 1',
             'description': 'md5:b89cf50038b480b88b5b3c93589a9076',
-            'duration': 4204,
+            'duration': 4172,
             'timestamp': 1270496524,
             'upload_date': '20100405',
             'uploader': 'group8',
             'like_count': int,
             'age_limit': 13,
-        }
+        },
+        'expected_warnings': ['Unknown MIME type image/jpeg in DASH manifest'],
     }, {
         # youtube external
         'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1',
@@ -194,14 +200,15 @@ class VikiIE(VikiBaseIE):
             'uploader_id': 'ad14065n',
             'like_count': int,
             'age_limit': 13,
-        }
+        },
+        'skip': 'Page not found!',
     }, {
         'url': 'http://www.viki.com/player/44699v',
         'only_matching': True,
     }, {
         # non-English description
         'url': 'http://www.viki.com/videos/158036v-love-in-magic',
-        'md5': '1713ae35df5a521b31f6dc40730e7c9c',
+        'md5': 'adf9e321a0ae5d0aace349efaaff7691',
         'info_dict': {
             'id': '158036v',
             'ext': 'mp4',
@@ -217,8 +224,11 @@ class VikiIE(VikiBaseIE):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        video = self._call_api(
-            'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
+        resp = self._download_json(
+            'https://www.viki.com/api/videos/' + video_id,
+            video_id, 'Downloading video JSON',
+            headers={'x-viki-app-ver': '4.0.57'})
+        video = resp['video']
 
         self._check_errors(video)
 
@@ -265,60 +275,74 @@ class VikiIE(VikiBaseIE):
             'subtitles': subtitles,
         }
 
-        streams = self._call_api(
-            'videos/%s/streams.json' % video_id, video_id,
-            'Downloading video streams JSON')
-
-        if 'external' in streams:
-            result.update({
-                '_type': 'url_transparent',
-                'url': streams['external']['url'],
-            })
-            return result
-
         formats = []
-        for format_id, stream_dict in streams.items():
-            height = int_or_none(self._search_regex(
-                r'^(\d+)[pP]$', format_id, 'height', default=None))
-            for protocol, format_dict in stream_dict.items():
-                # rtmps URLs does not seem to work
-                if protocol == 'rtmps':
-                    continue
-                format_url = format_dict['url']
-                if format_id == 'm3u8':
-                    m3u8_formats = self._extract_m3u8_formats(
-                        format_url, video_id, 'mp4',
-                        entry_protocol='m3u8_native',
-                        m3u8_id='m3u8-%s' % protocol, fatal=False)
-                    # Despite CODECS metadata in m3u8 all video-only formats
-                    # are actually video+audio
-                    for f in m3u8_formats:
-                        if f.get('acodec') == 'none' and f.get('vcodec') != 'none':
-                            f['acodec'] = None
-                    formats.extend(m3u8_formats)
-                elif format_id == 'mpd':
-                    formats.extend(self._extract_mpd_formats(
-                        format_url, video_id, 'mpd-%s' % protocol, fatal=False))
-                elif format_url.startswith('rtmp'):
-                    mobj = re.search(
-                        r'^(?P<url>rtmp://[^/]+/(?P<app>.+?))/(?P<playpath>mp4:.+)$',
-                        format_url)
-                    if not mobj:
+
+        def add_format(format_id, format_dict, protocol='http'):
+            # rtmps URLs do not seem to work
+            if protocol == 'rtmps':
+                return
+            format_url = format_dict.get('url')
+            if not format_url:
+                return
+            qs = compat_parse_qs(compat_urllib_parse_urlparse(format_url).query)
+            stream = qs.get('stream', [None])[0]
+            if stream:
+                format_url = base64.b64decode(stream).decode()
+            if format_id in ('m3u8', 'hls'):
+                m3u8_formats = self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4',
+                    entry_protocol='m3u8_native',
+                    m3u8_id='m3u8-%s' % protocol, fatal=False)
+                # Despite CODECS metadata in m3u8 all video-only formats
+                # are actually video+audio
+                for f in m3u8_formats:
+                    if '_drm/index_' in f['url']:
                         continue
-                    formats.append({
-                        'format_id': 'rtmp-%s' % format_id,
-                        'ext': 'flv',
-                        'url': mobj.group('url'),
-                        'play_path': mobj.group('playpath'),
-                        'app': mobj.group('app'),
-                        'page_url': url,
-                    })
-                else:
-                    formats.append({
-                        'url': format_url,
-                        'format_id': '%s-%s' % (format_id, protocol),
-                        'height': height,
-                    })
+                    if f.get('acodec') == 'none' and f.get('vcodec') != 'none':
+                        f['acodec'] = None
+                    formats.append(f)
+            elif format_id in ('mpd', 'dash'):
+                formats.extend(self._extract_mpd_formats(
+                    format_url, video_id, 'mpd-%s' % protocol, fatal=False))
+            elif format_url.startswith('rtmp'):
+                mobj = re.search(
+                    r'^(?P<url>rtmp://[^/]+/(?P<app>.+?))/(?P<playpath>mp4:.+)$',
+                    format_url)
+                if not mobj:
+                    return
+                formats.append({
+                    'format_id': 'rtmp-%s' % format_id,
+                    'ext': 'flv',
+                    'url': mobj.group('url'),
+                    'play_path': mobj.group('playpath'),
+                    'app': mobj.group('app'),
+                    'page_url': url,
+                })
+            else:
+                formats.append({
+                    'url': format_url,
+                    'format_id': '%s-%s' % (format_id, protocol),
+                    'height': int_or_none(self._search_regex(
+                        r'^(\d+)[pP]$', format_id, 'height', default=None)),
+                })
+
+        for format_id, format_dict in (resp.get('streams') or {}).items():
+            add_format(format_id, format_dict)
+        if not formats:
+            streams = self._call_api(
+                'videos/%s/streams.json' % video_id, video_id,
+                'Downloading video streams JSON')
+
+            if 'external' in streams:
+                result.update({
+                    '_type': 'url_transparent',
+                    'url': streams['external']['url'],
+                })
+                return result
+
+            for format_id, stream_dict in streams.items():
+                for protocol, format_dict in stream_dict.items():
+                    add_format(format_id, format_dict, protocol)
         self._sort_formats(formats)
 
         result['formats'] = formats
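
The key addition above is the handling of CDN URLs that carry a base64-encoded `stream` query parameter: when present, the decoded value becomes the actual manifest URL before the HLS/DASH formats are extracted. In isolation, with a made-up URL:

    import base64
    from urllib.parse import parse_qs, urlencode, urlparse

    # made-up playback URL wrapping the real manifest URL in a base64 'stream' param
    manifest = 'https://manifest.example.com/video/index.m3u8'
    query = urlencode({'stream': base64.b64encode(manifest.encode()).decode()})
    format_url = 'https://cdn.example.com/playback?' + query

    qs = parse_qs(urlparse(format_url).query)
    stream = qs.get('stream', [None])[0]
    if stream:
        format_url = base64.b64decode(stream).decode()

    print(format_url)  # https://manifest.example.com/video/index.m3u8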

From dd9e0f58f3482204491007e06a134c69788b1c82 Mon Sep 17 00:00:00 2001
From: Leonardo Taccari <iamleot@gmail.com>
Date: Fri, 20 Nov 2020 10:00:05 +0100
Subject: [PATCH 203/340] [rai] Fix extraction for recent raiplay.it updates
 (#27077)

- Remove first test of RaiPlayIE: it is no longer available
- Make RaiPlayIE extension-agnostic (passing `.json' URLs is now supported
  too)
- Adjust RaiPlayLiveIE to recent raiplay.it updates. Passing it as
  `url_transparent' is no longer supported (there is no longer an accessible
  ContentItem)
- Adjust RaiPlayPlaylistIE to recent raiplay.it updates and teach it to handle
  ContentSets.
- Update a RaiIE test and remove two tests that are no longer available

Thanks to @remitamine for the review!
---
 youtube_dl/extractor/rai.py | 126 +++++++++++++++---------------------
 1 file changed, 52 insertions(+), 74 deletions(-)

diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
index bee2d53f5..dae7800d2 100644
--- a/youtube_dl/extractor/rai.py
+++ b/youtube_dl/extractor/rai.py
@@ -17,7 +17,6 @@ from ..utils import (
     int_or_none,
     parse_duration,
     strip_or_none,
-    unescapeHTML,
     unified_strdate,
     unified_timestamp,
     update_url_query,
@@ -122,27 +121,8 @@ class RaiBaseIE(InfoExtractor):
 
 
 class RaiPlayIE(RaiBaseIE):
-    _VALID_URL = r'(?P<url>https?://(?:www\.)?raiplay\.it/.+?-(?P<id>%s)\.html)' % RaiBaseIE._UUID_RE
+    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/.+?-)(?P<id>%s)\.(?:html|json)' % RaiBaseIE._UUID_RE
     _TESTS = [{
-        'url': 'http://www.raiplay.it/video/2016/10/La-Casa-Bianca-e06118bb-59a9-4636-b914-498e4cfd2c66.html?source=twitter',
-        'md5': '340aa3b7afb54bfd14a8c11786450d76',
-        'info_dict': {
-            'id': 'e06118bb-59a9-4636-b914-498e4cfd2c66',
-            'ext': 'mp4',
-            'title': 'La Casa Bianca',
-            'alt_title': 'S2016 - Puntata del 23/10/2016',
-            'description': 'md5:a09d45890850458077d1f68bb036e0a5',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'uploader': 'Rai 3',
-            'creator': 'Rai 3',
-            'duration': 3278,
-            'timestamp': 1477764300,
-            'upload_date': '20161029',
-            'series': 'La Casa Bianca',
-            'season': '2016',
-        },
-        'skip': 'This content is not available',
-    }, {
         'url': 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
         'md5': '8970abf8caf8aef4696e7b1f2adfc696',
         'info_dict': {
@@ -166,10 +146,11 @@ class RaiPlayIE(RaiBaseIE):
     }]
 
     def _real_extract(self, url):
-        url, video_id = re.match(self._VALID_URL, url).groups()
+        mobj = re.match(self._VALID_URL, url)
+        base, video_id, = mobj.group('base', 'id')
 
         media = self._download_json(
-            url.replace('.html', '.json'), video_id, 'Downloading video JSON')
+            '%s%s.json' % (base, video_id), video_id, 'Downloading video JSON')
 
         title = media['name']
 
@@ -219,7 +200,7 @@ class RaiPlayIE(RaiBaseIE):
 
 
 class RaiPlayLiveIE(RaiBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?raiplay\.it/dirette/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/dirette/(?P<id>[^/?#&]+))'
     _TEST = {
         'url': 'http://www.raiplay.it/dirette/rainews24',
         'info_dict': {
@@ -227,7 +208,7 @@ class RaiPlayLiveIE(RaiBaseIE):
             'display_id': 'rainews24',
             'ext': 'mp4',
             'title': 're:^Diretta di Rai News 24 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
-            'description': 'md5:6eca31500550f9376819f174e5644754',
+            'description': 'md5:4d00bcf6dc98b27c6ec480de329d1497',
             'uploader': 'Rai News 24',
             'creator': 'Rai News 24',
             'is_live': True,
@@ -238,53 +219,75 @@ class RaiPlayLiveIE(RaiBaseIE):
     }
 
     def _real_extract(self, url):
-        display_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        base, display_id, = mobj.group('base', 'id')
 
-        webpage = self._download_webpage(url, display_id)
+        media = self._download_json(
+            '%s.json' % base,
+            display_id, 'Downloading channel JSON')
 
-        video_id = self._search_regex(
-            r'data-uniquename=["\']ContentItem-(%s)' % RaiBaseIE._UUID_RE,
-            webpage, 'content id')
+        title = media['name']
+        video = media['video']
+        video_id = media['id'].replace('ContentItem-', '')
 
-        return {
-            '_type': 'url_transparent',
-            'ie_key': RaiPlayIE.ie_key(),
-            'url': 'http://www.raiplay.it/dirette/ContentItem-%s.html' % video_id,
+        relinker_info = self._extract_relinker_info(video['content_url'], video_id)
+        self._sort_formats(relinker_info['formats'])
+
+        info = {
             'id': video_id,
             'display_id': display_id,
+            'title': self._live_title(title) if relinker_info.get(
+                'is_live') else title,
+            'description': media.get('description'),
+            'uploader': strip_or_none(media.get('channel')),
+            'creator': strip_or_none(media.get('editor')),
         }
 
+        info.update(relinker_info)
+        return info
+
 
 class RaiPlayPlaylistIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?raiplay\.it/programmi/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/programmi/(?P<id>[^/?#&]+))'
     _TESTS = [{
         'url': 'http://www.raiplay.it/programmi/nondirloalmiocapo/',
         'info_dict': {
             'id': 'nondirloalmiocapo',
             'title': 'Non dirlo al mio capo',
-            'description': 'md5:9f3d603b2947c1c7abb098f3b14fac86',
+            'description': 'md5:98ab6b98f7f44c2843fd7d6f045f153b',
         },
         'playlist_mincount': 12,
     }]
 
     def _real_extract(self, url):
-        playlist_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        base, playlist_id, = mobj.group('base', 'id')
 
-        webpage = self._download_webpage(url, playlist_id)
+        media = self._download_json(
+            '%s.json' % base,
+            playlist_id, 'Downloading program JSON')
 
-        title = self._html_search_meta(
-            ('programma', 'nomeProgramma'), webpage, 'title')
-        description = unescapeHTML(self._html_search_meta(
-            ('description', 'og:description'), webpage, 'description'))
+        title = media.get('name')
+        description = None
+        if media.get('program_info') and media['program_info'].get('description'):
+            description = media['program_info']['description']
 
         entries = []
-        for mobj in re.finditer(
-                r'<a\b[^>]+\bhref=(["\'])(?P<path>/raiplay/video/.+?)\1',
-                webpage):
-            video_url = urljoin(url, mobj.group('path'))
-            entries.append(self.url_result(
-                video_url, ie=RaiPlayIE.ie_key(),
-                video_id=RaiPlayIE._match_id(video_url)))
+        for b in media.get('blocks', []):
+            for s in b.get('sets', []):
+                cs = s.get('id')
+                if not cs:
+                    continue
+                medias = self._download_json(
+                    '%s/%s.json' % (base, cs),
+                    cs, 'Downloading content set JSON', fatal=False)
+                if not medias:
+                    continue
+                for m in medias['items']:
+                    video_url = urljoin(url, m['path_id'])
+                    entries.append(self.url_result(
+                        video_url, ie=RaiPlayIE.ie_key(),
+                        video_id=RaiPlayIE._match_id(video_url)))
 
         return self.playlist_result(entries, playlist_id, title, description)
 
@@ -329,19 +332,6 @@ class RaiIE(RaiBaseIE):
             'duration': 2214,
             'upload_date': '20161103',
         }
-    }, {
-        # drawMediaRaiTV(...)
-        'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html',
-        'md5': '2dd727e61114e1ee9c47f0da6914e178',
-        'info_dict': {
-            'id': '59d69d28-6bb6-409d-a4b5-ed44096560af',
-            'ext': 'mp4',
-            'title': 'Il pacco',
-            'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'upload_date': '20141221',
-        },
-        'skip': 'This content is not available',
     }, {
         # initEdizione('ContentItem-...'
         'url': 'http://www.tg1.rai.it/dl/tg1/2010/edizioni/ContentSet-9b6e0cba-4bef-4aef-8cf0-9f7f665b7dfb-tg1.html?item=undefined',
@@ -353,18 +343,6 @@ class RaiIE(RaiBaseIE):
             'upload_date': '20170401',
         },
         'skip': 'Changes daily',
-    }, {
-        # HDS live stream with only relinker URL
-        'url': 'http://www.rai.tv/dl/RaiTV/dirette/PublishingBlock-1912dbbf-3f96-44c3-b4cf-523681fbacbc.html?channel=EuroNews',
-        'info_dict': {
-            'id': '1912dbbf-3f96-44c3-b4cf-523681fbacbc',
-            'ext': 'flv',
-            'title': 'EuroNews',
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'skip': 'This content is available only in Italy',
     }, {
         # HLS live stream with ContentItem in og:url
         'url': 'http://www.rainews.it/dl/rainews/live/ContentItem-3156f2f2-dc70-4953-8e2f-70d7489d4ce9.html',

From af7bb684c086292e855d4e6f37a724ef27e14bd8 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 20 Nov 2020 10:01:56 +0100
Subject: [PATCH 204/340] [rai] improve extraction

---
 youtube_dl/extractor/rai.py | 82 ++++++++++++-------------------------
 1 file changed, 27 insertions(+), 55 deletions(-)

diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
index dae7800d2..b072a0f38 100644
--- a/youtube_dl/extractor/rai.py
+++ b/youtube_dl/extractor/rai.py
@@ -16,7 +16,9 @@ from ..utils import (
     GeoRestrictedError,
     int_or_none,
     parse_duration,
+    remove_start,
     strip_or_none,
+    try_get,
     unified_strdate,
     unified_timestamp,
     update_url_query,
@@ -121,7 +123,7 @@ class RaiBaseIE(InfoExtractor):
 
 
 class RaiPlayIE(RaiBaseIE):
-    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/.+?-)(?P<id>%s)\.(?:html|json)' % RaiBaseIE._UUID_RE
+    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/.+?-(?P<id>%s))\.(?:html|json)' % RaiBaseIE._UUID_RE
     _TESTS = [{
         'url': 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
         'md5': '8970abf8caf8aef4696e7b1f2adfc696',
@@ -146,11 +148,10 @@ class RaiPlayIE(RaiBaseIE):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        base, video_id, = mobj.group('base', 'id')
+        base, video_id = re.match(self._VALID_URL, url).groups()
 
         media = self._download_json(
-            '%s%s.json' % (base, video_id), video_id, 'Downloading video JSON')
+            base + '.json', video_id, 'Downloading video JSON')
 
         title = media['name']
 
@@ -177,7 +178,8 @@ class RaiPlayIE(RaiBaseIE):
         season = media.get('season')
 
         info = {
-            'id': video_id,
+            'id': remove_start(media.get('id'), 'ContentItem-') or video_id,
+            'display_id': video_id,
             'title': self._live_title(title) if relinker_info.get(
                 'is_live') else title,
             'alt_title': strip_or_none(media.get('subtitle')),
@@ -199,9 +201,9 @@ class RaiPlayIE(RaiBaseIE):
         return info
 
 
-class RaiPlayLiveIE(RaiBaseIE):
+class RaiPlayLiveIE(RaiPlayIE):
     _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/dirette/(?P<id>[^/?#&]+))'
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.raiplay.it/dirette/rainews24',
         'info_dict': {
             'id': 'd784ad40-e0ae-4a69-aa76-37519d238a9c',
@@ -216,35 +218,7 @@ class RaiPlayLiveIE(RaiBaseIE):
         'params': {
             'skip_download': True,
         },
-    }
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        base, display_id, = mobj.group('base', 'id')
-
-        media = self._download_json(
-            '%s.json' % base,
-            display_id, 'Downloading channel JSON')
-
-        title = media['name']
-        video = media['video']
-        video_id = media['id'].replace('ContentItem-', '')
-
-        relinker_info = self._extract_relinker_info(video['content_url'], video_id)
-        self._sort_formats(relinker_info['formats'])
-
-        info = {
-            'id': video_id,
-            'display_id': display_id,
-            'title': self._live_title(title) if relinker_info.get(
-                'is_live') else title,
-            'description': media.get('description'),
-            'uploader': strip_or_none(media.get('channel')),
-            'creator': strip_or_none(media.get('editor')),
-        }
-
-        info.update(relinker_info)
-        return info
+    }]
 
 
 class RaiPlayPlaylistIE(InfoExtractor):
@@ -260,36 +234,34 @@ class RaiPlayPlaylistIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        base, playlist_id, = mobj.group('base', 'id')
+        base, playlist_id = re.match(self._VALID_URL, url).groups()
 
-        media = self._download_json(
-            '%s.json' % base,
-            playlist_id, 'Downloading program JSON')
-
-        title = media.get('name')
-        description = None
-        if media.get('program_info') and media['program_info'].get('description'):
-            description = media['program_info']['description']
+        program = self._download_json(
+            base + '.json', playlist_id, 'Downloading program JSON')
 
         entries = []
-        for b in media.get('blocks', []):
-            for s in b.get('sets', []):
-                cs = s.get('id')
-                if not cs:
+        for b in (program.get('blocks') or []):
+            for s in (b.get('sets') or []):
+                s_id = s.get('id')
+                if not s_id:
                     continue
                 medias = self._download_json(
-                    '%s/%s.json' % (base, cs),
-                    cs, 'Downloading content set JSON', fatal=False)
+                    '%s/%s.json' % (base, s_id), s_id,
+                    'Downloading content set JSON', fatal=False)
                 if not medias:
                     continue
-                for m in medias['items']:
-                    video_url = urljoin(url, m['path_id'])
+                for m in (medias.get('items') or []):
+                    path_id = m.get('path_id')
+                    if not path_id:
+                        continue
+                    video_url = urljoin(url, path_id)
                     entries.append(self.url_result(
                         video_url, ie=RaiPlayIE.ie_key(),
                         video_id=RaiPlayIE._match_id(video_url)))
 
-        return self.playlist_result(entries, playlist_id, title, description)
+        return self.playlist_result(
+            entries, playlist_id, program.get('name'),
+            try_get(program, lambda x: x['program_info']['description']))
 
 
 class RaiIE(RaiBaseIE):
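
The playlist extractor now walks the program JSON instead of scraping HTML: each block's content sets are fetched as separate JSON documents whose items yield the video URLs. A stripped-down sketch of that traversal, where the `program` payload and the `download_content_set()` helper are hypothetical stand-ins for the real API responses and `_download_json()` calls:

    from urllib.parse import urljoin

    base_url = 'https://www.raiplay.it/programmi/nondirloalmiocapo/'

    # hypothetical program JSON, shaped like the fields the extractor reads
    program = {
        'name': 'Non dirlo al mio capo',
        'blocks': [
            {'sets': [{'id': 'set-1'}, {'id': None}]},
        ],
    }


    def download_content_set(set_id):
        # stand-in for the per-set _download_json() request
        return {'items': [{'path_id': '/video/2020/01/episodio-1-0000.json'}]}


    entries = []
    for b in (program.get('blocks') or []):
        for s in (b.get('sets') or []):
            s_id = s.get('id')
            if not s_id:
                continue  # sets without an id are skipped
            medias = download_content_set(s_id)
            if not medias:
                continue
            for m in (medias.get('items') or []):
                path_id = m.get('path_id')
                if not path_id:
                    continue
                entries.append(urljoin(base_url, path_id))

    print(entries)  # ['https://www.raiplay.it/video/2020/01/episodio-1-0000.json']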

From a78e530c14cf7caf9449918285653d8284ef30dc Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 20 Nov 2020 10:10:57 +0100
Subject: [PATCH 205/340] [rai] fix unavailable video format detection

---
 youtube_dl/extractor/rai.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
index b072a0f38..06958966f 100644
--- a/youtube_dl/extractor/rai.py
+++ b/youtube_dl/extractor/rai.py
@@ -68,7 +68,7 @@ class RaiBaseIE(InfoExtractor):
 
             # This does not imply geo restriction (e.g.
             # http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html)
-            if media_url == 'http://download.rai.it/video_no_available.mp4':
+            if '/video_no_available.mp4' in media_url:
                 continue
 
             ext = determine_ext(media_url)

From 7bc7fbce239da880f7ab67fc5be55ea82df64e20 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 20 Nov 2020 10:26:55 +0100
Subject: [PATCH 206/340] [rai] fix protocol-relative relinker URLs (closes
 #22766)

---
 youtube_dl/extractor/rai.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
index 06958966f..ecb628f14 100644
--- a/youtube_dl/extractor/rai.py
+++ b/youtube_dl/extractor/rai.py
@@ -424,7 +424,7 @@ class RaiIE(RaiBaseIE):
             except ExtractorError:
                 pass
 
-        relinker_url = self._search_regex(
+        relinker_url = self._proto_relative_url(self._search_regex(
             r'''(?x)
                 (?:
                     var\s+videoURL|
@@ -436,7 +436,7 @@ class RaiIE(RaiBaseIE):
                     //mediapolis(?:vod)?\.rai\.it/relinker/relinkerServlet\.htm\?
                     (?:(?!\1).)*\bcont=(?:(?!\1).)+)\1
             ''',
-            webpage, 'relinker URL', group='url')
+            webpage, 'relinker URL', group='url'))
 
         relinker_info = self._extract_relinker_info(
             urljoin(url, relinker_url), video_id)

From 86f2fa1590991fffae7b1daacae9164771312c0b Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 20 Nov 2020 10:47:52 +0100
Subject: [PATCH 207/340] [discoverynetworks] add support for new TLC/DMAX
 URLs (closes #27100)

---
 youtube_dl/extractor/discoverynetworks.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/discoverynetworks.py b/youtube_dl/extractor/discoverynetworks.py
index 607a54948..c512b95d0 100644
--- a/youtube_dl/extractor/discoverynetworks.py
+++ b/youtube_dl/extractor/discoverynetworks.py
@@ -7,7 +7,7 @@ from .dplay import DPlayIE
 
 
 class DiscoveryNetworksDeIE(DPlayIE):
-    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:tlc|dmax)\.de|dplay\.co\.uk)/(?:programme|show)/(?P<programme>[^/]+)/video/(?P<alternate_id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:tlc|dmax)\.de|dplay\.co\.uk)/(?:programme|show|sendungen)/(?P<programme>[^/]+)/(?:video/)?(?P<alternate_id>[^/]+)'
 
     _TESTS = [{
         'url': 'https://www.tlc.de/programme/breaking-amish/video/die-welt-da-drauen/DCB331270001100',
@@ -29,6 +29,9 @@ class DiscoveryNetworksDeIE(DPlayIE):
     }, {
         'url': 'https://www.dplay.co.uk/show/ghost-adventures/video/hotel-leger-103620/EHD_280313B',
         'only_matching': True,
+    }, {
+        'url': 'https://tlc.de/sendungen/breaking-amish/die-welt-da-drauen/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From b31b5f4434b52816f3a5a1ae2cbe1d162be0fbd0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 20 Nov 2020 23:21:52 +0700
Subject: [PATCH 208/340] [youtube] Improve yt initial data extraction (closes
 #27093)

---
 youtube_dl/extractor/youtube.py | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 79f87aa85..a85aede8e 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -283,6 +283,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         },
     }
 
+    _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
+
     def _call_api(self, ep, query, video_id):
         data = self._DEFAULT_API_DATA.copy()
         data.update(query)
@@ -299,8 +301,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     def _extract_yt_initial_data(self, video_id, webpage):
         return self._parse_json(
             self._search_regex(
-                r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;',
-                webpage, 'yt initial data'),
+                (r'%s\s*\n' % self._YT_INITIAL_DATA_RE,
+                 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
             video_id)
 
 
@@ -1066,6 +1068,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'skip_download': True,
             },
         },
+        {
+            # with '};' inside yt initial data (see https://github.com/ytdl-org/youtube-dl/issues/27093)
+            'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
+            'info_dict': {
+                'id': 'CHqg6qOn4no',
+                'ext': 'mp4',
+                'title': 'Part 77   Sort a list of simple types in c#',
+                'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
+                'upload_date': '20130831',
+                'uploader_id': 'kudvenkat',
+                'uploader': 'kudvenkat',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        },
     ]
 
     def __init__(self, *args, **kwargs):
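
The reason the newline-anchored variant is tried first: with the plain non-greedy pattern, a literal '};' inside the JSON (for example in a description string) ends the match too early, which is what #27093 reported. Requiring a newline after the ';' pushes the match to the real end of the assignment. A quick illustration against a fabricated page snippet:

    import re

    _YT_INITIAL_DATA_RE = (r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]'
                           r'|ytInitialData)\s*=\s*({.+?})\s*;')

    # fabricated page snippet with '};' embedded in a JSON string value
    page = 'ytInitialData = {"a": "foo};bar", "b": 1};\nvar other = 1;'

    truncated = re.search(_YT_INITIAL_DATA_RE, page).group(1)
    complete = re.search(_YT_INITIAL_DATA_RE + r'\s*\n', page).group(1)

    print(truncated)  # {"a": "foo}               <- cut off at the embedded '};'
    print(complete)   # {"a": "foo};bar", "b": 1} <- what the patch extracts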

From ec99f4710877731da4619617a89cf1dd45a2fc2a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 20 Nov 2020 23:34:46 +0700
Subject: [PATCH 209/340] [youtube:tab] Replace some test URLs with
 RIAA-friendly ones

---
 youtube_dl/extractor/youtube.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index a85aede8e..fd9e54c1f 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2474,13 +2474,13 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         },
         'playlist_mincount': 138,
     }, {
-        'url': 'https://invidio.us/channel/UC23qupoDRn9YOAVzeoxjOQA',
+        'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
         'only_matching': True,
     }, {
-        'url': 'https://www.youtubekids.com/channel/UCyu8StPfZWapR6rfW_JgqcA',
+        'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
         'only_matching': True,
     }, {
-        'url': 'https://music.youtube.com/channel/UCT-K0qO8z6NzWrywqefBPBQ',
+        'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
         'only_matching': True,
     }, {
         'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
@@ -2527,7 +2527,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         },
         'playlist_mincount': 11,
     }, {
-        'url': 'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
+        'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
         'only_matching': True,
     }, {
         # Playlist URL that does not actually serve a playlist

From ab0eda99e1d1c6cd6aa697f4931c439bec350bd0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 04:00:08 +0700
Subject: [PATCH 210/340] [YoutubeDL] Fix --ignore-errors for playlists with
 generator-based entries of url_transparent (closes #27064)

---
 test/test_YoutubeDL.py  | 70 +++++++++++++++++++++++++++++++++++++++++
 youtube_dl/YoutubeDL.py | 52 +++++++++++++++++-------------
 2 files changed, 101 insertions(+), 21 deletions(-)

diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py
index 1e204e551..62f916d11 100644
--- a/test/test_YoutubeDL.py
+++ b/test/test_YoutubeDL.py
@@ -919,6 +919,76 @@ class TestYoutubeDL(unittest.TestCase):
         self.assertEqual(downloaded['extractor'], 'testex')
         self.assertEqual(downloaded['extractor_key'], 'TestEx')
 
+    # Test case for https://github.com/ytdl-org/youtube-dl/issues/27064
+    def test_ignoreerrors_for_playlist_with_url_transparent_iterable_entries(self):
+
+        class _YDL(YDL):
+            def __init__(self, *args, **kwargs):
+                super(_YDL, self).__init__(*args, **kwargs)
+
+            def trouble(self, s, tb=None):
+                pass
+
+        ydl = _YDL({
+            'format': 'extra',
+            'ignoreerrors': True,
+        })
+
+        class VideoIE(InfoExtractor):
+            _VALID_URL = r'video:(?P<id>\d+)'
+
+            def _real_extract(self, url):
+                video_id = self._match_id(url)
+                formats = [{
+                    'format_id': 'default',
+                    'url': 'url:',
+                }]
+                if video_id == '0':
+                    raise ExtractorError('foo')
+                if video_id == '2':
+                    formats.append({
+                        'format_id': 'extra',
+                        'url': TEST_URL,
+                    })
+                return {
+                    'id': video_id,
+                    'title': 'Video %s' % video_id,
+                    'formats': formats,
+                }
+
+        class PlaylistIE(InfoExtractor):
+            _VALID_URL = r'playlist:'
+
+            def _entries(self):
+                for n in range(3):
+                    video_id = compat_str(n)
+                    yield {
+                        '_type': 'url_transparent',
+                        'ie_key': VideoIE.ie_key(),
+                        'id': video_id,
+                        'url': 'video:%s' % video_id,
+                        'title': 'Video Transparent %s' % video_id,
+                    }
+
+            def _real_extract(self, url):
+                return self.playlist_result(self._entries())
+
+        ydl.add_info_extractor(VideoIE(ydl))
+        ydl.add_info_extractor(PlaylistIE(ydl))
+        info = ydl.extract_info('playlist:')
+        entries = info['entries']
+        self.assertEqual(len(entries), 3)
+        self.assertTrue(entries[0] is None)
+        self.assertTrue(entries[1] is None)
+        self.assertEqual(len(ydl.downloaded_info_dicts), 1)
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(entries[2], downloaded)
+        self.assertEqual(downloaded['url'], TEST_URL)
+        self.assertEqual(downloaded['title'], 'Video Transparent 2')
+        self.assertEqual(downloaded['id'], '2')
+        self.assertEqual(downloaded['extractor'], 'Video')
+        self.assertEqual(downloaded['extractor_key'], 'Video')
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 19370f62b..855a73157 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -793,21 +793,14 @@ class YoutubeDL(object):
                 self.report_warning('The program functionality for this site has been marked as broken, '
                                     'and will probably not work.')
 
+            return self.__extract_info(url, ie, download, extra_info, process)
+        else:
+            self.report_error('no suitable InfoExtractor for URL %s' % url)
+
+    def __handle_extraction_exceptions(func):
+        def wrapper(self, *args, **kwargs):
             try:
-                ie_result = ie.extract(url)
-                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
-                    break
-                if isinstance(ie_result, list):
-                    # Backwards compatibility: old IE result format
-                    ie_result = {
-                        '_type': 'compat_list',
-                        'entries': ie_result,
-                    }
-                self.add_default_extra_info(ie_result, ie, url)
-                if process:
-                    return self.process_ie_result(ie_result, download, extra_info)
-                else:
-                    return ie_result
+                return func(self, *args, **kwargs)
             except GeoRestrictedError as e:
                 msg = e.msg
                 if e.countries:
@@ -815,20 +808,33 @@ class YoutubeDL(object):
                         map(ISO3166Utils.short2full, e.countries))
                 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                 self.report_error(msg)
-                break
             except ExtractorError as e:  # An error we somewhat expected
                 self.report_error(compat_str(e), e.format_traceback())
-                break
             except MaxDownloadsReached:
                 raise
             except Exception as e:
                 if self.params.get('ignoreerrors', False):
                     self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
-                    break
                 else:
                     raise
+        return wrapper
+
+    @__handle_extraction_exceptions
+    def __extract_info(self, url, ie, download, extra_info, process):
+        ie_result = ie.extract(url)
+        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
+            return
+        if isinstance(ie_result, list):
+            # Backwards compatibility: old IE result format
+            ie_result = {
+                '_type': 'compat_list',
+                'entries': ie_result,
+            }
+        self.add_default_extra_info(ie_result, ie, url)
+        if process:
+            return self.process_ie_result(ie_result, download, extra_info)
         else:
-            self.report_error('no suitable InfoExtractor for URL %s' % url)
+            return ie_result
 
     def add_default_extra_info(self, ie_result, ie, url):
         self.add_extra_info(ie_result, {
@@ -1003,9 +1009,8 @@ class YoutubeDL(object):
                     self.to_screen('[download] ' + reason)
                     continue
 
-                entry_result = self.process_ie_result(entry,
-                                                      download=download,
-                                                      extra_info=extra)
+                entry_result = self.__process_iterable_entry(entry, download, extra)
+                # TODO: skip failed (empty) entries?
                 playlist_results.append(entry_result)
             ie_result['entries'] = playlist_results
             self.to_screen('[download] Finished downloading playlist: %s' % playlist)
@@ -1034,6 +1039,11 @@ class YoutubeDL(object):
         else:
             raise Exception('Invalid result type: %s' % result_type)
 
+    @__handle_extraction_exceptions
+    def __process_iterable_entry(self, entry, download, extra_info):
+        return self.process_ie_result(
+            entry, download=download, extra_info=extra_info)
+
     def _build_format_filter(self, filter_spec):
         " Returns a function to filter the formats according to the filter_spec "
 

From a7e0531999fa3a7fef542942aaad8c55e22adec5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 04:22:21 +0700
Subject: [PATCH 211/340] [downloader/http] Fix crash during urlopen caused by
 missing reason attribute of URLError

---
 youtube_dl/downloader/http.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 96379caf1..d8ac41dcc 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -109,7 +109,9 @@ class HttpFD(FileDownloader):
                 try:
                     ctx.data = self.ydl.urlopen(request)
                 except (compat_urllib_error.URLError, ) as err:
-                    if isinstance(err.reason, socket.timeout):
+                    # reason may not be available, e.g. for urllib2.HTTPError on python 2.6
+                    reason = getattr(err, 'reason', None)
+                    if isinstance(reason, socket.timeout):
                         raise RetryDownload(err)
                     raise err
                 # When trying to resume, Content-Range HTTP header of response has to be checked
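
The guarded lookup can be reproduced with a bare exception object: when the error exposes no usable `reason` (as with HTTPError on old Python versions), `getattr` keeps the isinstance() check from raising AttributeError. Illustrative only:

    import socket


    class FakeURLError(Exception):
        pass  # no 'reason' attribute, like urllib2.HTTPError on Python 2.6


    err = FakeURLError('416 Requested Range Not Satisfiable')
    reason = getattr(err, 'reason', None)   # err.reason would raise AttributeError
    if isinstance(reason, socket.timeout):
        print('timeout - retrying download')
    else:
        print('re-raising: %s' % err)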

From 51ca93d75162f554efd8d4cb5ac97864a38f7bee Mon Sep 17 00:00:00 2001
From: renalid <renalid@gmail.com>
Date: Fri, 20 Nov 2020 22:44:08 +0100
Subject: [PATCH 212/340] [francetv] Update to fix thumbnail URL issue (#27120)

Fix the thumbnail URL. The issue had been present for many years without being fixed; it's done now. :-)

Example: https://www.france.tv/france-2/de-gaulle-l-eclat-et-le-secret/de-gaulle-l-eclat-et-le-secret-saison-1/2035247-solitude.html

Previously generated (broken) thumbnail URL: http://pluzz.francetv.fr/staticftv/ref_emissions/2020-11-02/EMI_1104da66f533cc7dc5d0d07a181a18c2e2fe1d81_20201014122553940.jpg

Correct thumbnail URL after the fix: https://sivideo.webservices.francetelevisions.fr/staticftv/ref_emissions/2020-11-02/EMI_1104da66f533cc7dc5d0d07a181a18c2e2fe1d81_20201014122553940.jpg
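
The change boils down to passing a different base to urljoin(); assuming the API returns a host-relative image path (as the generated URLs above suggest), the joined result now points at the working host:

    from urllib.parse import urljoin

    # host-relative path taken from the example above (assumption: the API
    # returns the image as such a path)
    image = ('/staticftv/ref_emissions/2020-11-02/'
             'EMI_1104da66f533cc7dc5d0d07a181a18c2e2fe1d81_20201014122553940.jpg')

    print(urljoin('https://sivideo.webservices.francetelevisions.fr', image))
    # https://sivideo.webservices.francetelevisions.fr/staticftv/ref_emissions/...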
---
 youtube_dl/extractor/francetv.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index 92f0851e3..3ca415077 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -211,7 +211,7 @@ class FranceTVIE(InfoExtractor):
             'id': video_id,
             'title': self._live_title(title) if is_live else title,
             'description': clean_html(info.get('synopsis')),
-            'thumbnail': urljoin('http://pluzz.francetv.fr', info.get('image')),
+            'thumbnail': urljoin('https://sivideo.webservices.francetelevisions.fr', info.get('image')),
             'duration': int_or_none(info.get('real_duration')) or parse_duration(info.get('duree')),
             'timestamp': int_or_none(try_get(info, lambda x: x['diffusion']['timestamp'])),
             'is_live': is_live,

From a1c88c4819233cf5f3734bcd6cf251d4339196ce Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 20 Nov 2020 23:23:55 +0100
Subject: [PATCH 213/340] [infoq] fix format extraction(closes #25984)

---
 youtube_dl/extractor/infoq.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/infoq.py b/youtube_dl/extractor/infoq.py
index 18249cf9b..0a70a1fb4 100644
--- a/youtube_dl/extractor/infoq.py
+++ b/youtube_dl/extractor/infoq.py
@@ -54,7 +54,7 @@ class InfoQIE(BokeCCBaseIE):
 
     def _extract_rtmp_video(self, webpage):
         # The server URL is hardcoded
-        video_url = 'rtmpe://video.infoq.com/cfx/st/'
+        video_url = 'rtmpe://videof.infoq.com/cfx/st/'
 
         # Extract video URL
         encoded_id = self._search_regex(
@@ -86,17 +86,18 @@ class InfoQIE(BokeCCBaseIE):
         return [{
             'format_id': 'http_video',
             'url': http_video_url,
+            'http_headers': {'Referer': 'https://www.infoq.com/'},
         }]
 
     def _extract_http_audio(self, webpage, video_id):
-        fields = self._hidden_inputs(webpage)
+        fields = self._form_hidden_inputs('mp3Form', webpage)
         http_audio_url = fields.get('filename')
         if not http_audio_url:
             return []
 
         # base URL is found in the Location header in the response returned by
         # GET https://www.infoq.com/mp3download.action?filename=... when logged in.
-        http_audio_url = compat_urlparse.urljoin('http://res.infoq.com/downloads/mp3downloads/', http_audio_url)
+        http_audio_url = compat_urlparse.urljoin('http://ress.infoq.com/downloads/mp3downloads/', http_audio_url)
         http_audio_url = update_url_query(http_audio_url, self._extract_cf_auth(webpage))
 
         # audio file seem to be missing some times even if there is a download link
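The http_headers entry on a format dict tells the downloader to send those headers with every request for that format. Roughly the equivalent stdlib request would look like this sketch (the target URL is illustrative only):

    # Sketch: the 'http_headers' field on a format dict makes the downloader
    # send those headers; a plain stdlib request with the same Referer would
    # be built like this. The media URL below is illustrative.
    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request         # Python 2

    req = Request(
        'https://www.infoq.com/some/video.mp4',
        headers={'Referer': 'https://www.infoq.com/'})
    # Passing req to urlopen() would send the Referer the CDN expects.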

From e2096776b99e4b5b67aacd1bcc7807a3d3757236 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 20:48:13 +0700
Subject: [PATCH 214/340] [youtube:tab] Add support for current video and fix
 lives extraction (closes #27126)

---
 youtube_dl/extractor/extractors.py |   1 -
 youtube_dl/extractor/youtube.py    | 114 ++++++++++++++---------------
 2 files changed, 55 insertions(+), 60 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 183050e07..5691c4cba 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1475,7 +1475,6 @@ from .yourupload import YourUploadIE
 from .youtube import (
     YoutubeIE,
     YoutubeHistoryIE,
-    YoutubeLiveIE,
     YoutubeTabIE,
     YoutubePlaylistIE,
     YoutubeRecommendedIE,
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index fd9e54c1f..484f79765 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2559,13 +2559,57 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
     }, {
         'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
         'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
+        'info_dict': {
+            'id': '9Auq9mYxFEE',
+            'ext': 'mp4',
+            'title': 'Watch Sky News live',
+            'uploader': 'Sky News',
+            'uploader_id': 'skynews',
+            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
+            'upload_date': '20191102',
+            'description': 'md5:78de4e1c2359d0ea3ed829678e38b662',
+            'categories': ['News & Politics'],
+            'tags': list,
+            'like_count': int,
+            'dislike_count': int,
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://www.youtube.com/user/TheYoungTurks/live',
+        'info_dict': {
+            'id': 'a48o2S1cPoo',
+            'ext': 'mp4',
+            'title': 'The Young Turks - Live Main Show',
+            'uploader': 'The Young Turks',
+            'uploader_id': 'TheYoungTurks',
+            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
+            'upload_date': '20150715',
+            'license': 'Standard YouTube License',
+            'description': 'md5:438179573adcdff3c97ebb1ee632b891',
+            'categories': ['News & Politics'],
+            'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
+            'like_count': int,
+            'dislike_count': int,
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/TheYoungTurks/live',
+        'only_matching': True,
     }]
 
-    @classmethod
-    def suitable(cls, url):
-        return False if YoutubeLiveIE.suitable(url) else super(
-            YoutubeTabIE, cls).suitable(url)
-
     def _extract_channel_id(self, webpage):
         channel_id = self._html_search_meta(
             'channelId', webpage, 'channel id', default=None)
@@ -2951,7 +2995,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
         webpage = self._download_webpage(url, item_id)
         identity_token = self._search_regex(
-            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
+            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
             'identity token', default=None)
         data = self._extract_yt_initial_data(item_id, webpage)
         tabs = try_get(
@@ -2962,7 +3006,11 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
         if playlist:
             return self._extract_from_playlist(item_id, data, playlist)
-        # Fallback to video extraction if no playlist alike page is recognized
+        # Fallback to video extraction if no playlist-like page is recognized.
+        # First check for the current video, then try the v attribute of the URL query.
+        video_id = try_get(
+            data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
+            compat_str) or video_id
         if video_id:
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
         # Failed to recognize
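try_get() is what makes the deep currentVideoEndpoint lookup safe: it returns None instead of raising when any key along the path is missing. A self-contained sketch of the same pattern (the helper below mimics, but is not, youtube_dl.utils.try_get):

    # Sketch of the try_get pattern: dig into nested JSON and fall back to
    # None (or a previous value) when any key is missing.
    def try_get(src, getter, expected_type=None):
        try:
            value = getter(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            return None
        if expected_type is None or isinstance(value, expected_type):
            return value
        return None

    data = {'currentVideoEndpoint': {'watchEndpoint': {'videoId': 'dQw4w9WgXcQ'}}}
    video_id = None
    video_id = try_get(
        data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
        str) or video_id
    print(video_id)                                      # dQw4w9WgXcQ
    print(try_get({}, lambda x: x['missing']['path']))   # None, no exception
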
@@ -3083,58 +3131,6 @@ class YoutubeYtUserIE(InfoExtractor):
             ie=YoutubeTabIE.ie_key(), video_id=user_id)
 
 
-class YoutubeLiveIE(YoutubeBaseInfoExtractor):
-    IE_DESC = 'YouTube.com live streams'
-    _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:(?:user|channel|c)/)?(?P<id>[^/]+))/live'
-    IE_NAME = 'youtube:live'
-
-    _TESTS = [{
-        'url': 'https://www.youtube.com/user/TheYoungTurks/live',
-        'info_dict': {
-            'id': 'a48o2S1cPoo',
-            'ext': 'mp4',
-            'title': 'The Young Turks - Live Main Show',
-            'uploader': 'The Young Turks',
-            'uploader_id': 'TheYoungTurks',
-            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
-            'upload_date': '20150715',
-            'license': 'Standard YouTube License',
-            'description': 'md5:438179573adcdff3c97ebb1ee632b891',
-            'categories': ['News & Politics'],
-            'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
-            'like_count': int,
-            'dislike_count': int,
-        },
-        'params': {
-            'skip_download': True,
-        },
-    }, {
-        'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
-        'only_matching': True,
-    }, {
-        'url': 'https://www.youtube.com/TheYoungTurks/live',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        channel_id = mobj.group('id')
-        base_url = mobj.group('base_url')
-        webpage = self._download_webpage(url, channel_id, fatal=False)
-        if webpage:
-            page_type = self._og_search_property(
-                'type', webpage, 'page type', default='')
-            video_id = self._html_search_meta(
-                'videoId', webpage, 'video id', default=None)
-            if page_type.startswith('video') and video_id and re.match(
-                    r'^[0-9A-Za-z_-]{11}$', video_id):
-                return self.url_result(video_id, YoutubeIE.ie_key())
-        return self.url_result(base_url)
-
-
 class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com searches'
     # there doesn't appear to be a real limit, for example if you search for

From 46a265a2da26c663463244ecf9a4a699c2cd6efc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 20:49:56 +0700
Subject: [PATCH 215/340] [youtube] Fix like and dislike count extraction
 (closes #25977)

---
 youtube_dl/extractor/youtube.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 484f79765..fb4c31326 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2196,8 +2196,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
         def _extract_count(count_name):
             return str_to_int(self._search_regex(
-                r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
-                % re.escape(count_name),
+                (r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>' % re.escape(count_name),
+                 r'["\']label["\']\s*:\s*["\']([\d,.]+)\s+%ss["\']' % re.escape(count_name)),
                 video_webpage, count_name, default=None))
 
         like_count = _extract_count('like')
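The added fallback pattern targets newer markup where the counts only appear in accessibility labels such as "31,572 likes"; _search_regex tries each pattern in the tuple in turn, and str_to_int strips the thousands separators. A rough standalone sketch of what that second regex matches, against a made-up snippet:

    # Rough sketch of the new fallback pattern: it matches an accessibility
    # label like "label": "31,572 likes". The snippet below is made up.
    import re

    snippet = '{"accessibilityData": {"label": "31,572 likes"}}'

    def extract_count(name, webpage):
        m = re.search(
            r'["\']label["\']\s*:\s*["\']([\d,.]+)\s+%ss["\']' % re.escape(name),
            webpage)
        if not m:
            return None
        return int(m.group(1).replace(',', '').replace('.', ''))

    print(extract_count('like', snippet))     # 31572
    print(extract_count('dislike', snippet))  # None -> other patterns are tried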

From 21292c0649e956afc46bd39d774ec811d568de2a Mon Sep 17 00:00:00 2001
From: Daniel Peukert <daniel@peukert.cc>
Date: Sat, 21 Nov 2020 15:52:20 +0100
Subject: [PATCH 216/340] [youtube] Fix error reason extraction (#27081)

---
 youtube_dl/extractor/youtube.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index fb4c31326..fb6d816cc 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2023,6 +2023,21 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     formats.append(a_format)
             else:
                 error_message = extract_unavailable_message()
+                if not error_message:
+                    reason_list = try_get(
+                        player_response,
+                        lambda x: x['playabilityStatus']['errorScreen']['playerErrorMessageRenderer']['subreason']['runs'],
+                        list) or []
+                    for reason in reason_list:
+                        if not isinstance(reason, dict):
+                            continue
+                        reason_text = try_get(reason, lambda x: x['text'], compat_str)
+                        if reason_text:
+                            if not error_message:
+                                error_message = ''
+                            error_message += reason_text
+                    if error_message:
+                        error_message = clean_html(error_message)
                 if not error_message:
                     error_message = clean_html(try_get(
                         player_response, lambda x: x['playabilityStatus']['reason'],
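The new block walks playabilityStatus -> errorScreen -> playerErrorMessageRenderer -> subreason -> runs and concatenates the text of each run into the error message. A small sketch with a made-up player_response shows the shape being traversed:

    # Sketch of the subreason extraction, run against a made-up
    # player_response; each "run" contributes its text to the error message.
    player_response = {
        'playabilityStatus': {
            'errorScreen': {
                'playerErrorMessageRenderer': {
                    'subreason': {'runs': [
                        {'text': 'This video is private. '},
                        {'text': 'Sign in if you have been granted access.'},
                    ]},
                },
            },
        },
    }

    runs = (player_response.get('playabilityStatus', {})
            .get('errorScreen', {})
            .get('playerErrorMessageRenderer', {})
            .get('subreason', {})
            .get('runs', []))
    error_message = ''.join(
        run['text'] for run in runs if isinstance(run, dict) and run.get('text'))
    print(error_message)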

From 71ddc222adca21f471051e922d2c2f08a27b2d44 Mon Sep 17 00:00:00 2001
From: Josh Soref <jsoref@users.noreply.github.com>
Date: Sat, 21 Nov 2020 10:00:05 -0500
Subject: [PATCH 217/340] Fix typos (#27084)

* spelling: authorization

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: brightcove

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: creation

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: exceeded

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: exception

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: extension

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: extracting

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: extraction

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: frontline

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: improve

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: length

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: listsubtitles

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: multimedia

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: obfuscated

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: partitioning

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: playlist

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: playlists

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: restriction

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: services

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: split

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: srmediathek

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: support

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: thumbnail

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: verification

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>

* spelling: whitespaces

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
---
 ChangeLog                           | 34 ++++++++++++++---------------
 test/parameters.json                |  2 +-
 youtube_dl/compat.py                |  2 +-
 youtube_dl/extractor/brightcove.py  |  8 +++----
 youtube_dl/extractor/common.py      |  2 +-
 youtube_dl/extractor/europa.py      |  4 ++--
 youtube_dl/extractor/generic.py     |  2 +-
 youtube_dl/extractor/kusi.py        |  4 ++--
 youtube_dl/extractor/npr.py         |  2 +-
 youtube_dl/extractor/pbs.py         |  2 +-
 youtube_dl/extractor/soundcloud.py  |  2 +-
 youtube_dl/extractor/tagesschau.py  |  2 +-
 youtube_dl/extractor/theplatform.py |  2 +-
 youtube_dl/extractor/turner.py      |  6 ++---
 youtube_dl/extractor/vimeo.py       |  4 ++--
 youtube_dl/extractor/xiami.py       |  8 +++----
 youtube_dl/utils.py                 | 16 +++++++-------
 17 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 572e0360d..db5dd488a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -367,7 +367,7 @@ Extractors
     + Add support for more domains
 * [svt] Fix series extraction (#22297)
 * [svt] Fix article extraction (#22897, #22919)
-* [soundcloud] Imporve private playlist/set tracks extraction (#3707)
+* [soundcloud] Improve private playlist/set tracks extraction (#3707)
 
 
 version 2020.01.24
@@ -493,7 +493,7 @@ Extractors
 * [abcotvs] Relax URL regular expression and improve metadata extraction
   (#18014)
 * [channel9] Reduce response size
-* [adobetv] Improve extaction
+* [adobetv] Improve extraction
     * Use OnDemandPagedList for list extractors
     * Reduce show extraction requests
     * Extract original video format and subtitles
@@ -518,7 +518,7 @@ Extractors
 * [dailymotion] Improve extraction
     * Extract http formats included in m3u8 manifest
     * Fix user extraction (#3553, #21415)
-    + Add suport for User Authentication (#11491)
+    + Add support for User Authentication (#11491)
     * Fix password protected videos extraction (#23176)
     * Respect age limit option and family filter cookie value (#18437)
     * Handle video url playlist query param
@@ -603,7 +603,7 @@ Extractors
 - [go90] Remove extractor
 * [kakao] Remove raw request
 + [kakao] Extract format total bitrate
-* [daum] Fix VOD and Clip extracton (#15015)
+* [daum] Fix VOD and Clip extraction (#15015)
 * [kakao] Improve extraction
     + Add support for embed URLs
     + Add support for Kakao Legacy vid based embed URLs
@@ -647,7 +647,7 @@ Extractors
     * Improve format extraction (#22123)
     + Extract uploader_id and uploader_url (#21916)
     + Extract all known thumbnails (#19071, #20659)
-    * Fix extration for private playlists (#20976)
+    * Fix extraction for private playlists (#20976)
     + Add support for playlist embeds (#20976)
     * Skip preview formats (#22806)
 * [dplay] Improve extraction
@@ -1122,7 +1122,7 @@ Extractors
 * [hbo] Fix extraction and extract subtitles (#14629, #13709)
 * [youtube] Extract srv[1-3] subtitle formats (#20566)
 * [adultswim] Fix extraction (#18025)
-* [teamcoco] Fix extraction and add suport for subdomains (#17099, #20339)
+* [teamcoco] Fix extraction and add support for subdomains (#17099, #20339)
 * [adn] Fix subtitle compatibility with ffmpeg
 * [adn] Fix extraction and add support for positioning styles (#20549)
 * [vk] Use unique video id (#17848)
@@ -1534,7 +1534,7 @@ version 2018.11.18
 
 Extractors
 + [wwe] Extract subtitles
-+ [wwe] Add support for playlistst (#14781)
++ [wwe] Add support for playlists (#14781)
 + [wwe] Add support for wwe.com (#14781, #17450)
 * [vk] Detect geo restriction (#17767)
 * [openload] Use original host during extraction (#18211)
@@ -2567,7 +2567,7 @@ Extractors
 * [youku] Update ccode (#14872)
 * [mnet] Fix format extraction (#14883)
 + [xiami] Add Referer header to API request
-* [mtv] Correct scc extention in extracted subtitles (#13730)
+* [mtv] Correct scc extension in extracted subtitles (#13730)
 * [vvvvid] Fix extraction for kenc videos (#13406)
 + [br] Add support for BR Mediathek videos (#14560, #14788)
 + [daisuki] Add support for motto.daisuki.com (#14681)
@@ -2588,7 +2588,7 @@ Extractors
 * [nexx] Extract more formats
 + [openload] Add support for openload.link (#14763)
 * [empflix] Relax URL regular expression
-* [empflix] Fix extractrion
+* [empflix] Fix extraction
 * [tnaflix] Don't modify download URLs (#14811)
 - [gamersyde] Remove extractor
 * [francetv:generationwhat] Fix extraction
@@ -2783,7 +2783,7 @@ Extractors
 * [yahoo] Bypass geo restriction for brightcove (#14210)
 * [yahoo] Use extracted brightcove account id (#14210)
 * [rtve:alacarta] Fix extraction (#14290)
-+ [yahoo] Add support for custom brigthcove embeds (#14210)
++ [yahoo] Add support for custom brightcove embeds (#14210)
 + [generic] Add support for Video.js embeds
 + [gfycat] Add support for /gifs/detail URLs (#14322)
 * [generic] Fix infinite recursion for twitter:player URLs (#14339)
@@ -3028,7 +3028,7 @@ Extractors
 * [amcnetworks] Make rating optional (#12453)
 * [cloudy] Fix extraction (#13737)
 + [nickru] Add support for nickelodeon.ru
-* [mtv] Improve thumbnal extraction
+* [mtv] Improve thumbnail extraction
 * [nick] Automate geo-restriction bypass (#13711)
 * [niconico] Improve error reporting (#13696)
 
@@ -3392,7 +3392,7 @@ Extractors
 + [cda] Support birthday verification (#12789)
 * [leeco] Fix extraction (#12974)
 + [pbs] Extract chapters
-* [amp] Imporove thumbnail and subtitles extraction
+* [amp] Improve thumbnail and subtitles extraction
 * [foxsports] Fix extraction (#12945)
 - [coub] Remove comment count extraction (#12941)
 
@@ -3562,7 +3562,7 @@ Extractors
 + [rbmaradio] Add support for redbullradio.com URLs (#12687)
 + [npo:live] Add support for default URL (#12555)
 * [mixcloud:playlist] Fix title, description and view count extraction (#12582)
-+ [thesun] Add suport for thesun.co.uk (#11298, #12674)
++ [thesun] Add support for thesun.co.uk (#11298, #12674)
 + [ceskateleveize:porady] Add support for porady (#7411, #12645)
 * [ceskateleveize] Improve extraction and remove URL replacement hacks
 + [kaltura] Add support for iframe embeds (#12679)
@@ -3601,7 +3601,7 @@ Extractors
 * [funimation] Fix extraction (#10696, #11773)
 + [xfileshare] Add support for vidabc.com (#12589)
 + [xfileshare] Improve extraction and extract hls formats
-+ [crunchyroll] Pass geo verifcation proxy
++ [crunchyroll] Pass geo verification proxy
 + [cwtv] Extract ISM formats
 + [tvplay] Bypass geo restriction
 + [vrv] Add support for vrv.co
@@ -3665,7 +3665,7 @@ Extractors
 + [bostonglobe] Add extractor for bostonglobe.com (#12099)
 + [toongoggles] Add support for toongoggles.com (#12171)
 + [medialaan] Add support for Medialaan sites (#9974, #11912)
-+ [discoverynetworks] Add support for more domains and bypass geo restiction
++ [discoverynetworks] Add support for more domains and bypass geo restriction
 * [openload] Fix extraction (#10408)
 
 
@@ -5255,7 +5255,7 @@ version 2016.07.09.1
 Fixed/improved extractors
 - youtube
 - ard
-- srmediatek (#9373)
+- srmediathek (#9373)
 
 
 version 2016.07.09
@@ -5319,7 +5319,7 @@ Fixed/improved extractors
 - kaltura (#5557)
 - la7
 - Changed features
-- Rename --cn-verfication-proxy to --geo-verification-proxy
+- Rename --cn-verification-proxy to --geo-verification-proxy
 Miscellaneous
 - Add script for displaying downloads statistics
 
diff --git a/test/parameters.json b/test/parameters.json
index 7bf59c25f..65fd54428 100644
--- a/test/parameters.json
+++ b/test/parameters.json
@@ -37,7 +37,7 @@
     "writeinfojson": true, 
     "writesubtitles": false,
     "allsubtitles": false,
-    "listssubtitles": false,
+    "listsubtitles": false,
     "socket_timeout": 20,
     "fixup": "never"
 }
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index 0ee9bc760..6c3d49d45 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -2345,7 +2345,7 @@ except ImportError:  # Python <3.4
 
         # HTMLParseError has been deprecated in Python 3.3 and removed in
         # Python 3.5. Introducing dummy exception for Python >3.5 for compatible
-        # and uniform cross-version exceptiong handling
+        # and uniform cross-version exception handling
         class compat_HTMLParseError(Exception):
             pass
 
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 2aa9f4782..000eac71c 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -147,7 +147,7 @@ class BrightcoveLegacyIE(InfoExtractor):
     ]
 
     @classmethod
-    def _build_brighcove_url(cls, object_str):
+    def _build_brightcove_url(cls, object_str):
         """
         Build a Brightcove url from a xml string containing
         <object class="BrightcoveExperience">{params}</object>
@@ -217,7 +217,7 @@ class BrightcoveLegacyIE(InfoExtractor):
         return cls._make_brightcove_url(params)
 
     @classmethod
-    def _build_brighcove_url_from_js(cls, object_js):
+    def _build_brightcove_url_from_js(cls, object_js):
         # The layout of JS is as follows:
         # customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
         #   // build Brightcove <object /> XML
@@ -272,12 +272,12 @@ class BrightcoveLegacyIE(InfoExtractor):
             ).+?>\s*</object>''',
             webpage)
         if matches:
-            return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
+            return list(filter(None, [cls._build_brightcove_url(m) for m in matches]))
 
         matches = re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)
         if matches:
             return list(filter(None, [
-                cls._build_brighcove_url_from_js(custom_bc)
+                cls._build_brightcove_url_from_js(custom_bc)
                 for custom_bc in matches]))
         return [src for _, src in re.findall(
             r'<iframe[^>]+src=([\'"])((?:https?:)?//link\.brightcove\.com/services/player/(?!\1).+)\1', webpage)]
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 0c9089674..1a08c7616 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1664,7 +1664,7 @@ class InfoExtractor(object):
         # just the media without qualities renditions.
         # Fortunately, master playlist can be easily distinguished from media
         # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
-        # master playlist tags MUST NOT appear in a media playist and vice versa.
+        # master playlist tags MUST NOT appear in a media playlist and vice versa.
         # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
         # media playlist and MUST NOT appear in master playlist thus we can
         # clearly detect media playlist with this criterion.
diff --git a/youtube_dl/extractor/europa.py b/youtube_dl/extractor/europa.py
index 1efc0b2ec..2c1c747a1 100644
--- a/youtube_dl/extractor/europa.py
+++ b/youtube_dl/extractor/europa.py
@@ -60,7 +60,7 @@ class EuropaIE(InfoExtractor):
 
         title = get_item('title', preferred_langs) or video_id
         description = get_item('description', preferred_langs)
-        thumbnmail = xpath_text(playlist, './info/thumburl', 'thumbnail')
+        thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
         upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
         duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
         view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))
@@ -85,7 +85,7 @@ class EuropaIE(InfoExtractor):
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail': thumbnmail,
+            'thumbnail': thumbnail,
             'upload_date': upload_date,
             'duration': duration,
             'view_count': view_count,
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index d08a8cca5..f10f11244 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -842,7 +842,7 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             }
         },
-        # MTVSercices embed
+        # MTVServices embed
         {
             'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
             'md5': 'ca1aef97695ef2c1d6973256a57e5252',
diff --git a/youtube_dl/extractor/kusi.py b/youtube_dl/extractor/kusi.py
index 6a7e3baa7..9833d35eb 100644
--- a/youtube_dl/extractor/kusi.py
+++ b/youtube_dl/extractor/kusi.py
@@ -64,7 +64,7 @@ class KUSIIE(InfoExtractor):
         duration = float_or_none(xpath_text(doc, 'DURATION'), scale=1000)
         description = xpath_text(doc, 'ABSTRACT')
         thumbnail = xpath_text(doc, './THUMBNAILIMAGE/FILENAME')
-        createtion_time = timeconvert(xpath_text(doc, 'rfc822creationdate'))
+        creation_time = timeconvert(xpath_text(doc, 'rfc822creationdate'))
 
         quality_options = doc.find('{http://search.yahoo.com/mrss/}group').findall('{http://search.yahoo.com/mrss/}content')
         formats = []
@@ -84,5 +84,5 @@ class KUSIIE(InfoExtractor):
             'duration': duration,
             'formats': formats,
             'thumbnail': thumbnail,
-            'timestamp': createtion_time,
+            'timestamp': creation_time,
         }
diff --git a/youtube_dl/extractor/npr.py b/youtube_dl/extractor/npr.py
index 53acc6e57..9d1122f0c 100644
--- a/youtube_dl/extractor/npr.py
+++ b/youtube_dl/extractor/npr.py
@@ -33,7 +33,7 @@ class NprIE(InfoExtractor):
             },
         }],
     }, {
-        # mutlimedia, not media title
+        # multimedia, not media title
         'url': 'https://www.npr.org/2017/06/19/533198237/tigers-jaw-tiny-desk-concert',
         'info_dict': {
             'id': '533198237',
diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py
index 4dbe661be..d4baa16ee 100644
--- a/youtube_dl/extractor/pbs.py
+++ b/youtube_dl/extractor/pbs.py
@@ -477,7 +477,7 @@ class PBSIE(InfoExtractor):
             if media_id:
                 return media_id, presumptive_id, upload_date, description
 
-            # Fronline video embedded via flp
+            # Frontline video embedded via flp
             video_id = self._search_regex(
                 r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None)
             if video_id:
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index a2fddf6d9..abb85e1e5 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -558,7 +558,7 @@ class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
 
 class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
     def _extract_playlist(self, base_url, playlist_id, playlist_title):
-        # Per the SoundCloud documentation, the maximum limit for a linked partioning query is 200.
+        # Per the SoundCloud documentation, the maximum limit for a linked partitioning query is 200.
         # https://developers.soundcloud.com/blog/offset-pagination-deprecated
         COMMON_QUERY = {
             'limit': 200,
diff --git a/youtube_dl/extractor/tagesschau.py b/youtube_dl/extractor/tagesschau.py
index c351b7545..8ceab7e35 100644
--- a/youtube_dl/extractor/tagesschau.py
+++ b/youtube_dl/extractor/tagesschau.py
@@ -86,7 +86,7 @@ class TagesschauPlayerIE(InfoExtractor):
         #     return self._extract_via_api(kind, video_id)
 
         # JSON api does not provide some audio formats (e.g. ogg) thus
-        # extractiong audio via webpage
+        # extracting audio via webpage
 
         webpage = self._download_webpage(url, video_id)
 
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index 07055513a..41bfbe80f 100644
--- a/youtube_dl/extractor/theplatform.py
+++ b/youtube_dl/extractor/theplatform.py
@@ -208,7 +208,7 @@ class ThePlatformIE(ThePlatformBaseIE, AdobePassIE):
         if m:
             return [m.group('url')]
 
-        # Are whitesapces ignored in URLs?
+        # Are whitespaces ignored in URLs?
         # https://github.com/ytdl-org/youtube-dl/issues/12044
         matches = re.findall(
             r'(?s)<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage)
diff --git a/youtube_dl/extractor/turner.py b/youtube_dl/extractor/turner.py
index 4a6cbfbb8..2964504a2 100644
--- a/youtube_dl/extractor/turner.py
+++ b/youtube_dl/extractor/turner.py
@@ -56,9 +56,9 @@ class TurnerBaseIE(AdobePassIE):
         content_id = xpath_text(video_data, 'contentId') or video_id
         # rtmp_src = xpath_text(video_data, 'akamai/src')
         # if rtmp_src:
-        #     splited_rtmp_src = rtmp_src.split(',')
-        #     if len(splited_rtmp_src) == 2:
-        #         rtmp_src = splited_rtmp_src[1]
+        #     split_rtmp_src = rtmp_src.split(',')
+        #     if len(split_rtmp_src) == 2:
+        #         rtmp_src = split_rtmp_src[1]
         # aifp = xpath_text(video_data, 'akamai/aifp', default='')
 
         urls = []
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index cfd04d50c..4c55946f1 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -922,7 +922,7 @@ class VimeoAlbumIE(VimeoBaseInfoExtractor):
     }]
     _PAGE_SIZE = 100
 
-    def _fetch_page(self, album_id, authorizaion, hashed_pass, page):
+    def _fetch_page(self, album_id, authorization, hashed_pass, page):
         api_page = page + 1
         query = {
             'fields': 'link,uri',
@@ -934,7 +934,7 @@ class VimeoAlbumIE(VimeoBaseInfoExtractor):
         videos = self._download_json(
             'https://api.vimeo.com/albums/%s/videos' % album_id,
             album_id, 'Downloading page %d' % api_page, query=query, headers={
-                'Authorization': 'jwt ' + authorizaion,
+                'Authorization': 'jwt ' + authorization,
             })['data']
         for video in videos:
             link = video.get('link')
diff --git a/youtube_dl/extractor/xiami.py b/youtube_dl/extractor/xiami.py
index 618da8382..769aab331 100644
--- a/youtube_dl/extractor/xiami.py
+++ b/youtube_dl/extractor/xiami.py
@@ -54,17 +54,17 @@ class XiamiBaseIE(InfoExtractor):
     def _decrypt(origin):
         n = int(origin[0])
         origin = origin[1:]
-        short_lenth = len(origin) // n
-        long_num = len(origin) - short_lenth * n
+        short_length = len(origin) // n
+        long_num = len(origin) - short_length * n
         l = tuple()
         for i in range(0, n):
-            length = short_lenth
+            length = short_length
             if i < long_num:
                 length += 1
             l += (origin[0:length], )
             origin = origin[length:]
         ans = ''
-        for i in range(0, short_lenth + 1):
+        for i in range(0, short_length + 1):
             for j in range(0, n):
                 if len(l[j]) > i:
                     ans += l[j][i]
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 321f903ab..8cefafd79 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -2458,7 +2458,7 @@ class XAttrMetadataError(YoutubeDLError):
 
         # Parsing code and msg
         if (self.code in (errno.ENOSPC, errno.EDQUOT)
-                or 'No space left' in self.msg or 'Disk quota excedded' in self.msg):
+                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
             self.reason = 'NO_SPACE'
         elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
             self.reason = 'VALUE_TOO_LONG'
@@ -4207,10 +4207,10 @@ def parse_codecs(codecs_str):
     # http://tools.ietf.org/html/rfc6381
     if not codecs_str:
         return {}
-    splited_codecs = list(filter(None, map(
+    split_codecs = list(filter(None, map(
         lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
     vcodec, acodec = None, None
-    for full_codec in splited_codecs:
+    for full_codec in split_codecs:
         codec = full_codec.split('.')[0]
         if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora'):
             if not vcodec:
@@ -4221,10 +4221,10 @@ def parse_codecs(codecs_str):
         else:
             write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
     if not vcodec and not acodec:
-        if len(splited_codecs) == 2:
+        if len(split_codecs) == 2:
             return {
-                'vcodec': splited_codecs[0],
-                'acodec': splited_codecs[1],
+                'vcodec': split_codecs[0],
+                'acodec': split_codecs[1],
             }
     else:
         return {
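For context, the renamed variable simply holds the comma-split pieces of an RFC 6381 codecs string before they are classified as video or audio codecs. A toy version of just that split step (not the full parse_codecs helper):

    # Toy version of the codec-string split performed by parse_codecs() on an
    # RFC 6381 "codecs" attribute; the real helper goes on to classify each piece.
    def split_codecs(codecs_str):
        return list(filter(None, (c.strip() for c in codecs_str.strip().strip(',').split(','))))

    print(split_codecs('avc1.64001f, mp4a.40.2'))  # ['avc1.64001f', 'mp4a.40.2']
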
@@ -5463,7 +5463,7 @@ def encode_base_n(num, n, table=None):
 
 def decode_packed_codes(code):
     mobj = re.search(PACKED_CODES_RE, code)
-    obfucasted_code, base, count, symbols = mobj.groups()
+    obfuscated_code, base, count, symbols = mobj.groups()
     base = int(base)
     count = int(count)
     symbols = symbols.split('|')
@@ -5476,7 +5476,7 @@ def decode_packed_codes(code):
 
     return re.sub(
         r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
-        obfucasted_code)
+        obfuscated_code)
 
 
 def caesar(s, alphabet, shift):

From 0ada1b90b801e5d6ce713a25eccd7de21e7e4b6f Mon Sep 17 00:00:00 2001
From: Mattias Wadman <mattias.wadman@gmail.com>
Date: Sat, 21 Nov 2020 17:24:37 +0100
Subject: [PATCH 218/340] [svt] Extract timestamp and thumbnail in more cases
 (#27130)

Add timestamp, set to "valid from", which I think can be seen as the publish time.
Add thumbnail in more cases; it seems this was previously only done in the embedded-data case for some reason.
Switch the svtplay test URL to an existing video, one that also has no expire date.
Also add an additional thumbnail URL test regex.
---
 youtube_dl/extractor/svt.py | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py
index 2f6887d86..9dd91c69a 100644
--- a/youtube_dl/extractor/svt.py
+++ b/youtube_dl/extractor/svt.py
@@ -9,6 +9,7 @@ from ..utils import (
     determine_ext,
     dict_get,
     int_or_none,
+    unified_timestamp,
     str_or_none,
     strip_or_none,
     try_get,
@@ -44,7 +45,8 @@ class SVTBaseIE(InfoExtractor):
                     'format_id': player_type,
                     'url': vurl,
                 })
-        if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
+        rights = try_get(video_info, lambda x: x['rights'], dict) or {}
+        if not formats and rights.get('geoBlockedSweden'):
             self.raise_geo_restricted(
                 'This video is only available in Sweden',
                 countries=self._GEO_COUNTRIES)
@@ -70,6 +72,7 @@ class SVTBaseIE(InfoExtractor):
         episode = video_info.get('episodeTitle')
         episode_number = int_or_none(video_info.get('episodeNumber'))
 
+        timestamp = unified_timestamp(rights.get('validFrom'))
         duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
         age_limit = None
         adult = dict_get(
@@ -84,6 +87,7 @@ class SVTBaseIE(InfoExtractor):
             'formats': formats,
             'subtitles': subtitles,
             'duration': duration,
+            'timestamp': timestamp,
             'age_limit': age_limit,
             'series': series,
             'season_number': season_number,
@@ -141,21 +145,30 @@ class SVTPlayIE(SVTPlayBaseIE):
                     )
                     '''
     _TESTS = [{
-        'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
-        'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
+        'url': 'https://www.svtplay.se/video/26194546/det-har-ar-himlen',
+        'md5': '2382036fd6f8c994856c323fe51c426e',
         'info_dict': {
-            'id': '5996901',
+            'id': 'jNwpV9P',
             'ext': 'mp4',
-            'title': 'Flygplan till Haile Selassie',
-            'duration': 3527,
-            'thumbnail': r're:^https?://.*[\.-]jpg$',
+            'title': 'Det h\xe4r \xe4r himlen',
+            'timestamp': 1586044800,
+            'upload_date': '20200405',
+            'duration': 3515,
+            'thumbnail': r're:^https?://(?:.*[\.-]jpg|www.svtstatic.se/image/.*)$',
             'age_limit': 0,
             'subtitles': {
                 'sv': [{
-                    'ext': 'wsrt',
+                    'ext': 'vtt',
                 }]
             },
         },
+        'params': {
+            'format': 'bestvideo',
+            # skip download for now: the download test asserts that a segment is > 10000 bytes,
+            # but SVT uses init segments that are smaller
+            # AssertionError: Expected test_SVTPlay_jNwpV9P.mp4 to be at least 9.77KiB, but it's only 864.00B
+            'skip_download': True,
+        },
     }, {
         # geo restricted to Sweden
         'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
@@ -236,7 +249,10 @@ class SVTPlayIE(SVTPlayBaseIE):
                  r'["\']svtId["\']\s*:\s*["\']([\da-zA-Z-]+)'),
                 webpage, 'video id')
 
-        return self._extract_by_video_id(svt_id, webpage)
+        info_dict = self._extract_by_video_id(svt_id, webpage)
+        info_dict['thumbnail'] = thumbnail
+
+        return info_dict
 
 
 class SVTSeriesIE(SVTPlayBaseIE):
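The new timestamp field comes from feeding the rights' "validFrom" date string through unified_timestamp(). A stdlib-only approximation for a plain UTC ISO 8601 value (youtube-dl's helper accepts many more formats; the date below only mirrors the updated test expectation and is otherwise illustrative):

    # Stdlib approximation of unified_timestamp() for a simple UTC ISO 8601
    # string; the real helper handles many more date formats.
    import calendar
    import datetime

    def iso_to_timestamp(date_str):
        dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S+00:00')
        return calendar.timegm(dt.timetuple())

    print(iso_to_timestamp('2020-04-05T00:00:00+00:00'))  # 1586044800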

From 049f2242480fd481ddb6a88c5a9d7f360f890d7c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 23:35:01 +0700
Subject: [PATCH 219/340] [svtplay] Add support for svt.se/barnkanalen (closes
 #24817)

---
 youtube_dl/extractor/svt.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py
index 9dd91c69a..17bd4acdc 100644
--- a/youtube_dl/extractor/svt.py
+++ b/youtube_dl/extractor/svt.py
@@ -140,7 +140,11 @@ class SVTPlayIE(SVTPlayBaseIE):
     IE_DESC = 'SVT Play and Öppet arkiv'
     _VALID_URL = r'''(?x)
                     (?:
-                        svt:(?P<svt_id>[^/?#&]+)|
+                        (?:
+                            svt:|
+                            https?://(?:www\.)?svt\.se/barnkanalen/barnplay/[^/]+/
+                        )
+                        (?P<svt_id>[^/?#&]+)|
                         https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+)
                     )
                     '''
@@ -185,6 +189,12 @@ class SVTPlayIE(SVTPlayBaseIE):
     }, {
         'url': 'svt:14278044',
         'only_matching': True,
+    }, {
+        'url': 'https://www.svt.se/barnkanalen/barnplay/kar/eWv5MLX/',
+        'only_matching': True,
+    }, {
+        'url': 'svt:eWv5MLX',
+        'only_matching': True,
     }]
 
     def _adjust_title(self, info):
@@ -376,7 +386,7 @@ class SVTPageIE(InfoExtractor):
 
     @classmethod
     def suitable(cls, url):
-        return False if SVTIE.suitable(url) else super(SVTPageIE, cls).suitable(url)
+        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTPageIE, cls).suitable(url)
 
     def _real_extract(self, url):
         path, display_id = re.match(self._VALID_URL, url).groups()
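With the extended _VALID_URL, barnkanalen URLs and svt:-prefixed IDs both land in the svt_id group, while regular svtplay URLs keep using the id group. A quick standalone check of the pattern (reproduced here only for illustration):

    # Quick check of the extended _VALID_URL: barnkanalen URLs and svt: IDs
    # populate svt_id, svtplay/oppetarkiv URLs populate id.
    import re

    VALID_URL = r'''(?x)
                    (?:
                        (?:
                            svt:|
                            https?://(?:www\.)?svt\.se/barnkanalen/barnplay/[^/]+/
                        )
                        (?P<svt_id>[^/?#&]+)|
                        https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+)
                    )
                    '''

    for url in ('https://www.svt.se/barnkanalen/barnplay/kar/eWv5MLX/',
                'svt:eWv5MLX',
                'https://www.svtplay.se/video/26194546/det-har-ar-himlen'):
        m = re.match(VALID_URL, url)
        print(url, '->', m.group('svt_id') or m.group('id'))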

From 1e72660c9b2bcd18c688cd8786f81acaa7ad088e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 23:36:25 +0700
Subject: [PATCH 220/340] [svtplay] Fix test title

---
 youtube_dl/extractor/svt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py
index 17bd4acdc..a0b6ef4db 100644
--- a/youtube_dl/extractor/svt.py
+++ b/youtube_dl/extractor/svt.py
@@ -154,7 +154,7 @@ class SVTPlayIE(SVTPlayBaseIE):
         'info_dict': {
             'id': 'jNwpV9P',
             'ext': 'mp4',
-            'title': 'Det h\xe4r \xe4r himlen',
+            'title': 'Det här är himlen',
             'timestamp': 1586044800,
             'upload_date': '20200405',
             'duration': 3515,

From 82abc13aedb42d77afcb0ca9c1d7982955826b66 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 23:41:49 +0700
Subject: [PATCH 221/340] [youtube:tab] Comment out test

---
 youtube_dl/extractor/youtube.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index fb6d816cc..2aad855c8 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2620,10 +2620,13 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
     }, {
         'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
         'only_matching': True,
-    }, {
-        'url': 'https://www.youtube.com/TheYoungTurks/live',
-        'only_matching': True,
-    }]
+    },
+    # TODO
+    # {
+    #     'url': 'https://www.youtube.com/TheYoungTurks/live',
+    #     'only_matching': True,
+    # }
+    ]
 
     def _extract_channel_id(self, webpage):
         channel_id = self._html_search_meta(

From 5d8cb4367dc5f8c5739bfa4e807d2a8b301dfe4a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 23:54:33 +0700
Subject: [PATCH 222/340] release 2020.11.21

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 docs/supportedsites.md                           | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index e2e5a15ec..1601ee725 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.19
+ [debug] youtube-dl version 2020.11.21
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 880a96835..1e8bc7d82 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 25c5a7daf..611d7cab9 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 59716f962..aaef901ad 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.19
+ [debug] youtube-dl version 2020.11.21
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 410abee90..86c2f1e32 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.19. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.19**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 37425e7a1..53dbc28e5 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -41,6 +41,7 @@
  - **AlJazeera**
  - **Allocine**
  - **AlphaPorno**
+ - **Amara**
  - **AMCNetworks**
  - **AmericasTestKitchen**
  - **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
@@ -1130,7 +1131,6 @@
  - **YourUpload**
  - **youtube**: YouTube.com
  - **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
- - **youtube:live**: YouTube.com live streams
  - **youtube:playlist**: YouTube.com playlists
  - **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
  - **youtube:search**: YouTube.com searches
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 53eeb6ccf..9ede0334a 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.19'
+__version__ = '2020.11.21'

From 650bd8f6231b3302dadcddf336748a9eb1dc85ed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 23:52:39 +0700
Subject: [PATCH 223/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index db5dd488a..a108e8734 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,34 @@
+version <unreleased>
+
+Core
+* [downloader/http] Fix crash during urlopen caused by missing reason
+  of URLError
+* [YoutubeDL] Fix --ignore-errors for playlists with generator-based entries
+  of url_transparent (#27064)
+
+Extractors
++ [svtplay] Add support for svt.se/barnkanalen (#24817)
++ [svt] Extract timestamp (#27130)
+* [svtplay] Improve thumbnail extraction (#27130)
+* [youtube] Fix error reason extraction (#27081)
+* [youtube] Fix like and dislike count extraction (#25977)
++ [youtube:tab] Add support for current video and fix lives extraction (#27126)
+* [infoq] Fix format extraction (#25984)
+* [francetv] Update to fix thumbnail URL issue (#27120)
+* [youtube] Improve yt initial data extraction (#27093)
++ [discoverynetworks] Add support new TLC/DMAX URLs (#27100)
+* [rai] Fix protocol relative relinker URLs (#22766)
+* [rai] Fix unavailable video format detection
+* [rai] Improve extraction
+* [rai] Fix extraction (#27077)
+* [viki] Improve format extraction
+* [viki] Fix stream extraction from MPD (#27092)
+* [googledrive] Fix format extraction (#26979)
++ [amara] Add support for amara.org (#20618)
+* [vimeo:album] Fix extraction (#27079)
+* [mtv] Fix mgid extraction (#26841)
+
+
 version 2020.11.19
 
 Core

From f23eceebbfa0aa26f7ff598026ee6029233148b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 21 Nov 2020 23:59:11 +0700
Subject: [PATCH 224/340] release 2020.11.21.1

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 1601ee725..e2cbaee1c 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.21
+ [debug] youtube-dl version 2020.11.21.1
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 1e8bc7d82..e531e89c0 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 611d7cab9..686878c56 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index aaef901ad..17b853716 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.21
+ [debug] youtube-dl version 2020.11.21.1
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 86c2f1e32..109c47925 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index a108e8734..5a08b3a66 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.21.1
 
 Core
 * [downloader/http] Fix crash during urlopen caused by missing reason
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 9ede0334a..d7e901521 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.21'
+__version__ = '2020.11.21.1'

From c4cabf040e1bf37aec69a3ff45594ac6965ba139 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 22 Nov 2020 05:04:01 +0700
Subject: [PATCH 225/340] [pinterest] Add extractor (closes #25747)

---
 youtube_dl/extractor/extractors.py |   4 +
 youtube_dl/extractor/pinterest.py  | 176 +++++++++++++++++++++++++++++
 2 files changed, 180 insertions(+)
 create mode 100644 youtube_dl/extractor/pinterest.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 5691c4cba..ba11f12b9 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -844,6 +844,10 @@ from .picarto import (
 )
 from .piksel import PikselIE
 from .pinkbike import PinkbikeIE
+from .pinterest import (
+    PinterestIE,
+    PinterestCollectionIE,
+)
 from .pladform import PladformIE
 from .platzi import (
     PlatziIE,
diff --git a/youtube_dl/extractor/pinterest.py b/youtube_dl/extractor/pinterest.py
new file mode 100644
index 000000000..2bb4ca660
--- /dev/null
+++ b/youtube_dl/extractor/pinterest.py
@@ -0,0 +1,176 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    determine_ext,
+    float_or_none,
+    int_or_none,
+    try_get,
+    unified_timestamp,
+    url_or_none,
+)
+
+
+class PinterestBaseIE(InfoExtractor):
+    _VALID_URL_BASE = r'https?://(?:[^/]+\.)?pinterest\.(?:com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx|dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu|co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)'
+
+    def _extract_resource(self, webpage, video_id):
+        return self._parse_json(
+            self._search_regex(
+                r'<script[^>]+\bid=["\']initial-state["\'][^>]*>({.+?})</script>',
+                webpage, 'application json'),
+            video_id)['resourceResponses']
+
+    def _extract_video(self, data, extract_formats=True):
+        video_id = data['id']
+
+        title = (data.get('title') or data.get('grid_title') or video_id).strip()
+
+        formats = []
+        duration = None
+        if extract_formats:
+            for format_id, format_dict in data['videos']['video_list'].items():
+                if not isinstance(format_dict, dict):
+                    continue
+                format_url = url_or_none(format_dict.get('url'))
+                if not format_url:
+                    continue
+                duration = float_or_none(format_dict.get('duration'), scale=1000)
+                ext = determine_ext(format_url)
+                if 'hls' in format_id.lower() or ext == 'm3u8':
+                    formats.extend(self._extract_m3u8_formats(
+                        format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                        m3u8_id=format_id, fatal=False))
+                else:
+                    formats.append({
+                        'url': format_url,
+                        'format_id': format_id,
+                        'width': int_or_none(format_dict.get('width')),
+                        'height': int_or_none(format_dict.get('height')),
+                        'duration': duration,
+                    })
+            self._sort_formats(
+                formats, field_preference=('height', 'width', 'tbr', 'format_id'))
+
+        description = data.get('description') or data.get('description_html') or data.get('seo_description')
+        timestamp = unified_timestamp(data.get('created_at'))
+
+        def _u(field):
+            return try_get(data, lambda x: x['closeup_attribution'][field], compat_str)
+
+        uploader = _u('full_name')
+        uploader_id = _u('id')
+
+        repost_count = int_or_none(data.get('repin_count'))
+        comment_count = int_or_none(data.get('comment_count'))
+        categories = try_get(data, lambda x: x['pin_join']['visual_annotation'], list)
+        tags = data.get('hashtags')
+
+        thumbnails = []
+        images = data.get('images')
+        if isinstance(images, dict):
+            for thumbnail_id, thumbnail in images.items():
+                if not isinstance(thumbnail, dict):
+                    continue
+                thumbnail_url = url_or_none(thumbnail.get('url'))
+                if not thumbnail_url:
+                    continue
+                thumbnails.append({
+                    'url': thumbnail_url,
+                    'width': int_or_none(thumbnail.get('width')),
+                    'height': int_or_none(thumbnail.get('height')),
+                })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'timestamp': timestamp,
+            'thumbnails': thumbnails,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'repost_count': repost_count,
+            'comment_count': comment_count,
+            'categories': categories,
+            'tags': tags,
+            'formats': formats,
+            'extractor_key': PinterestIE.ie_key(),
+        }
+
+
+class PinterestIE(PinterestBaseIE):
+    _VALID_URL = r'%s/pin/(?P<id>\d+)' % PinterestBaseIE._VALID_URL_BASE
+    _TESTS = [{
+        'url': 'https://www.pinterest.com/pin/664281013778109217/',
+        'md5': '6550c2af85d6d9f3fe3b88954d1577fc',
+        'info_dict': {
+            'id': '664281013778109217',
+            'ext': 'mp4',
+            'title': 'Origami',
+            'description': 'md5:b9d90ddf7848e897882de9e73344f7dd',
+            'duration': 57.7,
+            'timestamp': 1593073622,
+            'upload_date': '20200625',
+            'uploader': 'Love origami -I am Dafei',
+            'uploader_id': '586523688879454212',
+            'repost_count': 50,
+            'comment_count': 0,
+            'categories': list,
+            'tags': list,
+        },
+    }, {
+        'url': 'https://co.pinterest.com/pin/824721750502199491/',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        data = self._extract_resource(webpage, video_id)[0]['response']['data']
+        return self._extract_video(data)
+
+
+class PinterestCollectionIE(PinterestBaseIE):
+    _VALID_URL = r'%s/[^/]+/(?P<id>[^/?#&]+)' % PinterestBaseIE._VALID_URL_BASE
+    _TESTS = [{
+        'url': 'https://www.pinterest.ca/mashal0407/cool-diys/',
+        'info_dict': {
+            'id': '585890301462791043',
+            'title': 'cool diys',
+        },
+        'playlist_count': 8,
+    }]
+
+    @classmethod
+    def suitable(cls, url):
+        return False if PinterestIE.suitable(url) else super(
+            PinterestCollectionIE, cls).suitable(url)
+
+    def _real_extract(self, url):
+        collection_name = self._match_id(url)
+        webpage = self._download_webpage(url, collection_name)
+        resource = self._extract_resource(webpage, collection_name)[1]
+        entries = []
+        for item in resource['response']['data']:
+            if not isinstance(item, dict) or item.get('type') != 'pin':
+                continue
+            video_id = item.get('id')
+            if video_id:
+                # Some pins may not be available anonymously via pin URL
+                # video = self._extract_video(item, extract_formats=False)
+                # video.update({
+                #     '_type': 'url_transparent',
+                #     'url': 'https://www.pinterest.com/pin/%s/' % video_id,
+                # })
+                # entries.append(video)
+                entries.append(self._extract_video(item))
+        title = try_get(
+            resource, lambda x: x['options']['board_title'], compat_str)
+        collection_id = try_get(
+            resource, lambda x: x['options']['board_id'],
+            compat_str) or collection_name
+        return self.playlist_result(
+            entries, playlist_id=collection_id, playlist_title=title)

From 193422e12a98ebcc49a215cf3667c7fce593f25c Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 22 Nov 2020 12:54:55 +0100
Subject: [PATCH 226/340] [extractor/common] add generic support for akamai
 http format extraction

---
 youtube_dl/extractor/common.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 1a08c7616..16aff885c 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2596,6 +2596,7 @@ class InfoExtractor(object):
 
     def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
         formats = []
+
         hdcore_sign = 'hdcore=3.7.0'
         f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
         hds_host = hosts.get('hds')
@@ -2608,6 +2609,7 @@ class InfoExtractor(object):
         for entry in f4m_formats:
             entry.update({'extra_param_to_segment_url': hdcore_sign})
         formats.extend(f4m_formats)
+
         m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
         hls_host = hosts.get('hls')
         if hls_host:
@@ -2615,6 +2617,31 @@ class InfoExtractor(object):
         formats.extend(self._extract_m3u8_formats(
             m3u8_url, video_id, 'mp4', 'm3u8_native',
             m3u8_id='hls', fatal=False))
+
+        http_host = hosts.get('http')
+        if http_host and 'hdnea=' not in manifest_url:
+            REPL_REGEX = r'https://[^/]+/i/([^,]+),([^/]+),([^/]+).csmil/.+'
+            qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
+            qualities_length = len(qualities)
+            if len(formats) in (qualities_length + 1, qualities_length * 2 + 1):
+                i = 0
+                http_formats = []
+                for f in formats:
+                    if f['protocol'] == 'm3u8_native' and f['vcodec'] != 'none':
+                        for protocol in ('http', 'https'):
+                            http_f = f.copy()
+                            del http_f['manifest_url']
+                            http_url = re.sub(
+                                REPL_REGEX, protocol + r'://%s/\1%s\3' % (http_host, qualities[i]), f['url'])
+                            http_f.update({
+                                'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
+                                'url': http_url,
+                                'protocol': protocol,
+                            })
+                            http_formats.append(http_f)
+                        i += 1
+                formats.extend(http_formats)
+
         return formats
 
     def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
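
The hunk above derives progressive HTTP formats from the HLS variant URLs whenever the caller passes an 'http' host and the manifest is not protected by an 'hdnea' token. A minimal sketch of how an extractor might opt in, assuming its videos are also mirrored on a plain HTTP host (the host name below is illustrative; the Sky Italia patch later in this series passes 'videoplatform.sky.it'):

# Inside a hypothetical extractor's _real_extract(); hls_url points at an
# Akamai /i/...,<bitrates>,...csmil/master.m3u8 manifest.
formats = self._extract_akamai_formats(
    hls_url, video_id, hosts={'http': 'progressive.example.com'})  # assumed host
self._sort_formats(formats)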

From e9cbb98a0f63e08cf7c42d1612450e2534c8de7e Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 22 Nov 2020 13:01:56 +0100
Subject: [PATCH 227/340] [skyit] add support for multiple Sky Italia
 websites (closes #26629)

---
 youtube_dl/extractor/extractors.py |  10 ++
 youtube_dl/extractor/skyit.py      | 239 +++++++++++++++++++++++++++++
 2 files changed, 249 insertions(+)
 create mode 100644 youtube_dl/extractor/skyit.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index ba11f12b9..356f4cc6b 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1013,6 +1013,16 @@ from .shared import (
 from .showroomlive import ShowRoomLiveIE
 from .sina import SinaIE
 from .sixplay import SixPlayIE
+from .skyit import (
+    SkyItPlayerIE,
+    SkyItVideoIE,
+    SkyItVideoLiveIE,
+    SkyItIE,
+    SkyItAcademyIE,
+    SkyItArteIE,
+    CieloTVItIE,
+    TV8ItIE,
+)
 from .skylinewebcams import SkylineWebcamsIE
 from .skynewsarabia import (
     SkyNewsArabiaIE,
diff --git a/youtube_dl/extractor/skyit.py b/youtube_dl/extractor/skyit.py
new file mode 100644
index 000000000..14a4d8d4c
--- /dev/null
+++ b/youtube_dl/extractor/skyit.py
@@ -0,0 +1,239 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+    compat_parse_qs,
+    compat_urllib_parse_urlparse,
+)
+from ..utils import (
+    dict_get,
+    int_or_none,
+    parse_duration,
+    unified_timestamp,
+)
+
+
+class SkyItPlayerIE(InfoExtractor):
+    IE_NAME = 'player.sky.it'
+    _VALID_URL = r'https?://player\.sky\.it/player/(?:external|social)\.html\?.*?\bid=(?P<id>\d+)'
+    _GEO_BYPASS = False
+    _DOMAIN = 'sky'
+    _PLAYER_TMPL = 'https://player.sky.it/player/external.html?id=%s&domain=%s'
+    # http://static.sky.it/static/skyplayer/conf.json
+    _TOKEN_MAP = {
+        'cielo': 'Hh9O7M8ks5yi6nSROL7bKYz933rdf3GhwZlTLMgvy4Q',
+        'hotclub': 'kW020K2jq2lk2eKRJD2vWEg832ncx2EivZlTLQput2C',
+        'mtv8': 'A5Nn9GGb326CI7vP5e27d7E4PIaQjota',
+        'salesforce': 'C6D585FD1615272C98DE38235F38BD86',
+        'sitocommerciale': 'VJwfFuSGnLKnd9Phe9y96WkXgYDCguPMJ2dLhGMb2RE',
+        'sky': 'F96WlOd8yoFmLQgiqv6fNQRvHZcsWk5jDaYnDvhbiJk',
+        'skyacademy': 'A6LAn7EkO2Q26FRy0IAMBekX6jzDXYL3',
+        'skyarte': 'LWk29hfiU39NNdq87ePeRach3nzTSV20o0lTv2001Cd',
+        'theupfront': 'PRSGmDMsg6QMGc04Obpoy7Vsbn7i2Whp',
+    }
+
+    def _player_url_result(self, video_id):
+        return self.url_result(
+            self._PLAYER_TMPL % (video_id, self._DOMAIN),
+            SkyItPlayerIE.ie_key(), video_id)
+
+    def _parse_video(self, video, video_id):
+        title = video['title']
+        is_live = video.get('type') == 'live'
+        hls_url = video.get(('streaming' if is_live else 'hls') + '_url')
+        if not hls_url and video.get('geoblock' if is_live else 'geob'):
+            self.raise_geo_restricted(countries=['IT'])
+
+        if is_live:
+            formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4')
+        else:
+            formats = self._extract_akamai_formats(
+                hls_url, video_id, {'http': 'videoplatform.sky.it'})
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': self._live_title(title) if is_live else title,
+            'formats': formats,
+            'thumbnail': dict_get(video, ('video_still', 'video_still_medium', 'thumb')),
+            'description': video.get('short_desc') or None,
+            'timestamp': unified_timestamp(video.get('create_date')),
+            'duration': int_or_none(video.get('duration_sec')) or parse_duration(video.get('duration')),
+            'is_live': is_live,
+        }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        domain = compat_parse_qs(compat_urllib_parse_urlparse(
+            url).query).get('domain', [None])[0]
+        token = dict_get(self._TOKEN_MAP, (domain, 'sky'))
+        video = self._download_json(
+            'https://apid.sky.it/vdp/v1/getVideoData',
+            video_id, query={
+                'caller': 'sky',
+                'id': video_id,
+                'token': token
+            }, headers=self.geo_verification_headers())
+        return self._parse_video(video, video_id)
+
+
+class SkyItVideoIE(SkyItPlayerIE):
+    IE_NAME = 'video.sky.it'
+    _VALID_URL = r'https?://(?:masterchef|video|xfactor)\.sky\.it(?:/[^/]+)*/video/[0-9a-z-]+-(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://video.sky.it/news/mondo/video/uomo-ucciso-da-uno-squalo-in-australia-631227',
+        'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
+        'info_dict': {
+            'id': '631227',
+            'ext': 'mp4',
+            'title': 'Uomo ucciso da uno squalo in Australia',
+            'timestamp': 1606036192,
+            'upload_date': '20201122',
+        }
+    }, {
+        'url': 'https://xfactor.sky.it/video/x-factor-2020-replay-audizioni-1-615820',
+        'only_matching': True,
+    }, {
+        'url': 'https://masterchef.sky.it/video/masterchef-9-cosa-e-successo-nella-prima-puntata-562831',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        return self._player_url_result(video_id)
+
+
+class SkyItVideoLiveIE(SkyItPlayerIE):
+    IE_NAME = 'video.sky.it:live'
+    _VALID_URL = r'https?://video\.sky\.it/diretta/(?P<id>[^/?&#]+)'
+    _TEST = {
+        'url': 'https://video.sky.it/diretta/tg24',
+        'info_dict': {
+            'id': '1',
+            'ext': 'mp4',
+            'title': r're:Diretta TG24 \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
+            'description': 'Guarda la diretta streaming di SkyTg24, segui con Sky tutti gli appuntamenti e gli speciali di Tg24.',
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        asset_id = compat_str(self._parse_json(self._search_regex(
+            r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
+            webpage, 'next data'), display_id)['props']['initialState']['livePage']['content']['asset_id'])
+        livestream = self._download_json(
+            'https://apid.sky.it/vdp/v1/getLivestream',
+            asset_id, query={'id': asset_id})
+        return self._parse_video(livestream, asset_id)
+
+
+class SkyItIE(SkyItPlayerIE):
+    IE_NAME = 'sky.it'
+    _VALID_URL = r'https?://(?:sport|tg24)\.sky\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
+    _TESTS = [{
+        'url': 'https://sport.sky.it/calcio/serie-a/2020/11/21/juventus-cagliari-risultato-gol',
+        'info_dict': {
+            'id': '631201',
+            'ext': 'mp4',
+            'title': 'Un rosso alla violenza: in campo per i diritti delle donne',
+            'upload_date': '20201121',
+            'timestamp': 1605995753,
+        },
+        'expected_warnings': ['Unable to download f4m manifest'],
+    }, {
+        'url': 'https://tg24.sky.it/mondo/2020/11/22/australia-squalo-uccide-uomo',
+        'md5': 'fe5c91e59a84a3437eaa0bca6e134ccd',
+        'info_dict': {
+            'id': '631227',
+            'ext': 'mp4',
+            'title': 'Uomo ucciso da uno squalo in Australia',
+            'timestamp': 1606036192,
+            'upload_date': '20201122',
+        },
+    }]
+    _VIDEO_ID_REGEX = r'data-videoid="(\d+)"'
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        video_id = self._search_regex(
+            self._VIDEO_ID_REGEX, webpage, 'video id')
+        return self._player_url_result(video_id)
+
+
+class SkyItAcademyIE(SkyItIE):
+    IE_NAME = 'skyacademy.it'
+    _VALID_URL = r'https?://(?:www\.)?skyacademy\.it(?:/[^/]+)*/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
+    _TESTS = [{
+        'url': 'https://www.skyacademy.it/eventi-speciali/2019/07/05/a-lezione-di-cinema-con-sky-academy-/',
+        'md5': 'ced5c26638b7863190cbc44dd6f6ba08',
+        'info_dict': {
+            'id': '523458',
+            'ext': 'mp4',
+            'title': 'Sky Academy "The Best CineCamp 2019"',
+            'timestamp': 1562843784,
+            'upload_date': '20190711',
+        }
+    }]
+    _DOMAIN = 'skyacademy'
+    _VIDEO_ID_REGEX = r'id="news-videoId_(\d+)"'
+
+
+class SkyItArteIE(SkyItIE):
+    IE_NAME = 'arte.sky.it'
+    _VALID_URL = r'https?://arte\.sky\.it/video/(?P<id>[^/?&#]+)'
+    _TESTS = [{
+        'url': 'https://arte.sky.it/video/serie-musei-venezia-collezionismo-12-novembre/',
+        'md5': '515aee97b87d7a018b6c80727d3e7e17',
+        'info_dict': {
+            'id': '627926',
+            'ext': 'mp4',
+            'title': "Musei Galleria Franchetti alla Ca' d'Oro Palazzo Grimani",
+            'upload_date': '20201106',
+            'timestamp': 1604664493,
+        }
+    }]
+    _DOMAIN = 'skyarte'
+    _VIDEO_ID_REGEX = r'(?s)<iframe[^>]+src="(?:https:)?//player\.sky\.it/player/external\.html\?[^"]*\bid=(\d+)'
+
+
+class CieloTVItIE(SkyItIE):
+    IE_NAME = 'cielotv.it'
+    _VALID_URL = r'https?://(?:www\.)?cielotv\.it/video/(?P<id>[^.]+)\.html'
+    _TESTS = [{
+        'url': 'https://www.cielotv.it/video/Il-lunedi-e-sempre-un-dramma.html',
+        'md5': 'c4deed77552ba901c2a0d9258320304b',
+        'info_dict': {
+            'id': '499240',
+            'ext': 'mp4',
+            'title': 'Il lunedì è sempre un dramma',
+            'upload_date': '20190329',
+            'timestamp': 1553862178,
+        }
+    }]
+    _DOMAIN = 'cielo'
+    _VIDEO_ID_REGEX = r'videoId\s*=\s*"(\d+)"'
+
+
+class TV8ItIE(SkyItVideoIE):
+    IE_NAME = 'tv8.it'
+    _VALID_URL = r'https?://tv8\.it/showvideo/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://tv8.it/showvideo/630529/ogni-mattina-ucciso-asino-di-andrea-lo-cicero/18-11-2020/',
+        'md5': '9ab906a3f75ea342ed928442f9dabd21',
+        'info_dict': {
+            'id': '630529',
+            'ext': 'mp4',
+            'title': 'Ogni mattina - Ucciso asino di Andrea Lo Cicero',
+            'timestamp': 1605721374,
+            'upload_date': '20201118',
+        }
+    }]
+    _DOMAIN = 'mtv8'
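
All of the site-specific classes above reuse SkyItPlayerIE._parse_video, so they ultimately hit the same player API. A minimal standalone sketch of that request using the requests library (the endpoint and query parameters are taken from SkyItPlayerIE._real_extract; the token is the generic 'sky' entry from _TOKEN_MAP):

import requests

def sky_video_data(video_id, token='F96WlOd8yoFmLQgiqv6fNQRvHZcsWk5jDaYnDvhbiJk'):
    # returns the JSON metadata that _parse_video() consumes
    return requests.get(
        'https://apid.sky.it/vdp/v1/getVideoData',
        params={'caller': 'sky', 'id': video_id, 'token': token}).json()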

From 9d531aa2918067570e4827fcced59c60accac220 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 22 Nov 2020 13:07:04 +0100
Subject: [PATCH 228/340] [rumble] add support for embed pages (#10785)

---
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/rumble.py     | 67 ++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+)
 create mode 100644 youtube_dl/extractor/rumble.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 356f4cc6b..31fb4c95a 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -963,6 +963,7 @@ from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVELiveIE, RTVETe
 from .rtvnh import RTVNHIE
 from .rtvs import RTVSIE
 from .ruhd import RUHDIE
+from .rumble import RumbleEmbedIE
 from .rutube import (
     RutubeIE,
     RutubeChannelIE,
diff --git a/youtube_dl/extractor/rumble.py b/youtube_dl/extractor/rumble.py
new file mode 100644
index 000000000..4a0225109
--- /dev/null
+++ b/youtube_dl/extractor/rumble.py
@@ -0,0 +1,67 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    determine_ext,
+    int_or_none,
+    parse_iso8601,
+    try_get,
+)
+
+
+class RumbleEmbedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?rumble\.com/embed/(?:[0-9a-z]+\.)?(?P<id>[0-9a-z]+)'
+    _TESTS = [{
+        'url': 'https://rumble.com/embed/v5pv5f',
+        'md5': '36a18a049856720189f30977ccbb2c34',
+        'info_dict': {
+            'id': 'v5pv5f',
+            'ext': 'mp4',
+            'title': 'WMAR 2 News Latest Headlines | October 20, 6pm',
+            'timestamp': 1571611968,
+            'upload_date': '20191020',
+        }
+    }, {
+        'url': 'https://rumble.com/embed/ufe9n.v5pv5f',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        video = self._download_json(
+            'https://rumble.com/embedJS/', video_id,
+            query={'request': 'video', 'v': video_id})
+        title = video['title']
+
+        formats = []
+        for height, ua in (video.get('ua') or {}).items():
+            for i in range(2):
+                f_url = try_get(ua, lambda x: x[i], compat_str)
+                if f_url:
+                    ext = determine_ext(f_url)
+                    f = {
+                        'ext': ext,
+                        'format_id': '%s-%sp' % (ext, height),
+                        'height': int_or_none(height),
+                        'url': f_url,
+                    }
+                    bitrate = try_get(ua, lambda x: x[i + 2]['bitrate'])
+                    if bitrate:
+                        f['tbr'] = int_or_none(bitrate)
+                    formats.append(f)
+        self._sort_formats(formats)
+
+        author = video.get('author') or {}
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': video.get('i'),
+            'timestamp': parse_iso8601(video.get('pubDate')),
+            'channel': author.get('name'),
+            'channel_url': author.get('url'),
+            'duration': int_or_none(video.get('duration')),
+        }

From cb6e24f946023e04469acb00174dfd71c2fa518d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 22 Nov 2020 13:16:03 +0100
Subject: [PATCH 229/340] [lbry] relax _VALID_URL regex (closes #27144)

---
 youtube_dl/extractor/lbry.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/lbry.py b/youtube_dl/extractor/lbry.py
index 0a7ee919c..6177297ab 100644
--- a/youtube_dl/extractor/lbry.py
+++ b/youtube_dl/extractor/lbry.py
@@ -16,7 +16,7 @@ from ..utils import (
 
 class LBRYIE(InfoExtractor):
     IE_NAME = 'lbry.tv'
-    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
+    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[^:]+:[0-9a-z]+/[^:]+:[0-9a-z])'
     _TESTS = [{
         # Video
         'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
@@ -44,6 +44,9 @@ class LBRYIE(InfoExtractor):
     }, {
         'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
         'only_matching': True,
+    }, {
+        'url': "https://odysee.com/@ScammerRevolts:b0/I-SYSKEY'D-THE-SAME-SCAMMERS-3-TIMES!:b",
+        'only_matching': True,
     }]
 
     def _call_api_proxy(self, method, display_id, params):

From 15f27347911e51954184aa483a77c98eaea2c399 Mon Sep 17 00:00:00 2001
From: Jia Rong Yee <28086837+fourjr@users.noreply.github.com>
Date: Sun, 22 Nov 2020 21:12:47 +0800
Subject: [PATCH 230/340] [nytimes] Add new cooking.nytimes.com extractor
 (#27143)

* [nytimes] support cooking.nytimes.com, resolves #27112

Co-authored-by: remitamine <remitamine@gmail.com>
---
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/nytimes.py    | 38 ++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 31fb4c95a..fb18a0563 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -782,6 +782,7 @@ from .ntvru import NTVRuIE
 from .nytimes import (
     NYTimesIE,
     NYTimesArticleIE,
+    NYTimesCookingIE,
 )
 from .nuvid import NuvidIE
 from .nzz import NZZIE
diff --git a/youtube_dl/extractor/nytimes.py b/youtube_dl/extractor/nytimes.py
index fc78ca56c..976b1c694 100644
--- a/youtube_dl/extractor/nytimes.py
+++ b/youtube_dl/extractor/nytimes.py
@@ -221,3 +221,41 @@ class NYTimesArticleIE(NYTimesBaseIE):
              r'NYTD\.FlexTypes\.push\s*\(\s*({.+})\s*\)\s*;'),
             webpage, 'podcast data')
         return self._extract_podcast_from_json(podcast_data, page_id, webpage)
+
+
+class NYTimesCookingIE(NYTimesBaseIE):
+    _VALID_URL = r'https?://cooking\.nytimes\.com/(?:guid|recip)es/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://cooking.nytimes.com/recipes/1017817-cranberry-curd-tart',
+        'md5': 'dab81fa2eaeb3f9ed47498bdcfcdc1d3',
+        'info_dict': {
+            'id': '100000004756089',
+            'ext': 'mov',
+            'timestamp': 1479383008,
+            'uploader': 'By SHAW LASH, ADAM SAEWITZ and JAMES HERRON',
+            'title': 'Cranberry Tart',
+            'upload_date': '20161117',
+            'description': 'If you are a fan of lemon curd or the classic French tarte au citron, you will love this cranberry version.',
+        },
+    }, {
+        'url': 'https://cooking.nytimes.com/guides/13-how-to-cook-a-turkey',
+        'md5': '4b2e8c70530a89b8d905a2b572316eb8',
+        'info_dict': {
+            'id': '100000003951728',
+            'ext': 'mov',
+            'timestamp': 1445509539,
+            'description': 'Turkey guide',
+            'upload_date': '20151022',
+            'title': 'Turkey',
+        }
+    }]
+
+    def _real_extract(self, url):
+        page_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, page_id)
+
+        video_id = self._search_regex(
+            r'data-video-id=["\'](\d+)', webpage, 'video id')
+
+        return self._extract_video_from_id(video_id)

From c84f9475b8df0f892b631966159e1649dafe13f0 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 22 Nov 2020 17:39:41 +0100
Subject: [PATCH 231/340] [box] Add new extractor (#5949)

---
 youtube_dl/extractor/box.py        | 98 ++++++++++++++++++++++++++++++
 youtube_dl/extractor/extractors.py |  1 +
 2 files changed, 99 insertions(+)
 create mode 100644 youtube_dl/extractor/box.py

diff --git a/youtube_dl/extractor/box.py b/youtube_dl/extractor/box.py
new file mode 100644
index 000000000..aae82d1af
--- /dev/null
+++ b/youtube_dl/extractor/box.py
@@ -0,0 +1,98 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    parse_iso8601,
+    # try_get,
+    update_url_query,
+)
+
+
+class BoxIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/]+)/file/(?P<id>\d+)'
+    _TEST = {
+        'url': 'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538',
+        'md5': '1f81b2fd3960f38a40a3b8823e5fcd43',
+        'info_dict': {
+            'id': '510727257538',
+            'ext': 'mp4',
+            'title': 'Garber   St. Louis will be 28th MLS team  +scarving.mp4',
+            'uploader': 'MLS Video',
+            'timestamp': 1566320259,
+            'upload_date': '20190820',
+            'uploader_id': '235196876',
+        }
+    }
+
+    def _real_extract(self, url):
+        shared_name, file_id = re.match(self._VALID_URL, url).groups()
+        webpage = self._download_webpage(url, file_id)
+        request_token = self._parse_json(self._search_regex(
+            r'Box\.config\s*=\s*({.+?});', webpage,
+            'Box config'), file_id)['requestToken']
+        access_token = self._download_json(
+            'https://app.box.com/app-api/enduserapp/elements/tokens', file_id,
+            'Downloading token JSON metadata',
+            data=json.dumps({'fileIDs': [file_id]}).encode(), headers={
+                'Content-Type': 'application/json',
+                'X-Request-Token': request_token,
+                'X-Box-EndUser-API': 'sharedName=' + shared_name,
+            })[file_id]['read']
+        shared_link = 'https://app.box.com/s/' + shared_name
+        f = self._download_json(
+            'https://api.box.com/2.0/files/' + file_id, file_id,
+            'Downloading file JSON metadata', headers={
+                'Authorization': 'Bearer ' + access_token,
+                'BoxApi': 'shared_link=' + shared_link,
+                'X-Rep-Hints': '[dash]',  # TODO: extract `hls` formats
+            }, query={
+                'fields': 'authenticated_download_url,created_at,created_by,description,extension,is_download_available,name,representations,size'
+            })
+        title = f['name']
+
+        query = {
+            'access_token': access_token,
+            'shared_link': shared_link
+        }
+
+        formats = []
+
+        # for entry in (try_get(f, lambda x: x['representations']['entries'], list) or []):
+        #     entry_url_template = try_get(
+        #         entry, lambda x: x['content']['url_template'])
+        #     if not entry_url_template:
+        #         continue
+        #     representation = entry.get('representation')
+        #     if representation == 'dash':
+        #         TODO: append query to every fragment URL
+        #         formats.extend(self._extract_mpd_formats(
+        #             entry_url_template.replace('{+asset_path}', 'manifest.mpd'),
+        #             file_id, query=query))
+
+        authenticated_download_url = f.get('authenticated_download_url')
+        if authenticated_download_url and f.get('is_download_available'):
+            formats.append({
+                'ext': f.get('extension') or determine_ext(title),
+                'filesize': f.get('size'),
+                'format_id': 'download',
+                'url': update_url_query(authenticated_download_url, query),
+            })
+
+        self._sort_formats(formats)
+
+        creator = f.get('created_by') or {}
+
+        return {
+            'id': file_id,
+            'title': title,
+            'formats': formats,
+            'description': f.get('description') or None,
+            'uploader': creator.get('name'),
+            'timestamp': parse_iso8601(f.get('created_at')),
+            'uploader_id': creator.get('id'),
+        }
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index fb18a0563..7ba4087fd 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -122,6 +122,7 @@ from .blinkx import BlinkxIE
 from .bloomberg import BloombergIE
 from .bokecc import BokeCCIE
 from .bostonglobe import BostonGlobeIE
+from .box import BoxIE
 from .bpb import BpbIE
 from .br import (
     BRIE,
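
For reference, the token exchange performed by BoxIE above can be reproduced on its own. A minimal sketch using the requests library (the shared name, file id and request token are placeholders; the endpoint and header names come from the patch):

import json
import requests

def box_read_token(shared_name, file_id, request_token):
    # request_token is scraped from the Box.config JSON embedded in the shared page
    resp = requests.post(
        'https://app.box.com/app-api/enduserapp/elements/tokens',
        data=json.dumps({'fileIDs': [file_id]}),
        headers={
            'Content-Type': 'application/json',
            'X-Request-Token': request_token,
            'X-Box-EndUser-API': 'sharedName=' + shared_name,
        })
    resp.raise_for_status()
    # the response maps each file id to its access tokens; 'read' is used for the Files API
    return resp.json()[file_id]['read']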

From dd0f524c69ad95541f7d370bdb877ee68f722f26 Mon Sep 17 00:00:00 2001
From: renalid <renalid@gmail.com>
Date: Sun, 22 Nov 2020 19:35:53 +0100
Subject: [PATCH 232/340] [franceinter] add thumbnail url (#27153)

Co-authored-by: remitamine <remitamine@gmail.com>
---
 youtube_dl/extractor/franceinter.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py
index 05806895c..a009f4d38 100644
--- a/youtube_dl/extractor/franceinter.py
+++ b/youtube_dl/extractor/franceinter.py
@@ -16,6 +16,7 @@ class FranceInterIE(InfoExtractor):
             'ext': 'mp3',
             'title': 'Affaire Cahuzac : le contentieux du compte en Suisse',
             'description': 'md5:401969c5d318c061f86bda1fa359292b',
+            'thumbnail': r're:^https?://.*\.jpg',
             'upload_date': '20160907',
         },
     }
@@ -31,6 +32,7 @@ class FranceInterIE(InfoExtractor):
 
         title = self._og_search_title(webpage)
         description = self._og_search_description(webpage)
+        thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage)
 
         upload_date_str = self._search_regex(
             r'class=["\']\s*cover-emission-period\s*["\'][^>]*>[^<]+\s+(\d{1,2}\s+[^\s]+\s+\d{4})<',
@@ -48,6 +50,7 @@ class FranceInterIE(InfoExtractor):
             'id': video_id,
             'title': title,
             'description': description,
+            'thumbnail' : thumbnail,
             'upload_date': upload_date,
             'formats': [{
                 'url': video_url,

From 2cd43a00d1eec54c40e3fdf9a288f4cb391a8323 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 22 Nov 2020 19:38:45 +0100
Subject: [PATCH 233/340] [franceinter] flake8

---
 youtube_dl/extractor/franceinter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py
index a009f4d38..ae822a50e 100644
--- a/youtube_dl/extractor/franceinter.py
+++ b/youtube_dl/extractor/franceinter.py
@@ -50,7 +50,7 @@ class FranceInterIE(InfoExtractor):
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail' : thumbnail,
+            'thumbnail': thumbnail,
             'upload_date': upload_date,
             'formats': [{
                 'url': video_url,

From 484fe787379a20ec9ef19f69b9e07321359bbd1e Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 23 Nov 2020 14:16:38 +0100
Subject: [PATCH 234/340] [pinterest] Add support for large collections (more
 than 25 pins)

---
 youtube_dl/extractor/pinterest.py | 87 ++++++++++++++++++++-----------
 1 file changed, 56 insertions(+), 31 deletions(-)

diff --git a/youtube_dl/extractor/pinterest.py b/youtube_dl/extractor/pinterest.py
index 2bb4ca660..b249c9eda 100644
--- a/youtube_dl/extractor/pinterest.py
+++ b/youtube_dl/extractor/pinterest.py
@@ -1,6 +1,9 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import json
+import re
+
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
@@ -16,12 +19,12 @@ from ..utils import (
 class PinterestBaseIE(InfoExtractor):
     _VALID_URL_BASE = r'https?://(?:[^/]+\.)?pinterest\.(?:com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx|dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu|co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)'
 
-    def _extract_resource(self, webpage, video_id):
-        return self._parse_json(
-            self._search_regex(
-                r'<script[^>]+\bid=["\']initial-state["\'][^>]*>({.+?})</script>',
-                webpage, 'application json'),
-            video_id)['resourceResponses']
+    def _call_api(self, resource, video_id, options):
+        return self._download_json(
+            'https://www.pinterest.com/resource/%sResource/get/' % resource,
+            video_id, 'Download %s JSON metadata' % resource, query={
+                'data': json.dumps({'options': options})
+            })['resource_response']
 
     def _extract_video(self, data, extract_formats=True):
         video_id = data['id']
@@ -128,13 +131,16 @@ class PinterestIE(PinterestBaseIE):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        data = self._extract_resource(webpage, video_id)[0]['response']['data']
+        data = self._call_api(
+            'Pin', video_id, {
+                'field_set_key': 'unauth_react_main_pin',
+                'id': video_id,
+            })['data']
         return self._extract_video(data)
 
 
 class PinterestCollectionIE(PinterestBaseIE):
-    _VALID_URL = r'%s/[^/]+/(?P<id>[^/?#&]+)' % PinterestBaseIE._VALID_URL_BASE
+    _VALID_URL = r'%s/(?P<username>[^/]+)/(?P<id>[^/?#&]+)' % PinterestBaseIE._VALID_URL_BASE
     _TESTS = [{
         'url': 'https://www.pinterest.ca/mashal0407/cool-diys/',
         'info_dict': {
@@ -142,6 +148,14 @@ class PinterestCollectionIE(PinterestBaseIE):
             'title': 'cool diys',
         },
         'playlist_count': 8,
+    }, {
+        'url': 'https://www.pinterest.ca/fudohub/videos/',
+        'info_dict': {
+            'id': '682858430939307450',
+            'title': 'VIDEOS',
+        },
+        'playlist_mincount': 365,
+        'skip': 'Test with extract_formats=False',
     }]
 
     @classmethod
@@ -150,27 +164,38 @@ class PinterestCollectionIE(PinterestBaseIE):
             PinterestCollectionIE, cls).suitable(url)
 
     def _real_extract(self, url):
-        collection_name = self._match_id(url)
-        webpage = self._download_webpage(url, collection_name)
-        resource = self._extract_resource(webpage, collection_name)[1]
+        username, slug = re.match(self._VALID_URL, url).groups()
+        board = self._call_api(
+            'Board', slug, {
+                'slug': slug,
+                'username': username
+            })['data']
+        board_id = board['id']
+        options = {
+            'board_id': board_id,
+            'page_size': 250,
+        }
+        bookmark = None
         entries = []
-        for item in resource['response']['data']:
-            if not isinstance(item, dict) or item.get('type') != 'pin':
-                continue
-            video_id = item.get('id')
-            if video_id:
-                # Some pins may not be available anonymously via pin URL
-                # video = self._extract_video(item, extract_formats=False)
-                # video.update({
-                #     '_type': 'url_transparent',
-                #     'url': 'https://www.pinterest.com/pin/%s/' % video_id,
-                # })
-                # entries.append(video)
-                entries.append(self._extract_video(item))
-        title = try_get(
-            resource, lambda x: x['options']['board_title'], compat_str)
-        collection_id = try_get(
-            resource, lambda x: x['options']['board_id'],
-            compat_str) or collection_name
+        while True:
+            if bookmark:
+                options['bookmarks'] = [bookmark]
+            board_feed = self._call_api('BoardFeed', board_id, options)
+            for item in (board_feed.get('data') or []):
+                if not isinstance(item, dict) or item.get('type') != 'pin':
+                    continue
+                video_id = item.get('id')
+                if video_id:
+                    # Some pins may not be available anonymously via pin URL
+                    # video = self._extract_video(item, extract_formats=False)
+                    # video.update({
+                    #     '_type': 'url_transparent',
+                    #     'url': 'https://www.pinterest.com/pin/%s/' % video_id,
+                    # })
+                    # entries.append(video)
+                    entries.append(self._extract_video(item))
+            bookmark = board_feed.get('bookmark')
+            if not bookmark:
+                break
         return self.playlist_result(
-            entries, playlist_id=collection_id, playlist_title=title)
+            entries, playlist_id=board_id, playlist_title=board.get('name'))
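
The loop above pages through the BoardFeed resource by echoing back the 'bookmark' cursor from each response until none is returned. The same pattern written as a standalone generator (endpoint, query encoding and field names come from the patch; download_json is a hypothetical stand-in for _download_json):

import json

def iter_board_pins(download_json, board_id, page_size=250):
    # Yield pin dicts from a Pinterest board, following the bookmark cursor.
    options = {'board_id': board_id, 'page_size': page_size}
    bookmark = None
    while True:
        if bookmark:
            options['bookmarks'] = [bookmark]
        feed = download_json(
            'https://www.pinterest.com/resource/BoardFeedResource/get/',
            query={'data': json.dumps({'options': options})})['resource_response']
        for item in feed.get('data') or []:
            if isinstance(item, dict) and item.get('type') == 'pin':
                yield item
        bookmark = feed.get('bookmark')
        if not bookmark:
            break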

From 316b10855af61798760b30531a5dc34da8d9a69b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 23 Nov 2020 22:19:25 +0700
Subject: [PATCH 235/340] [youtube:tab] Fix some weird typo (closes #27157)

---
 youtube_dl/extractor/youtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 2aad855c8..8f814a81c 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3013,7 +3013,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
         webpage = self._download_webpage(url, item_id)
         identity_token = self._search_regex(
-            r'\bID_TOKEN["\']\s*:\s/l*["\'](.+?)["\']', webpage,
+            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
             'identity token', default=None)
         data = self._extract_yt_initial_data(item_id, webpage)
         tabs = try_get(

From 9d2c90354f9743214a0d02de6981fa748af11b37 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 23 Nov 2020 22:33:21 +0700
Subject: [PATCH 236/340] [youtube:favorites] Restore extractor

---
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/youtube.py    | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 7ba4087fd..71178da7e 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1491,6 +1491,7 @@ from .yourporn import YourPornIE
 from .yourupload import YourUploadIE
 from .youtube import (
     YoutubeIE,
+    YoutubeFavouritesIE,
     YoutubeHistoryIE,
     YoutubeTabIE,
     YoutubePlaylistIE,
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 8f814a81c..29c1d0413 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3149,6 +3149,25 @@ class YoutubeYtUserIE(InfoExtractor):
             ie=YoutubeTabIE.ie_key(), video_id=user_id)
 
 
+class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
+    IE_NAME = 'youtube:favorites'
+    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
+    _VALID_URL = r'https?://(?:www\.)?youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
+    _LOGIN_REQUIRED = True
+    _TESTS = [{
+        'url': ':ytfav',
+        'only_matching': True,
+    }, {
+        'url': ':ytfavorites',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        return self.url_result(
+            'https://www.youtube.com/playlist?list=LL',
+            ie=YoutubeTabIE.ie_key())
+
+
 class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com searches'
     # there doesn't appear to be a real limit, for example if you search for

From 323427281899cf3527e504ac7058f5b10aad5d61 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 23 Nov 2020 22:34:27 +0700
Subject: [PATCH 237/340] [youtube:tab] PEP 8

---
 youtube_dl/extractor/youtube.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 29c1d0413..abb1cad74 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2621,11 +2621,11 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
         'only_matching': True,
     },
-    # TODO
-    # {
-    #     'url': 'https://www.youtube.com/TheYoungTurks/live',
-    #     'only_matching': True,
-    # }
+        # TODO
+        # {
+        #     'url': 'https://www.youtube.com/TheYoungTurks/live',
+        #     'only_matching': True,
+        # }
     ]
 
     def _extract_channel_id(self, webpage):

From 191286265d8ad4fff834249d44e3d334a969b668 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Nov 2020 00:10:25 +0700
Subject: [PATCH 238/340] [youtube:tab] Fix feeds extraction (closes #25695,
 closes #26452)

---
 test/test_all_urls.py           |   7 +-
 youtube_dl/extractor/youtube.py | 161 ++++++++++++++++++--------------
 2 files changed, 97 insertions(+), 71 deletions(-)

diff --git a/test/test_all_urls.py b/test/test_all_urls.py
index 56a08bed8..50c3466fa 100644
--- a/test/test_all_urls.py
+++ b/test/test_all_urls.py
@@ -61,9 +61,10 @@ class TestAllURLsMatching(unittest.TestCase):
     #     self.assertMatch('http://www.youtube.com/NASAgovVideo/videos', ['youtube:tab'])
 
     def test_youtube_feeds(self):
-        self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watchlater'])
-        self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:subscriptions'])
-        self.assertMatch('https://www.youtube.com/feed/recommended', ['youtube:recommended'])
+        self.assertMatch('https://www.youtube.com/feed/library', ['youtube:tab'])
+        self.assertMatch('https://www.youtube.com/feed/history', ['youtube:tab'])
+        self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:tab'])
+        self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:tab'])
 
     # def test_youtube_search_matching(self):
     #     self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index abb1cad74..7324d8080 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -33,7 +33,6 @@ from ..utils import (
     get_element_by_id,
     int_or_none,
     mimetype2ext,
-    orderedSet,
     parse_codecs,
     parse_duration,
     remove_quotes,
@@ -2381,7 +2380,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
 class YoutubeTabIE(YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com tab'
-    _VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|(?:playlist|watch)\?.*?\blist=)(?P<id>[^/?#&]+)'
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:\w+\.)?
+                        (?:
+                            youtube(?:kids)?\.com|
+                            invidio\.us
+                        )/
+                        (?:
+                            (?:channel|c|user|feed)/|
+                            (?:playlist|watch)\?.*?\blist=
+                        )
+                        (?P<id>[^/?\#&]+)
+                    '''
     IE_NAME = 'youtube:tab'
 
     _TESTS = [{
@@ -2620,7 +2631,30 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
     }, {
         'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
         'only_matching': True,
-    },
+    }, {
+        'url': 'https://www.youtube.com/feed/trending',
+        'only_matching': True,
+    }, {
+        # needs auth
+        'url': 'https://www.youtube.com/feed/library',
+        'only_matching': True,
+    }, {
+        # needs auth
+        'url': 'https://www.youtube.com/feed/history',
+        'only_matching': True,
+    }, {
+        # needs auth
+        'url': 'https://www.youtube.com/feed/subscriptions',
+        'only_matching': True,
+    }, {
+        # needs auth
+        'url': 'https://www.youtube.com/feed/watch_later',
+        'only_matching': True,
+    }, {
+        # no longer available?
+        'url': 'https://www.youtube.com/feed/recommended',
+        'only_matching': True,
+    }
         # TODO
         # {
         #     'url': 'https://www.youtube.com/TheYoungTurks/live',
@@ -2707,27 +2741,34 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                     'https://www.youtube.com/channel/%s' % channel_id,
                     ie=YoutubeTabIE.ie_key(), video_title=title)
 
-    def _shelf_entries_trimmed(self, shelf_renderer):
-        renderer = try_get(
-            shelf_renderer, lambda x: x['content']['horizontalListRenderer'], dict)
-        if not renderer:
+    def _shelf_entries_from_content(self, shelf_renderer):
+        content = shelf_renderer.get('content')
+        if not isinstance(content, dict):
             return
-        # TODO: add support for nested playlists so each shelf is processed
-        # as separate playlist
-        # TODO: this includes only first N items
-        for entry in self._grid_entries(renderer):
-            yield entry
+        renderer = content.get('gridRenderer')
+        if renderer:
+            # TODO: add support for nested playlists so each shelf is processed
+            # as separate playlist
+            # TODO: this includes only first N items
+            for entry in self._grid_entries(renderer):
+                yield entry
+        renderer = content.get('horizontalListRenderer')
+        if renderer:
+            # TODO
+            pass
 
     def _shelf_entries(self, shelf_renderer):
         ep = try_get(
             shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
             compat_str)
         shelf_url = urljoin('https://www.youtube.com', ep)
-        if not shelf_url:
-            return
-        title = try_get(
-            shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
-        yield self.url_result(shelf_url, video_title=title)
+        if shelf_url:
+            title = try_get(
+                shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+            yield self.url_result(shelf_url, video_title=title)
+        # Shelf may not contain shelf URL, fallback to extraction from content
+        for entry in self._shelf_entries_from_content(shelf_renderer):
+            yield entry
 
     def _playlist_entries(self, video_list_renderer):
         for content in video_list_renderer['contents']:
@@ -2832,8 +2873,11 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             }
 
     def _entries(self, tab, identity_token):
+        slr_renderer = try_get(tab, lambda x: x['sectionListRenderer'], dict)
+        if not slr_renderer:
+            return
         continuation = None
-        slr_contents = try_get(tab, lambda x: x['sectionListRenderer']['contents'], list) or []
+        slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
         for slr_content in slr_contents:
             if not isinstance(slr_content, dict):
                 continue
@@ -2876,6 +2920,9 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             if not continuation:
                 continuation = self._extract_continuation(is_renderer)
 
+        if not continuation:
+            continuation = self._extract_continuation(slr_renderer)
+
         headers = {
             'x-youtube-client-name': '1',
             'x-youtube-client-version': '2.20201112.04.01',
@@ -2924,7 +2971,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                 continuation_item = continuation_items[0]
                 if not isinstance(continuation_item, dict):
                     continue
-                renderer = continuation_item.get('playlistVideoRenderer')
+                renderer = continuation_item.get('playlistVideoRenderer') or continuation_item.get('itemSectionRenderer')
                 if renderer:
                     video_list_renderer = {'contents': continuation_items}
                     for entry in self._playlist_entries(video_list_renderer):
@@ -2969,6 +3016,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         selected_tab = self._extract_selected_tab(tabs)
         renderer = try_get(
             data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
+        playlist_id = title = description = None
         if renderer:
             channel_title = renderer.get('title') or item_id
             tab_title = selected_tab.get('title')
@@ -3289,10 +3337,10 @@ class YoutubeSearchURLIE(YoutubeSearchIE):
 """
 
 
-class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
+class YoutubeFeedsInfoExtractor(YoutubeTabIE):
     """
     Base class for feed extractors
-    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
+    Subclasses must define the _FEED_NAME property.
     """
     _LOGIN_REQUIRED = True
 
@@ -3303,55 +3351,17 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
     def _real_initialize(self):
         self._login()
 
-    def _entries(self, page):
-        # The extraction process is the same as for playlists, but the regex
-        # for the video ids doesn't contain an index
-        ids = []
-        more_widget_html = content_html = page
-        for page_num in itertools.count(1):
-            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
-
-            # 'recommended' feed has infinite 'load more' and each new portion spins
-            # the same videos in (sometimes) slightly different order, so we'll check
-            # for unicity and break when portion has no new videos
-            new_ids = list(filter(lambda video_id: video_id not in ids, orderedSet(matches)))
-            if not new_ids:
-                break
-
-            ids.extend(new_ids)
-
-            for entry in self._ids_to_results(new_ids):
-                yield entry
-
-            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
-            if not mobj:
-                break
-
-            more = self._download_json(
-                'https://www.youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
-                'Downloading page #%s' % page_num,
-                transform_source=uppercase_escape,
-                headers=self._YOUTUBE_CLIENT_HEADERS)
-            content_html = more['content_html']
-            more_widget_html = more['load_more_widget_html']
-
     def _real_extract(self, url):
-        page = self._download_webpage(
+        return self.url_result(
             'https://www.youtube.com/feed/%s' % self._FEED_NAME,
-            self._PLAYLIST_TITLE)
-        return self.playlist_result(
-            self._entries(page), playlist_title=self._PLAYLIST_TITLE)
+            ie=YoutubeTabIE.ie_key())
 
 
 class YoutubeWatchLaterIE(InfoExtractor):
     IE_NAME = 'youtube:watchlater'
     IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/watch_later|:ytwatchlater'
-
+    _VALID_URL = r':ytwatchlater'
     _TESTS = [{
-        'url': 'https://www.youtube.com/feed/watch_later',
-        'only_matching': True,
-    }, {
         'url': ':ytwatchlater',
         'only_matching': True,
     }]
@@ -3363,23 +3373,38 @@ class YoutubeWatchLaterIE(InfoExtractor):
 
 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
     IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/recommended|:ytrec(?:ommended)?'
+    _VALID_URL = r':ytrec(?:ommended)?'
     _FEED_NAME = 'recommended'
-    _PLAYLIST_TITLE = 'Youtube Recommended videos'
+    _TESTS = [{
+        'url': ':ytrec',
+        'only_matching': True,
+    }, {
+        'url': ':ytrecommended',
+        'only_matching': True,
+    }]
 
 
 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
     IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
+    _VALID_URL = r':ytsubs(?:criptions)?'
     _FEED_NAME = 'subscriptions'
-    _PLAYLIST_TITLE = 'Youtube Subscriptions'
+    _TESTS = [{
+        'url': ':ytsubs',
+        'only_matching': True,
+    }, {
+        'url': ':ytsubscriptions',
+        'only_matching': True,
+    }]
 
 
 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
     IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/history|:ythistory'
+    _VALID_URL = r':ythistory'
     _FEED_NAME = 'history'
-    _PLAYLIST_TITLE = 'Youtube History'
+    _TESTS = [{
+        'url': ':ythistory',
+        'only_matching': True,
+    }]
 
 
 class YoutubeTruncatedURLIE(InfoExtractor):

From da4eaa15a275628a56ce0512d916fb879734969f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Nov 2020 00:20:42 +0700
Subject: [PATCH 239/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 5a08b3a66..d00c06195 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,22 @@
+version <unreleased>
+
+Core
++ [extractor/common] Add generic support for akamai HTTP format extraction
+
+Extractors
+* [youtube:tab] Fix feeds extraction (#25695, #26452)
+* [youtube:favorites] Restore extractor
+* [youtube:tab] Fix some weird typo (#27157)
++ [pinterest] Add support for large collections (more than 25 pins)
++ [franceinter] Extract thumbnail (#27153)
++ [box] Add support for box.com (#5949)
++ [nytimes] Add support for cooking.nytimes.com (#27112, #27143)
+* [lbry] Relax URL regular expression (#27144)
++ [rumble] Add support for embed pages (#10785)
++ [skyit] Add support for multiple Sky Italia websites (#26629)
++ [pinterest] Add support for pinterest.com (#25747)
+
+
 version 2020.11.21.1
 
 Core

From d0512ac4c56191b6bdd0c0baf2f907990e3045b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Nov 2020 00:23:14 +0700
Subject: [PATCH 240/340] release 2020.11.24

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          |  6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md |  4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md |  4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           |  6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      |  4 ++--
 ChangeLog                                        |  2 +-
 docs/supportedsites.md                           | 14 ++++++++++++++
 youtube_dl/version.py                            |  2 +-
 8 files changed, 28 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index e2cbaee1c..73155d94e 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.21.1
+ [debug] youtube-dl version 2020.11.24
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index e531e89c0..9e1921149 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 686878c56..40ce65e73 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 17b853716..b98f06c2d 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.21.1
+ [debug] youtube-dl version 2020.11.24
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 109c47925..60bd5f337 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.21.1. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.21.1**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index d00c06195..3894ac2db 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.24
 
 Core
 + [extractor/common] Add generic support for akamai HTTP format extraction
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 53dbc28e5..dea88b5c5 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -59,6 +59,7 @@
  - **ARD:mediathek**
  - **ARDBetaMediathek**
  - **Arkena**
+ - **arte.sky.it**
  - **ArteTV**
  - **ArteTVEmbed**
  - **ArteTVPlaylist**
@@ -110,6 +111,7 @@
  - **Bloomberg**
  - **BokeCC**
  - **BostonGlobe**
+ - **Box**
  - **Bpb**: Bundeszentrale für politische Bildung
  - **BR**: Bayerischer Rundfunk
  - **BravoTV**
@@ -157,6 +159,7 @@
  - **Chilloutzone**
  - **chirbit**
  - **chirbit:profile**
+ - **cielotv.it**
  - **Cinchcast**
  - **Cinemax**
  - **CiscoLiveSearch**
@@ -608,6 +611,7 @@
  - **Nuvid**
  - **NYTimes**
  - **NYTimesArticle**
+ - **NYTimesCooking**
  - **NZZ**
  - **ocw.mit.edu**
  - **OdaTV**
@@ -660,10 +664,13 @@
  - **PicartoVod**
  - **Piksel**
  - **Pinkbike**
+ - **Pinterest**
+ - **PinterestCollection**
  - **Pladform**
  - **Platzi**
  - **PlatziCourse**
  - **play.fm**
+ - **player.sky.it**
  - **PlayPlusTV**
  - **PlaysTV**
  - **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
@@ -755,6 +762,7 @@
  - **RTVNH**
  - **RTVS**
  - **RUHD**
+ - **RumbleEmbed**
  - **rutube**: Rutube videos
  - **rutube:channel**: Rutube channels
  - **rutube:embed**: Rutube embedded videos
@@ -792,6 +800,8 @@
  - **Shared**: shared.sx
  - **ShowRoomLive**
  - **Sina**
+ - **sky.it**
+ - **skyacademy.it**
  - **SkylineWebcams**
  - **SkyNews**
  - **skynewsarabia:article**
@@ -930,6 +940,7 @@
  - **TV2DKBornholmPlay**
  - **TV4**: tv4.se and tv4play.se
  - **TV5MondePlus**: TV5MONDE+
+ - **tv8.it**
  - **TVA**
  - **TVANouvelles**
  - **TVANouvellesArticle**
@@ -1001,6 +1012,8 @@
  - **Viddler**
  - **Videa**
  - **video.google:search**: Google Video search
+ - **video.sky.it**
+ - **video.sky.it:live**
  - **VideoDetective**
  - **videofy.me**
  - **videomore**
@@ -1130,6 +1143,7 @@
  - **YourPorn**
  - **YourUpload**
  - **youtube**: YouTube.com
+ - **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication)
  - **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
  - **youtube:playlist**: YouTube.com playlists
  - **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index d7e901521..e8a816d0d 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.21.1'
+__version__ = '2020.11.24'

From 37258c644f76416b2a09de14c0e74da628534e2e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Nov 2020 02:18:40 +0700
Subject: [PATCH 241/340] [cda] Fix extraction (closes #17803, closes #24458,
 closes #24518, closes #26381)

---
 youtube_dl/extractor/cda.py | 35 ++++++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/cda.py b/youtube_dl/extractor/cda.py
index 0c3af23d5..d67900e62 100644
--- a/youtube_dl/extractor/cda.py
+++ b/youtube_dl/extractor/cda.py
@@ -5,10 +5,16 @@ import codecs
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_chr,
+    compat_ord,
+    compat_urllib_parse_unquote,
+)
 from ..utils import (
     ExtractorError,
     float_or_none,
     int_or_none,
+    merge_dicts,
     multipart_encode,
     parse_duration,
     random_birthday,
@@ -107,8 +113,9 @@ class CDAIE(InfoExtractor):
             r'Odsłony:(?:\s|&nbsp;)*([0-9]+)', webpage,
             'view_count', default=None)
         average_rating = self._search_regex(
-            r'<(?:span|meta)[^>]+itemprop=(["\'])ratingValue\1[^>]*>(?P<rating_value>[0-9.]+)',
-            webpage, 'rating', fatal=False, group='rating_value')
+            (r'<(?:span|meta)[^>]+itemprop=(["\'])ratingValue\1[^>]*>(?P<rating_value>[0-9.]+)',
+             r'<span[^>]+\bclass=["\']rating["\'][^>]*>(?P<rating_value>[0-9.]+)'), webpage, 'rating', fatal=False,
+            group='rating_value')
 
         info_dict = {
             'id': video_id,
@@ -123,6 +130,24 @@ class CDAIE(InfoExtractor):
             'age_limit': 18 if need_confirm_age else 0,
         }
 
+        # Source: https://www.cda.pl/js/player.js?t=1606154898
+        def decrypt_file(a):
+            for p in ('_XDDD', '_CDA', '_ADC', '_CXD', '_QWE', '_Q5', '_IKSDE'):
+                a = a.replace(p, '')
+            a = compat_urllib_parse_unquote(a)
+            b = []
+            for c in a:
+                f = compat_ord(c)
+                b.append(compat_chr(33 + (f + 14) % 94) if 33 <= f and 126 >= f else compat_chr(f))
+            a = ''.join(b)
+            a = a.replace('.cda.mp4', '')
+            for p in ('.2cda.pl', '.3cda.pl'):
+                a = a.replace(p, '.cda.pl')
+            if '/upstream' in a:
+                a = a.replace('/upstream', '.mp4/upstream')
+                return 'https://' + a
+            return 'https://' + a + '.mp4'
+
         def extract_format(page, version):
             json_str = self._html_search_regex(
                 r'player_data=(\\?["\'])(?P<player_data>.+?)\1', page,
@@ -141,6 +166,8 @@ class CDAIE(InfoExtractor):
                 video['file'] = codecs.decode(video['file'], 'rot_13')
                 if video['file'].endswith('adc.mp4'):
                     video['file'] = video['file'].replace('adc.mp4', '.mp4')
+            elif not video['file'].startswith('http'):
+                video['file'] = decrypt_file(video['file'])
             f = {
                 'url': video['file'],
             }
@@ -179,4 +206,6 @@ class CDAIE(InfoExtractor):
 
         self._sort_formats(formats)
 
-        return info_dict
+        info = self._search_json_ld(webpage, video_id, default={})
+
+        return merge_dicts(info_dict, info)

From a86ce9d7a1091b86948feec0ba4a34fa431d7618 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Nov 2020 02:59:49 +0700
Subject: [PATCH 242/340] [nrk] Fix extraction

---
 youtube_dl/extractor/nrk.py | 442 +++++++++++++++++++++---------------
 1 file changed, 257 insertions(+), 185 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 84aacbcda..4a395546f 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -9,6 +9,7 @@ from ..compat import (
     compat_urllib_parse_unquote,
 )
 from ..utils import (
+    determine_ext,
     ExtractorError,
     int_or_none,
     js_to_json,
@@ -16,17 +17,269 @@ from ..utils import (
     parse_age_limit,
     parse_duration,
     try_get,
+    url_or_none,
 )
 
 
 class NRKBaseIE(InfoExtractor):
     _GEO_COUNTRIES = ['NO']
 
-    _api_host = None
+
+class NRKIE(NRKBaseIE):
+    _VALID_URL = r'''(?x)
+                        (?:
+                            nrk:|
+                            https?://
+                                (?:
+                                    (?:www\.)?nrk\.no/video/(?:PS\*|[^_]+_)|
+                                    v8[-.]psapi\.nrk\.no/mediaelement/
+                                )
+                            )
+                            (?P<id>[^?\#&]+)
+                        '''
+
+    _TESTS = [{
+        # video
+        'url': 'http://www.nrk.no/video/PS*150533',
+        'md5': '706f34cdf1322577589e369e522b50ef',
+        'info_dict': {
+            'id': '150533',
+            'ext': 'mp4',
+            'title': 'Dompap og andre fugler i Piip-Show',
+            'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
+            'duration': 262,
+        }
+    }, {
+        # audio
+        'url': 'http://www.nrk.no/video/PS*154915',
+        # MD5 is unstable
+        'info_dict': {
+            'id': '154915',
+            'ext': 'flv',
+            'title': 'Slik høres internett ut når du er blind',
+            'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
+            'duration': 20,
+        }
+    }, {
+        'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
+        'only_matching': True,
+    }, {
+        'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70',
+        'only_matching': True,
+    }, {
+        'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.nrk.no/video/dompap-og-andre-fugler-i-piip-show_150533',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.nrk.no/video/humor/kommentatorboksen-reiser-til-sjos_d1fda11f-a4ad-437a-a374-0398bc84e999',
+        'only_matching': True,
+    }]
+
+    def _extract_from_playback(self, video_id):
+        manifest = self._download_json(
+            'http://psapi.nrk.no/playback/manifest/%s' % video_id,
+            video_id, 'Downloading manifest JSON')
+
+        playable = manifest['playable']
+
+        formats = []
+        for asset in playable['assets']:
+            if not isinstance(asset, dict):
+                continue
+            if asset.get('encrypted'):
+                continue
+            format_url = url_or_none(asset.get('url'))
+            if not format_url:
+                continue
+            if asset.get('format') == 'HLS' or determine_ext(format_url) == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls', fatal=False))
+        self._sort_formats(formats)
+
+        data = self._download_json(
+            'http://psapi.nrk.no/playback/metadata/%s' % video_id,
+            video_id, 'Downloading metadata JSON')
+
+        preplay = data['preplay']
+        titles = preplay['titles']
+        title = titles['title']
+        alt_title = titles.get('subtitle')
+
+        description = preplay.get('description')
+        duration = parse_duration(playable.get('duration')) or parse_duration(data.get('duration'))
+
+        thumbnails = []
+        for image in try_get(
+                preplay, lambda x: x['poster']['images'], list) or []:
+            if not isinstance(image, dict):
+                continue
+            image_url = url_or_none(image.get('url'))
+            if not image_url:
+                continue
+            thumbnails.append({
+                'url': image_url,
+                'width': int_or_none(image.get('pixelWidth')),
+                'height': int_or_none(image.get('pixelHeight')),
+            })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'alt_title': alt_title,
+            'description': description,
+            'duration': duration,
+            'thumbnails': thumbnails,
+            'formats': formats,
+        }
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        return self._extract_from_playback(video_id)
 
+
+class NRKTVIE(NRKBaseIE):
+    IE_DESC = 'NRK TV and NRK Radio'
+    _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
+    _VALID_URL = r'''(?x)
+                        https?://
+                            (?:tv|radio)\.nrk(?:super)?\.no/
+                            (?:serie(?:/[^/]+){1,2}|program)/
+                            (?![Ee]pisodes)%s
+                            (?:/\d{2}-\d{2}-\d{4})?
+                            (?:\#del=(?P<part_id>\d+))?
+                    ''' % _EPISODE_RE
+    _API_HOSTS = ('psapi-ne.nrk.no', 'psapi-we.nrk.no')
+    _TESTS = [{
+        'url': 'https://tv.nrk.no/program/MDDP12000117',
+        'md5': '8270824df46ec629b66aeaa5796b36fb',
+        'info_dict': {
+            'id': 'MDDP12000117AA',
+            'ext': 'mp4',
+            'title': 'Alarm Trolltunga',
+            'description': 'md5:46923a6e6510eefcce23d5ef2a58f2ce',
+            'duration': 2223,
+            'age_limit': 6,
+        },
+    }, {
+        'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
+        'md5': '9a167e54d04671eb6317a37b7bc8a280',
+        'info_dict': {
+            'id': 'MUHH48000314AA',
+            'ext': 'mp4',
+            'title': '20 spørsmål 23.05.2014',
+            'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
+            'duration': 1741,
+            'series': '20 spørsmål',
+            'episode': '23.05.2014',
+        },
+        'skip': 'NoProgramRights',
+    }, {
+        'url': 'https://tv.nrk.no/program/mdfp15000514',
+        'info_dict': {
+            'id': 'MDFP15000514CA',
+            'ext': 'mp4',
+            'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
+            'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
+            'duration': 4605,
+            'series': 'Kunnskapskanalen',
+            'episode': '24.05.2014',
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        # single playlist video
+        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
+        'info_dict': {
+            'id': 'MSPO40010515-part2',
+            'ext': 'flv',
+            'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
+            'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'expected_warnings': ['Video is geo restricted'],
+        'skip': 'particular part is not supported currently',
+    }, {
+        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
+        'playlist': [{
+            'info_dict': {
+                'id': 'MSPO40010515AH',
+                'ext': 'mp4',
+                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 1)',
+                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
+                'duration': 772,
+                'series': 'Tour de Ski',
+                'episode': '06.01.2015',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        }, {
+            'info_dict': {
+                'id': 'MSPO40010515BH',
+                'ext': 'mp4',
+                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 2)',
+                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
+                'duration': 6175,
+                'series': 'Tour de Ski',
+                'episode': '06.01.2015',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        }],
+        'info_dict': {
+            'id': 'MSPO40010515',
+            'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
+            'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
+        },
+        'expected_warnings': ['Video is geo restricted'],
+    }, {
+        'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
+        'info_dict': {
+            'id': 'KMTE50001317AA',
+            'ext': 'mp4',
+            'title': 'Anno 13:30',
+            'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa',
+            'duration': 2340,
+            'series': 'Anno',
+            'episode': '13:30',
+            'season_number': 3,
+            'episode_number': 13,
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017',
+        'info_dict': {
+            'id': 'MUHH46000317AA',
+            'ext': 'mp4',
+            'title': 'Nytt på Nytt 27.01.2017',
+            'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b',
+            'duration': 1796,
+            'series': 'Nytt på nytt',
+            'episode': '27.01.2017',
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
+        'only_matching': True,
+    }, {
+        'url': 'https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller',
+        'only_matching': True,
+    }]
+
+    _api_host = None
+
+    def _extract_from_mediaelement(self, video_id):
         api_hosts = (self._api_host, ) if self._api_host else self._API_HOSTS
 
         for api_host in api_hosts:
@@ -195,190 +448,9 @@ class NRKBaseIE(InfoExtractor):
 
         return self.playlist_result(entries, video_id, title, description)
 
-
-class NRKIE(NRKBaseIE):
-    _VALID_URL = r'''(?x)
-                        (?:
-                            nrk:|
-                            https?://
-                                (?:
-                                    (?:www\.)?nrk\.no/video/PS\*|
-                                    v8[-.]psapi\.nrk\.no/mediaelement/
-                                )
-                            )
-                            (?P<id>[^?#&]+)
-                        '''
-    _API_HOSTS = ('psapi.nrk.no', 'v8-psapi.nrk.no')
-    _TESTS = [{
-        # video
-        'url': 'http://www.nrk.no/video/PS*150533',
-        'md5': '706f34cdf1322577589e369e522b50ef',
-        'info_dict': {
-            'id': '150533',
-            'ext': 'mp4',
-            'title': 'Dompap og andre fugler i Piip-Show',
-            'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
-            'duration': 262,
-        }
-    }, {
-        # audio
-        'url': 'http://www.nrk.no/video/PS*154915',
-        # MD5 is unstable
-        'info_dict': {
-            'id': '154915',
-            'ext': 'flv',
-            'title': 'Slik høres internett ut når du er blind',
-            'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
-            'duration': 20,
-        }
-    }, {
-        'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
-        'only_matching': True,
-    }, {
-        'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70',
-        'only_matching': True,
-    }, {
-        'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
-        'only_matching': True,
-    }]
-
-
-class NRKTVIE(NRKBaseIE):
-    IE_DESC = 'NRK TV and NRK Radio'
-    _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
-    _VALID_URL = r'''(?x)
-                        https?://
-                            (?:tv|radio)\.nrk(?:super)?\.no/
-                            (?:serie(?:/[^/]+){1,2}|program)/
-                            (?![Ee]pisodes)%s
-                            (?:/\d{2}-\d{2}-\d{4})?
-                            (?:\#del=(?P<part_id>\d+))?
-                    ''' % _EPISODE_RE
-    _API_HOSTS = ('psapi-ne.nrk.no', 'psapi-we.nrk.no')
-    _TESTS = [{
-        'url': 'https://tv.nrk.no/program/MDDP12000117',
-        'md5': '8270824df46ec629b66aeaa5796b36fb',
-        'info_dict': {
-            'id': 'MDDP12000117AA',
-            'ext': 'mp4',
-            'title': 'Alarm Trolltunga',
-            'description': 'md5:46923a6e6510eefcce23d5ef2a58f2ce',
-            'duration': 2223,
-            'age_limit': 6,
-        },
-    }, {
-        'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
-        'md5': '9a167e54d04671eb6317a37b7bc8a280',
-        'info_dict': {
-            'id': 'MUHH48000314AA',
-            'ext': 'mp4',
-            'title': '20 spørsmål 23.05.2014',
-            'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
-            'duration': 1741,
-            'series': '20 spørsmål',
-            'episode': '23.05.2014',
-        },
-        'skip': 'NoProgramRights',
-    }, {
-        'url': 'https://tv.nrk.no/program/mdfp15000514',
-        'info_dict': {
-            'id': 'MDFP15000514CA',
-            'ext': 'mp4',
-            'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
-            'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
-            'duration': 4605,
-            'series': 'Kunnskapskanalen',
-            'episode': '24.05.2014',
-        },
-        'params': {
-            'skip_download': True,
-        },
-    }, {
-        # single playlist video
-        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
-        'info_dict': {
-            'id': 'MSPO40010515-part2',
-            'ext': 'flv',
-            'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
-            'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'expected_warnings': ['Video is geo restricted'],
-        'skip': 'particular part is not supported currently',
-    }, {
-        'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
-        'playlist': [{
-            'info_dict': {
-                'id': 'MSPO40010515AH',
-                'ext': 'mp4',
-                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 1)',
-                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
-                'duration': 772,
-                'series': 'Tour de Ski',
-                'episode': '06.01.2015',
-            },
-            'params': {
-                'skip_download': True,
-            },
-        }, {
-            'info_dict': {
-                'id': 'MSPO40010515BH',
-                'ext': 'mp4',
-                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 2)',
-                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
-                'duration': 6175,
-                'series': 'Tour de Ski',
-                'episode': '06.01.2015',
-            },
-            'params': {
-                'skip_download': True,
-            },
-        }],
-        'info_dict': {
-            'id': 'MSPO40010515',
-            'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
-            'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
-        },
-        'expected_warnings': ['Video is geo restricted'],
-    }, {
-        'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
-        'info_dict': {
-            'id': 'KMTE50001317AA',
-            'ext': 'mp4',
-            'title': 'Anno 13:30',
-            'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa',
-            'duration': 2340,
-            'series': 'Anno',
-            'episode': '13:30',
-            'season_number': 3,
-            'episode_number': 13,
-        },
-        'params': {
-            'skip_download': True,
-        },
-    }, {
-        'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017',
-        'info_dict': {
-            'id': 'MUHH46000317AA',
-            'ext': 'mp4',
-            'title': 'Nytt på Nytt 27.01.2017',
-            'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b',
-            'duration': 1796,
-            'series': 'Nytt på nytt',
-            'episode': '27.01.2017',
-        },
-        'params': {
-            'skip_download': True,
-        },
-    }, {
-        'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
-        'only_matching': True,
-    }, {
-        'url': 'https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller',
-        'only_matching': True,
-    }]
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        return self._extract_from_mediaelement(video_id)
 
 
 class NRKTVEpisodeIE(InfoExtractor):

From f4415faa4690cf71d61d90ad7e9a7f9980be9b50 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 24 Nov 2020 04:16:29 +0700
Subject: [PATCH 243/340] [downloader/fragment] Set final file's mtime
 according to last fragment's Last-Modified header (closes #11718, closes
 #18384, closes #27138)

---
 youtube_dl/downloader/fragment.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/downloader/fragment.py b/youtube_dl/downloader/fragment.py
index 02f35459e..35c76feba 100644
--- a/youtube_dl/downloader/fragment.py
+++ b/youtube_dl/downloader/fragment.py
@@ -97,12 +97,15 @@ class FragmentFD(FileDownloader):
 
     def _download_fragment(self, ctx, frag_url, info_dict, headers=None):
         fragment_filename = '%s-Frag%d' % (ctx['tmpfilename'], ctx['fragment_index'])
-        success = ctx['dl'].download(fragment_filename, {
+        fragment_info_dict = {
             'url': frag_url,
             'http_headers': headers or info_dict.get('http_headers'),
-        })
+        }
+        success = ctx['dl'].download(fragment_filename, fragment_info_dict)
         if not success:
             return False, None
+        if fragment_info_dict.get('filetime'):
+            ctx['fragment_filetime'] = fragment_info_dict.get('filetime')
         down, frag_sanitized = sanitize_open(fragment_filename, 'rb')
         ctx['fragment_filename_sanitized'] = frag_sanitized
         frag_content = down.read()
@@ -258,6 +261,13 @@ class FragmentFD(FileDownloader):
             downloaded_bytes = ctx['complete_frags_downloaded_bytes']
         else:
             self.try_rename(ctx['tmpfilename'], ctx['filename'])
+            if self.params.get('updatetime', True):
+                filetime = ctx.get('fragment_filetime')
+                if filetime:
+                    try:
+                        os.utime(ctx['filename'], (time.time(), filetime))
+                    except Exception:
+                        pass
             downloaded_bytes = os.path.getsize(encodeFilename(ctx['filename']))
 
         self._hook_progress({

From 01c92973ddebe6429f5a03855e41c412889c96dc Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 23 Nov 2020 22:44:34 +0100
Subject: [PATCH 244/340] [youtube] improve music metadata and license
 extraction (closes #26013)

---
 youtube_dl/extractor/youtube.py | 30 +++++++++++++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 7324d8080..1e3ff7d44 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2162,7 +2162,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         # Youtube Music Auto-generated description
         release_date = release_year = None
         if video_description:
-            mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
+            mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
             if mobj:
                 if not track:
                     track = mobj.group('track').strip()
@@ -2179,6 +2179,34 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 if release_year:
                     release_year = int(release_year)
 
+        yt_initial_data = self._extract_yt_initial_data(video_id, video_webpage)
+        contents = try_get(yt_initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
+        for content in contents:
+            rows = try_get(content, lambda x: x['videoSecondaryInfoRenderer']['metadataRowContainer']['metadataRowContainerRenderer']['rows'], list) or []
+            multiple_songs = False
+            for row in rows:
+                if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
+                    multiple_songs = True
+                    break
+            for row in rows:
+                mrr = row.get('metadataRowRenderer') or {}
+                mrr_title = try_get(
+                    mrr, lambda x: x['title']['simpleText'], compat_str)
+                mrr_contents = try_get(
+                    mrr, lambda x: x['contents'][0], dict) or {}
+                mrr_contents_text = try_get(mrr_contents, [lambda x: x['simpleText'], lambda x: x['runs'][0]['text']], compat_str)
+                if not (mrr_title and mrr_contents_text):
+                    continue
+                if mrr_title == 'License':
+                    video_license = mrr_contents_text
+                elif not multiple_songs:
+                    if mrr_title == 'Album':
+                        album = mrr_contents_text
+                    elif mrr_title == 'Artist':
+                        artist = mrr_contents_text
+                    elif mrr_title == 'Song':
+                        track = mrr_contents_text
+
         m_episode = re.search(
             r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
             video_webpage)

From e24ebeafd1c59336705bd31b9f1ff46676273e1d Mon Sep 17 00:00:00 2001
From: Joshua Lochner <admin@xenova.com>
Date: Tue, 24 Nov 2020 19:27:33 +0200
Subject: [PATCH 245/340] [medaltv] Add new extractor (#27149)

---
 youtube_dl/extractor/extractors.py |   1 +
 youtube_dl/extractor/medaltv.py    | 138 +++++++++++++++++++++++++++++
 2 files changed, 139 insertions(+)
 create mode 100644 youtube_dl/extractor/medaltv.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 71178da7e..688437dc2 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -606,6 +606,7 @@ from .markiza import (
 from .massengeschmacktv import MassengeschmackTVIE
 from .matchtv import MatchTVIE
 from .mdr import MDRIE
+from .medaltv import MedalTVIE
 from .mediaset import MediasetIE
 from .mediasite import (
     MediasiteIE,
diff --git a/youtube_dl/extractor/medaltv.py b/youtube_dl/extractor/medaltv.py
new file mode 100644
index 000000000..06f7b6e92
--- /dev/null
+++ b/youtube_dl/extractor/medaltv.py
@@ -0,0 +1,138 @@
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    try_get,
+    float_or_none,
+    int_or_none
+)
+
+
+class MedalTVIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?medal\.tv/clips/(?P<id>[0-9]+)'
+    _TESTS = [{
+        'url': 'https://medal.tv/clips/34934644/3Is9zyGMoBMr',
+        'md5': '7b07b064331b1cf9e8e5c52a06ae68fa',
+        'info_dict': {
+            'id': '34934644',
+            'ext': 'mp4',
+            'title': 'Quad Cold',
+            'description': 'Medal,https://medal.tv/desktop/',
+            'uploader': 'MowgliSB',
+            'timestamp': 1603165266,
+            'upload_date': '20201020',
+            'uploader_id': 10619174,
+        }
+    }, {
+        'url': 'https://medal.tv/clips/36787208',
+        'md5': 'b6dc76b78195fff0b4f8bf4a33ec2148',
+        'info_dict': {
+            'id': '36787208',
+            'ext': 'mp4',
+            'title': 'u tk me i tk u bigger',
+            'description': 'Medal,https://medal.tv/desktop/',
+            'uploader': 'Mimicc',
+            'timestamp': 1605580939,
+            'upload_date': '20201117',
+            'uploader_id': 5156321,
+        }
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        hydration_data = self._search_regex(
+            r'<script[^>]*>\s*(?:var\s*)?hydrationData\s*=\s*({.+?})\s*</script>',
+            webpage, 'hydration data', default='{}')
+        parsed = self._parse_json(hydration_data, video_id)
+
+        clip_info = try_get(parsed, lambda x: x['clips'][video_id], dict) or {}
+        if not clip_info:
+            raise ExtractorError('Could not find video information.',
+                                 video_id=video_id)
+
+        width = int_or_none(clip_info.get('sourceWidth'))
+        height = int_or_none(clip_info.get('sourceHeight'))
+
+        aspect_ratio = (width / height) if(width and height) else (16 / 9)
+
+        # ordered from lowest to highest resolution
+        heights = (144, 240, 360, 480, 720, 1080)
+
+        formats = []
+        thumbnails = []
+
+        for height in heights:
+            format_key = '{0}p'.format(height)
+            video_key = 'contentUrl{0}'.format(format_key)
+            thumbnail_key = 'thumbnail{0}'.format(format_key)
+            width = int(round(aspect_ratio * height))
+
+            # Second condition needed as sometimes medal says
+            # they have a format when in fact it is another format.
+            format_url = clip_info.get(video_key)
+            if(format_url and format_key in format_url):
+                formats.append({
+                    'url': format_url,
+                    'format_id': format_key,
+                    'width': width,
+                    'height': height
+                })
+
+            thumbnail_url = clip_info.get(thumbnail_key)
+            if(thumbnail_url and format_key in thumbnail_url):
+                thumbnails.append({
+                    'id': format_key,
+                    'url': thumbnail_url,
+                    'width': width,
+                    'height': height
+                })
+
+        # add source to formats
+        source_url = clip_info.get('contentUrl')
+        if(source_url):
+            formats.append({
+                'url': source_url,
+                'format_id': 'source',
+                'width': width,
+                'height': height
+            })
+
+        error = clip_info.get('error')
+        if not formats and error:
+            if(error == 404):
+                raise ExtractorError('That clip does not exist.',
+                                     expected=True, video_id=video_id)
+            else:
+                raise ExtractorError('An unknown error occurred ({0}).'.format(error),
+                                     video_id=video_id)
+
+        # Necessary because the id of the author is not known in advance.
+        # Won't raise an issue if no profile can be found as this is optional.
+        author_info = try_get(parsed,
+                              lambda x: list(x['profiles'].values())[0], dict
+                              ) or {}
+        author_id = author_info.get('id')
+        author_url = 'https://medal.tv/users/{0}'.format(author_id) if author_id else None
+
+        return {
+            'id': video_id,
+            'title': clip_info.get('contentTitle'),
+            'formats': formats,
+            'thumbnails': thumbnails,
+            'description': clip_info.get('contentDescription'),
+
+            'uploader': author_info.get('displayName'),
+            'timestamp': float_or_none(clip_info.get('created'), 1000),
+            'uploader_id': author_id,
+            'uploader_url': author_url,
+
+            'duration': float_or_none(clip_info.get('videoLengthSeconds')),
+            'view_count': int_or_none(clip_info.get('views')),
+            'like_count': int_or_none(clip_info.get('likes')),
+            'comment_count': int_or_none(clip_info.get('comments'))
+        }
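
The loop above probes one key per ladder height (contentUrl144p ... contentUrl1080p) and derives each width from the clip's source aspect ratio. A standalone sketch of that calculation, using an invented clip dict whose field names mirror the extractor:

    # Invented clip data; only the key naming and width math mirror the code above.
    clip_info = {
        'sourceWidth': 1920,
        'sourceHeight': 1080,
        'contentUrl720p': 'https://example.com/clip/720p/abc.mp4',
    }

    aspect_ratio = clip_info['sourceWidth'] / clip_info['sourceHeight']  # 16:9 here

    for height in (144, 240, 360, 480, 720, 1080):
        format_key = '{0}p'.format(height)
        format_url = clip_info.get('contentUrl{0}'.format(format_key))
        # Skip missing renditions and URLs whose path disagrees with the key.
        if not format_url or format_key not in format_url:
            continue
        width = int(round(aspect_ratio * height))
        print(format_key, width, height, format_url)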

From 579d43951d7d60b5027ae224973b5f541b51c74a Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 24 Nov 2020 18:29:46 +0100
Subject: [PATCH 246/340] [medaltv] improve extraction

---
 youtube_dl/extractor/medaltv.py | 131 +++++++++++++++-----------------
 1 file changed, 62 insertions(+), 69 deletions(-)

diff --git a/youtube_dl/extractor/medaltv.py b/youtube_dl/extractor/medaltv.py
index 06f7b6e92..1603b55f6 100644
--- a/youtube_dl/extractor/medaltv.py
+++ b/youtube_dl/extractor/medaltv.py
@@ -1,13 +1,16 @@
 # coding: utf-8
-
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
-    try_get,
     float_or_none,
-    int_or_none
+    int_or_none,
+    str_or_none,
+    try_get,
 )
 
 
@@ -45,94 +48,84 @@ class MedalTVIE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        hydration_data = self._search_regex(
+        hydration_data = self._parse_json(self._search_regex(
             r'<script[^>]*>\s*(?:var\s*)?hydrationData\s*=\s*({.+?})\s*</script>',
-            webpage, 'hydration data', default='{}')
-        parsed = self._parse_json(hydration_data, video_id)
+            webpage, 'hydration data', default='{}'), video_id)
 
-        clip_info = try_get(parsed, lambda x: x['clips'][video_id], dict) or {}
-        if not clip_info:
-            raise ExtractorError('Could not find video information.',
-                                 video_id=video_id)
+        clip = try_get(
+            hydration_data, lambda x: x['clips'][video_id], dict) or {}
+        if not clip:
+            raise ExtractorError(
+                'Could not find video information.', video_id=video_id)
 
-        width = int_or_none(clip_info.get('sourceWidth'))
-        height = int_or_none(clip_info.get('sourceHeight'))
+        title = clip['contentTitle']
 
-        aspect_ratio = (width / height) if(width and height) else (16 / 9)
+        source_width = int_or_none(clip.get('sourceWidth'))
+        source_height = int_or_none(clip.get('sourceHeight'))
 
-        # ordered from lowest to highest resolution
-        heights = (144, 240, 360, 480, 720, 1080)
+        aspect_ratio = source_width / source_height if source_width and source_height else 16 / 9
 
-        formats = []
-        thumbnails = []
-
-        for height in heights:
-            format_key = '{0}p'.format(height)
-            video_key = 'contentUrl{0}'.format(format_key)
-            thumbnail_key = 'thumbnail{0}'.format(format_key)
+        def add_item(container, item_url, height, id_key='format_id', item_id=None):
+            item_id = item_id or '%dp' % height
+            if item_id not in item_url:
+                return
             width = int(round(aspect_ratio * height))
-
-            # Second condition needed as sometimes medal says
-            # they have a format when in fact it is another format.
-            format_url = clip_info.get(video_key)
-            if(format_url and format_key in format_url):
-                formats.append({
-                    'url': format_url,
-                    'format_id': format_key,
-                    'width': width,
-                    'height': height
-                })
-
-            thumbnail_url = clip_info.get(thumbnail_key)
-            if(thumbnail_url and format_key in thumbnail_url):
-                thumbnails.append({
-                    'id': format_key,
-                    'url': thumbnail_url,
-                    'width': width,
-                    'height': height
-                })
-
-        # add source to formats
-        source_url = clip_info.get('contentUrl')
-        if(source_url):
-            formats.append({
-                'url': source_url,
-                'format_id': 'source',
+            container.append({
+                'url': item_url,
+                id_key: item_id,
                 'width': width,
                 'height': height
             })
 
-        error = clip_info.get('error')
+        formats = []
+        thumbnails = []
+        for k, v in clip.items():
+            if not (v and isinstance(v, compat_str)):
+                continue
+            mobj = re.match(r'(contentUrl|thumbnail)(?:(\d+)p)?$', k)
+            if not mobj:
+                continue
+            prefix = mobj.group(1)
+            height = int_or_none(mobj.group(2))
+            if prefix == 'contentUrl':
+                add_item(
+                    formats, v, height or source_height,
+                    item_id=None if height else 'source')
+            elif prefix == 'thumbnail':
+                add_item(thumbnails, v, height, 'id')
+
+        error = clip.get('error')
         if not formats and error:
-            if(error == 404):
-                raise ExtractorError('That clip does not exist.',
-                                     expected=True, video_id=video_id)
+            if error == 404:
+                raise ExtractorError(
+                    'That clip does not exist.',
+                    expected=True, video_id=video_id)
             else:
-                raise ExtractorError('An unknown error occurred ({0}).'.format(error),
-                                     video_id=video_id)
+                raise ExtractorError(
+                    'An unknown error occurred ({0}).'.format(error),
+                    video_id=video_id)
+
+        self._sort_formats(formats)
 
         # Necessary because the id of the author is not known in advance.
         # Won't raise an issue if no profile can be found as this is optional.
-        author_info = try_get(parsed,
-                              lambda x: list(x['profiles'].values())[0], dict
-                              ) or {}
-        author_id = author_info.get('id')
+        author = try_get(
+            hydration_data, lambda x: list(x['profiles'].values())[0], dict) or {}
+        author_id = str_or_none(author.get('id'))
         author_url = 'https://medal.tv/users/{0}'.format(author_id) if author_id else None
 
         return {
             'id': video_id,
-            'title': clip_info.get('contentTitle'),
+            'title': title,
             'formats': formats,
             'thumbnails': thumbnails,
-            'description': clip_info.get('contentDescription'),
-
-            'uploader': author_info.get('displayName'),
-            'timestamp': float_or_none(clip_info.get('created'), 1000),
+            'description': clip.get('contentDescription'),
+            'uploader': author.get('displayName'),
+            'timestamp': float_or_none(clip.get('created'), 1000),
             'uploader_id': author_id,
             'uploader_url': author_url,
-
-            'duration': float_or_none(clip_info.get('videoLengthSeconds')),
-            'view_count': int_or_none(clip_info.get('views')),
-            'like_count': int_or_none(clip_info.get('likes')),
-            'comment_count': int_or_none(clip_info.get('comments'))
+            'duration': int_or_none(clip.get('videoLengthSeconds')),
+            'view_count': int_or_none(clip.get('views')),
+            'like_count': int_or_none(clip.get('likes')),
+            'comment_count': int_or_none(clip.get('comments')),
         }
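
The rewrite drops the fixed height ladder and instead classifies every string value in the clip dict with one regex, treating a bare contentUrl as the source rendition. A small sketch of that key-matching step on hypothetical keys:

    import re

    # Hypothetical keys; only the classification regex mirrors the patch above.
    for k in ('contentUrl', 'contentUrl720p', 'thumbnail360p', 'contentTitle'):
        mobj = re.match(r'(contentUrl|thumbnail)(?:(\d+)p)?$', k)
        if not mobj:
            continue  # e.g. contentTitle is neither a format nor a thumbnail
        prefix, height = mobj.group(1), mobj.group(2)
        print(prefix, height or 'source')
    # contentUrl source / contentUrl 720 / thumbnail 360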

From 6c35de4c6bc13c307215cd2f8977b7ac09de28fb Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 24 Nov 2020 22:49:04 +0100
Subject: [PATCH 247/340] [bbc] fix BBC News videos extraction

---
 youtube_dl/extractor/bbc.py | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
index 002c39c39..ca93b7be5 100644
--- a/youtube_dl/extractor/bbc.py
+++ b/youtube_dl/extractor/bbc.py
@@ -981,7 +981,7 @@ class BBCIE(BBCCoUkIE):
         group_id = self._search_regex(
             r'<div[^>]+\bclass=["\']video["\'][^>]+\bdata-pid=["\'](%s)' % self._ID_REGEX,
             webpage, 'group id', default=None)
-        if playlist_id:
+        if group_id:
             return self.url_result(
                 'https://www.bbc.co.uk/programmes/%s' % group_id,
                 ie=BBCCoUkIE.ie_key())
@@ -1118,6 +1118,39 @@ class BBCIE(BBCCoUkIE):
                 return self.playlist_result(
                     entries, playlist_id, playlist_title, playlist_description)
 
+        initial_data = self._parse_json(self._search_regex(
+            r'window\.__INITIAL_DATA__\s*=\s*({.+?});', webpage,
+            'preload state', default='{}'), playlist_id, fatal=False)
+        if initial_data:
+            def parse_media(media):
+                if not media:
+                    return
+                for item in (try_get(media, lambda x: x['media']['items'], list) or []):
+                    item_id = item.get('id')
+                    item_title = item.get('title')
+                    if not (item_id and item_title):
+                        continue
+                    formats, subtitles = self._download_media_selector(item_id)
+                    self._sort_formats(formats)
+                    entries.append({
+                        'id': item_id,
+                        'title': item_title,
+                        'thumbnail': item.get('holdingImageUrl'),
+                        'formats': formats,
+                        'subtitles': subtitles,
+                    })
+            for resp in (initial_data.get('data') or {}).values():
+                name = resp.get('name')
+                if name == 'media-experience':
+                    parse_media(try_get(resp, lambda x: x['data']['initialItem']['mediaItem'], dict))
+                elif name == 'article':
+                    for block in (try_get(resp, lambda x: x['data']['blocks'], list) or []):
+                        if block.get('type') != 'media':
+                            continue
+                        parse_media(block.get('model'))
+            return self.playlist_result(
+                entries, playlist_id, playlist_title, playlist_description)
+
         def extract_all(pattern):
             return list(filter(None, map(
                 lambda s: self._parse_json(s, playlist_id, fatal=False),
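
The new block pulls window.__INITIAL_DATA__ out of the page with a regex, parses it as JSON and walks it defensively. A rough standalone sketch of that pattern, with an invented page snippet and a minimal stand-in for youtube_dl.utils.try_get:

    import json
    import re

    # Invented page fragment; only the extraction pattern mirrors the patch above.
    webpage = ('<script>window.__INITIAL_DATA__ = '
               '{"data": {"k": {"name": "article", "data": {"blocks": []}}}};</script>')

    raw = re.search(r'window\.__INITIAL_DATA__\s*=\s*({.+?});', webpage)
    initial_data = json.loads(raw.group(1)) if raw else {}

    def try_get(src, getter):
        # Stripped-down stand-in for youtube_dl.utils.try_get.
        try:
            return getter(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            return None

    for resp in (initial_data.get('data') or {}).values():
        if resp.get('name') == 'article':
            print(try_get(resp, lambda x: x['data']['blocks']))  # []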

From a7ea88537a8bc8843636f21eb9bd9fa04d5060c5 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 24 Nov 2020 22:54:08 +0100
Subject: [PATCH 248/340] [bbc] fix BBC Three clip extraction

---
 youtube_dl/extractor/bbc.py | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
index ca93b7be5..54cbcdc8e 100644
--- a/youtube_dl/extractor/bbc.py
+++ b/youtube_dl/extractor/bbc.py
@@ -1092,10 +1092,26 @@ class BBCIE(BBCCoUkIE):
             self._search_regex(
                 r'(?s)bbcthreeConfig\s*=\s*({.+?})\s*;\s*<', webpage,
                 'bbcthree config', default='{}'),
-            playlist_id, transform_source=js_to_json, fatal=False)
-        if bbc3_config:
+            playlist_id, transform_source=js_to_json, fatal=False) or {}
+        payload = bbc3_config.get('payload') or {}
+        if payload:
+            clip = payload.get('currentClip') or {}
+            clip_vpid = clip.get('vpid')
+            clip_title = clip.get('title')
+            if clip_vpid and clip_title:
+                formats, subtitles = self._download_media_selector(clip_vpid)
+                self._sort_formats(formats)
+                return {
+                    'id': clip_vpid,
+                    'title': clip_title,
+                    'thumbnail': dict_get(clip, ('poster', 'imageUrl')),
+                    'description': clip.get('description'),
+                    'duration': parse_duration(clip.get('duration')),
+                    'formats': formats,
+                    'subtitles': subtitles,
+                }
             bbc3_playlist = try_get(
-                bbc3_config, lambda x: x['payload']['content']['bbcMedia']['playlist'],
+                payload, lambda x: x['content']['bbcMedia']['playlist'],
                 dict)
             if bbc3_playlist:
                 playlist_title = bbc3_playlist.get('title') or playlist_title

From 97c5be383c78da84efdb6ea9334ee95b198a2f1d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 24 Nov 2020 23:25:03 +0100
Subject: [PATCH 249/340] [viki] fix video API request (closes #27184)

---
 youtube_dl/extractor/viki.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index a003b7af8..a311f21ef 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -20,6 +20,7 @@ from ..utils import (
     parse_age_limit,
     parse_iso8601,
     sanitized_Request,
+    std_headers,
 )
 
 
@@ -226,8 +227,10 @@ class VikiIE(VikiBaseIE):
 
         resp = self._download_json(
             'https://www.viki.com/api/videos/' + video_id,
-            video_id, 'Downloading video JSON',
-            headers={'x-viki-app-ver': '4.0.57'})
+            video_id, 'Downloading video JSON', headers={
+                'x-client-user-agent': std_headers['User-Agent'],
+                'x-viki-app-ver': '4.0.57',
+            })
         video = resp['video']
 
         self._check_errors(video)

From 836c810716cca64673b2cb85a2b489f2dd0a15ef Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 25 Nov 2020 11:26:26 +0100
Subject: [PATCH 250/340] [vlive] Add support for post URLs (closes
 #27122) (closes #27123)

---
 youtube_dl/extractor/vlive.py | 101 ++++++++++++++++++++++++++++++----
 1 file changed, 90 insertions(+), 11 deletions(-)

diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py
index df1dc78dd..111b8c1c3 100644
--- a/youtube_dl/extractor/vlive.py
+++ b/youtube_dl/extractor/vlive.py
@@ -13,6 +13,8 @@ from ..utils import (
     ExtractorError,
     int_or_none,
     merge_dicts,
+    str_or_none,
+    strip_or_none,
     try_get,
     urlencode_postdata,
 )
@@ -103,23 +105,23 @@ class VLiveIE(VLiveBaseIE):
         query = {'appId': self._APP_ID}
         if fields:
             query['fields'] = fields
-        return self._download_json(
-            'https://www.vlive.tv/globalv-web/vam-web/' + path_template % video_id, video_id,
-            'Downloading %s JSON metadata' % path_template.split('/')[-1].split('-')[0],
-            headers={'Referer': 'https://www.vlive.tv/'}, query=query)
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-
         try:
-            post = self._call_api(
-                'post/v1.0/officialVideoPost-%s', video_id,
-                'author{nickname},channel{channelCode,channelName},officialVideo{commentCount,exposeStatus,likeCount,playCount,playTime,status,title,type,vodId}')
+            return self._download_json(
+                'https://www.vlive.tv/globalv-web/vam-web/' + path_template % video_id, video_id,
+                'Downloading %s JSON metadata' % path_template.split('/')[-1].split('-')[0],
+                headers={'Referer': 'https://www.vlive.tv/'}, query=query)
         except ExtractorError as e:
             if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                 self.raise_login_required(json.loads(e.cause.read().decode())['message'])
             raise
 
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        post = self._call_api(
+            'post/v1.0/officialVideoPost-%s', video_id,
+            'author{nickname},channel{channelCode,channelName},officialVideo{commentCount,exposeStatus,likeCount,playCount,playTime,status,title,type,vodId}')
+
         video = post['officialVideo']
 
         def get_common_fields():
@@ -170,6 +172,83 @@ class VLiveIE(VLiveBaseIE):
                 raise ExtractorError('Unknown status ' + status)
 
 
+class VLivePostIE(VLiveIE):
+    IE_NAME = 'vlive:post'
+    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/post/(?P<id>\d-\d+)'
+    _TESTS = [{
+        # uploadType = SOS
+        'url': 'https://www.vlive.tv/post/1-20088044',
+        'info_dict': {
+            'id': '1-20088044',
+            'title': 'Hola estrellitas la tierra les dice hola (si era así no?) Ha...',
+            'description': 'md5:fab8a1e50e6e51608907f46c7fa4b407',
+        },
+        'playlist_count': 3,
+    }, {
+        # uploadType = V
+        'url': 'https://www.vlive.tv/post/1-20087926',
+        'info_dict': {
+            'id': '1-20087926',
+            'title': 'James Corden: And so, the baby becamos the Papa💜😭💪😭',
+        },
+        'playlist_count': 1,
+    }]
+    _FVIDEO_TMPL = 'fvideo/v1.0/fvideo-%%s/%s'
+    _SOS_TMPL = _FVIDEO_TMPL % 'sosPlayInfo'
+    _INKEY_TMPL = _FVIDEO_TMPL % 'inKey'
+
+    def _real_extract(self, url):
+        post_id = self._match_id(url)
+
+        post = self._call_api(
+            'post/v1.0/post-%s', post_id,
+            'attachments{video},officialVideo{videoSeq},plainBody,title')
+
+        video_seq = str_or_none(try_get(
+            post, lambda x: x['officialVideo']['videoSeq']))
+        if video_seq:
+            return self.url_result(
+                'http://www.vlive.tv/video/' + video_seq,
+                VLiveIE.ie_key(), video_seq)
+
+        title = post['title']
+        entries = []
+        for idx, video in enumerate(post['attachments']['video'].values()):
+            video_id = video.get('videoId')
+            if not video_id:
+                continue
+            upload_type = video.get('uploadType')
+            upload_info = video.get('uploadInfo') or {}
+            entry = None
+            if upload_type == 'SOS':
+                download = self._call_api(
+                    self._SOS_TMPL, video_id)['videoUrl']['download']
+                formats = []
+                for f_id, f_url in download.items():
+                    formats.append({
+                        'format_id': f_id,
+                        'url': f_url,
+                        'height': int_or_none(f_id[:-1]),
+                    })
+                self._sort_formats(formats)
+                entry = {
+                    'formats': formats,
+                    'id': video_id,
+                    'thumbnail': upload_info.get('imageUrl'),
+                }
+            elif upload_type == 'V':
+                vod_id = upload_info.get('videoId')
+                if not vod_id:
+                    continue
+                inkey = self._call_api(self._INKEY_TMPL, video_id)['inKey']
+                entry = self._extract_video_info(video_id, vod_id, inkey)
+            if entry:
+                entry['title'] = '%s_part%s' % (title, idx)
+                entries.append(entry)
+        return self.playlist_result(
+            entries, post_id, title, strip_or_none(post.get('plainBody')))
+
+
 class VLiveChannelIE(VLiveBaseIE):
     IE_NAME = 'vlive:channel'
     _VALID_URL = r'https?://(?:channels\.vlive\.tv|(?:(?:www|m)\.)?vlive\.tv/channel)/(?P<id>[0-9A-Z]+)'
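
_FVIDEO_TMPL above escapes one placeholder as %%s, so the first substitution fills in the endpoint name while leaving a %s for _call_api to format with the video id later. A quick illustration of that two-stage formatting (the id value is made up):

    # '%%s' survives the first % substitution as a literal '%s'.
    FVIDEO_TMPL = 'fvideo/v1.0/fvideo-%%s/%s'
    SOS_TMPL = FVIDEO_TMPL % 'sosPlayInfo'   # 'fvideo/v1.0/fvideo-%s/sosPlayInfo'
    INKEY_TMPL = FVIDEO_TMPL % 'inKey'       # 'fvideo/v1.0/fvideo-%s/inKey'

    video_id = '12345'  # made-up id
    print(SOS_TMPL % video_id)    # fvideo/v1.0/fvideo-12345/sosPlayInfo
    print(INKEY_TMPL % video_id)  # fvideo/v1.0/fvideo-12345/inKey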

From 3a78198a96ffcb989d1302294f6a747c6147b418 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 25 Nov 2020 11:40:37 +0100
Subject: [PATCH 251/340] [vlive] improve extraction for geo-restricted videos

---
 youtube_dl/extractor/extractors.py | 1 +
 youtube_dl/extractor/vlive.py      | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 688437dc2..f7757b4f4 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1375,6 +1375,7 @@ from .vk import (
 )
 from .vlive import (
     VLiveIE,
+    VLivePostIE,
     VLiveChannelIE,
 )
 from .vodlocker import VodlockerIE
diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py
index 111b8c1c3..223709b1e 100644
--- a/youtube_dl/extractor/vlive.py
+++ b/youtube_dl/extractor/vlive.py
@@ -68,6 +68,10 @@ class VLiveIE(VLiveBaseIE):
     }, {
         'url': 'https://www.vlive.tv/embed/1326',
         'only_matching': True,
+    }, {
+        # works only with gcc=KR
+        'url': 'https://www.vlive.tv/video/225019',
+        'only_matching': True,
     }]
 
     def _real_initialize(self):
@@ -102,7 +106,7 @@ class VLiveIE(VLiveBaseIE):
             raise ExtractorError('Unable to log in', expected=True)
 
     def _call_api(self, path_template, video_id, fields=None):
-        query = {'appId': self._APP_ID}
+        query = {'appId': self._APP_ID, 'gcc': 'KR'}
         if fields:
             query['fields'] = fields
         try:

From 686e898fde48de9981c170f21d631d15e6f419ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 26 Nov 2020 02:58:48 +0700
Subject: [PATCH 252/340] [spreaker] Add extractor (closes #13480, closes
 #13877)

---
 youtube_dl/extractor/extractors.py |   6 +
 youtube_dl/extractor/spreaker.py   | 176 +++++++++++++++++++++++++++++
 2 files changed, 182 insertions(+)
 create mode 100644 youtube_dl/extractor/spreaker.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index f7757b4f4..fd19f0f0a 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1082,6 +1082,12 @@ from .stitcher import StitcherIE
 from .sport5 import Sport5IE
 from .sportbox import SportBoxIE
 from .sportdeutschland import SportDeutschlandIE
+from .spreaker import (
+    SpreakerIE,
+    SpreakerPageIE,
+    SpreakerShowIE,
+    SpreakerShowPageIE,
+)
 from .springboardplatform import SpringboardPlatformIE
 from .sprout import SproutIE
 from .srgssr import (
diff --git a/youtube_dl/extractor/spreaker.py b/youtube_dl/extractor/spreaker.py
new file mode 100644
index 000000000..beee6670c
--- /dev/null
+++ b/youtube_dl/extractor/spreaker.py
@@ -0,0 +1,176 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import itertools
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    float_or_none,
+    int_or_none,
+    str_or_none,
+    try_get,
+    unified_timestamp,
+    url_or_none,
+)
+
+
+def _extract_episode(data, episode_id=None):
+    title = data['title']
+    download_url = data['download_url']
+
+    series = try_get(data, lambda x: x['show']['title'], compat_str)
+    uploader = try_get(data, lambda x: x['author']['fullname'], compat_str)
+
+    thumbnails = []
+    for image in ('image_original', 'image_medium', 'image'):
+        image_url = url_or_none(data.get('%s_url' % image))
+        if image_url:
+            thumbnails.append({'url': image_url})
+
+    def stats(key):
+        return int_or_none(try_get(
+            data,
+            (lambda x: x['%ss_count' % key],
+             lambda x: x['stats']['%ss' % key])))
+
+    def duration(key):
+        return float_or_none(data.get(key), scale=1000)
+
+    return {
+        'id': compat_str(episode_id or data['episode_id']),
+        'url': download_url,
+        'display_id': data.get('permalink'),
+        'title': title,
+        'description': data.get('description'),
+        'timestamp': unified_timestamp(data.get('published_at')),
+        'uploader': uploader,
+        'uploader_id': str_or_none(data.get('author_id')),
+        'creator': uploader,
+        'duration': duration('duration') or duration('length'),
+        'view_count': stats('play'),
+        'like_count': stats('like'),
+        'comment_count': stats('message'),
+        'format': 'MPEG Layer 3',
+        'format_id': 'mp3',
+        'container': 'mp3',
+        'ext': 'mp3',
+        'thumbnails': thumbnails,
+        'series': series,
+        'extractor_key': SpreakerIE.ie_key(),
+    }
+
+
+class SpreakerIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+                    https?://
+                        api\.spreaker\.com/
+                        (?:
+                            (?:download/)?episode|
+                            v2/episodes
+                        )/
+                        (?P<id>\d+)
+                    '''
+    _TESTS = [{
+        'url': 'https://api.spreaker.com/episode/12534508',
+        'info_dict': {
+            'id': '12534508',
+            'display_id': 'swm-ep15-how-to-market-your-music-part-2',
+            'ext': 'mp3',
+            'title': 'EP:15 | Music Marketing (Likes) - Part 2',
+            'description': 'md5:0588c43e27be46423e183076fa071177',
+            'timestamp': 1502250336,
+            'upload_date': '20170809',
+            'uploader': 'SWM',
+            'uploader_id': '9780658',
+            'duration': 1063.42,
+            'view_count': int,
+            'like_count': int,
+            'comment_count': int,
+            'series': 'Success With Music (SWM)',
+        },
+    }, {
+        'url': 'https://api.spreaker.com/download/episode/12534508/swm_ep15_how_to_market_your_music_part_2.mp3',
+        'only_matching': True,
+    }, {
+        'url': 'https://api.spreaker.com/v2/episodes/12534508?export=episode_segments',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        episode_id = self._match_id(url)
+        data = self._download_json(
+            'https://api.spreaker.com/v2/episodes/%s' % episode_id,
+            episode_id)['response']['episode']
+        return _extract_episode(data, episode_id)
+
+
+class SpreakerPageIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?spreaker\.com/user/[^/]+/(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'https://www.spreaker.com/user/9780658/swm-ep15-how-to-market-your-music-part-2',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        episode_id = self._search_regex(
+            (r'data-episode_id=["\'](?P<id>\d+)',
+             r'episode_id\s*:\s*(?P<id>\d+)'), webpage, 'episode id')
+        return self.url_result(
+            'https://api.spreaker.com/episode/%s' % episode_id,
+            ie=SpreakerIE.ie_key(), video_id=episode_id)
+
+
+class SpreakerShowIE(InfoExtractor):
+    _VALID_URL = r'https?://api\.spreaker\.com/show/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://www.spreaker.com/show/3-ninjas-podcast',
+        'info_dict': {
+            'id': '4652058',
+        },
+        'playlist_mincount': 118,
+    }]
+
+    def _entries(self, show_id):
+        for page_num in itertools.count(1):
+            episodes = self._download_json(
+                'https://api.spreaker.com/show/%s/episodes' % show_id,
+                show_id, note='Downloading JSON page %d' % page_num, query={
+                    'page': page_num,
+                    'max_per_page': 100,
+                })
+            pager = try_get(episodes, lambda x: x['response']['pager'], dict)
+            if not pager:
+                break
+            results = pager.get('results')
+            if not results or not isinstance(results, list):
+                break
+            for result in results:
+                if not isinstance(result, dict):
+                    continue
+                yield _extract_episode(result)
+            if page_num == pager.get('last_page'):
+                break
+
+    def _real_extract(self, url):
+        show_id = self._match_id(url)
+        return self.playlist_result(self._entries(show_id), playlist_id=show_id)
+
+
+class SpreakerShowPageIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?spreaker\.com/show/(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'https://www.spreaker.com/show/success-with-music',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        show_id = self._search_regex(
+            r'show_id\s*:\s*(?P<id>\d+)', webpage, 'show id')
+        return self.url_result(
+            'https://api.spreaker.com/show/%s' % show_id,
+            ie=SpreakerShowIE.ie_key(), video_id=show_id)
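
SpreakerShowIE walks the episode list page by page with itertools.count and stops when the pager reports the last page or runs dry. A compact sketch of that loop against canned pager data (the data is invented; only the control flow mirrors _entries):

    import itertools

    # Invented pager responses keyed by page number; the real code downloads JSON.
    PAGES = {
        1: {'results': ['ep1', 'ep2'], 'last_page': 2},
        2: {'results': ['ep3'], 'last_page': 2},
    }

    def entries():
        for page_num in itertools.count(1):
            pager = PAGES.get(page_num)
            if not pager or not pager.get('results'):
                break
            for result in pager['results']:
                yield result
            if page_num == pager.get('last_page'):
                break

    print(list(entries()))  # ['ep1', 'ep2', 'ep3']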

From 4dc545553f977013ae92fcb10046749b9a0405ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 26 Nov 2020 03:03:51 +0700
Subject: [PATCH 253/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 3894ac2db..9f47e6826 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,22 @@
+version <unreleased>
+
+Core
+* [downloader/fragment] Set final file's mtime according to last fragment's
+  Last-Modified header (#11718, #18384, #27138)
+
+Extractors
++ [spreaker] Add support for spreaker.com (#13480, #13877)
+* [vlive] Improve extraction for geo-restricted videos
++ [vlive] Add support for post URLs (#27122, #27123)
+* [viki] Fix video API request (#27184)
+* [bbc] Fix BBC Three clip extraction
+* [bbc] Fix BBC News videos extraction
++ [medaltv] Add support for medal.tv (#27149)
+* [youtube] Improve music metadata and license extraction (#26013)
+* [nrk] Fix extraction
+* [cda] Fix extraction (#17803, #24458, #24518, #26381)
+
+
 version 2020.11.24
 
 Core

From 9fe50837c3e8f6c40b7bed8bf7105a868a7a678f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 26 Nov 2020 03:05:51 +0700
Subject: [PATCH 254/340] release 2020.11.26

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 6 ++++++
 youtube_dl/version.py                            | 2 +-
 8 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 73155d94e..98c075cb1 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.24
+ [debug] youtube-dl version 2020.11.26
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 9e1921149..12423c0e1 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 40ce65e73..11522132a 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index b98f06c2d..9be368760 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.24
+ [debug] youtube-dl version 2020.11.26
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 60bd5f337..a6e1eabe6 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.24. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.24**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 9f47e6826..b785e89ad 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.26
 
 Core
 * [downloader/fragment] Set final file's mtime according to last fragment's
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index dea88b5c5..8387dcce0 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -471,6 +471,7 @@
  - **massengeschmack.tv**
  - **MatchTV**
  - **MDR**: MDR.DE and KiKA
+ - **MedalTV**
  - **media.ccc.de**
  - **media.ccc.de:lists**
  - **Medialaan**
@@ -839,6 +840,10 @@
  - **Sport5**
  - **SportBox**
  - **SportDeutschland**
+ - **Spreaker**
+ - **SpreakerPage**
+ - **SpreakerShow**
+ - **SpreakerShowPage**
  - **SpringboardPlatform**
  - **Sprout**
  - **sr:mediathek**: Saarländischer Rundfunk
@@ -1055,6 +1060,7 @@
  - **vk:wallpost**
  - **vlive**
  - **vlive:channel**
+ - **vlive:post**
  - **Vodlocker**
  - **VODPl**
  - **VODPlatform**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index e8a816d0d..d8a53b96e 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.24'
+__version__ = '2020.11.26'

From 99de2f38d375f794ffdc4f5a110a5227310f3ab0 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 25 Nov 2020 21:39:17 +0100
Subject: [PATCH 255/340] [spreaker] fix SpreakerShowIE test URL

---
 youtube_dl/extractor/spreaker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/spreaker.py b/youtube_dl/extractor/spreaker.py
index beee6670c..6c7e40ae4 100644
--- a/youtube_dl/extractor/spreaker.py
+++ b/youtube_dl/extractor/spreaker.py
@@ -126,7 +126,7 @@ class SpreakerPageIE(InfoExtractor):
 class SpreakerShowIE(InfoExtractor):
     _VALID_URL = r'https?://api\.spreaker\.com/show/(?P<id>\d+)'
     _TESTS = [{
-        'url': 'https://www.spreaker.com/show/3-ninjas-podcast',
+        'url': 'https://api.spreaker.com/show/4652058',
         'info_dict': {
             'id': '4652058',
         },

From a3cf22e590f7057c50563f36c17b59993446dbdd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrian=20Heine=20n=C3=A9=20Lang?= <mail@adrianheine.de>
Date: Thu, 26 Nov 2020 12:55:06 +0100
Subject: [PATCH 256/340] [videa] Adapt to updates (#26301)

closes #25973, closes #25650.
---
 youtube_dl/extractor/videa.py | 62 +++++++++++++++++++++++++++++++++--
 1 file changed, 60 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/videa.py b/youtube_dl/extractor/videa.py
index d0e34c819..a03614cc1 100644
--- a/youtube_dl/extractor/videa.py
+++ b/youtube_dl/extractor/videa.py
@@ -2,15 +2,24 @@
 from __future__ import unicode_literals
 
 import re
+import random
+import string
+import struct
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     int_or_none,
     mimetype2ext,
     parse_codecs,
     xpath_element,
     xpath_text,
 )
+from ..compat import (
+    compat_b64decode,
+    compat_ord,
+    compat_parse_qs,
+)
 
 
 class VideaIE(InfoExtractor):
@@ -60,15 +69,63 @@ class VideaIE(InfoExtractor):
             r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//videa\.hu/player\?.*?\bv=.+?)\1',
             webpage)]
 
+    def rc4(self, ciphertext, key):
+        res = b''
+
+        keyLen = len(key)
+        S = list(range(256))
+
+        j = 0
+        for i in range(256):
+            j = (j + S[i] + ord(key[i % keyLen])) % 256
+            S[i], S[j] = S[j], S[i]
+
+        i = 0
+        j = 0
+        for m in range(len(ciphertext)):
+            i = (i + 1) % 256
+            j = (j + S[i]) % 256
+            S[i], S[j] = S[j], S[i]
+            k = S[(S[i] + S[j]) % 256]
+            res += struct.pack("B", k ^ compat_ord(ciphertext[m]))
+
+        return res
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id, fatal=True)
+        error = self._search_regex(r'<p class="error-text">([^<]+)</p>', webpage, 'error', default=None)
+        if error:
+            raise ExtractorError(error, expected=True)
 
-        info = self._download_xml(
+        video_src_params_raw = self._search_regex(r'<iframe[^>]+id="videa_player_iframe"[^>]+src="/player\?([^"]+)"', webpage, 'video_src_params')
+        video_src_params = compat_parse_qs(video_src_params_raw)
+        player_page = self._download_webpage("https://videa.hu/videojs_player?%s" % video_src_params_raw, video_id, fatal=True)
+        nonce = self._search_regex(r'_xt\s*=\s*"([^"]+)"', player_page, 'nonce')
+        random_seed = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(8))
+        static_secret = 'xHb0ZvME5q8CBcoQi6AngerDu3FGO9fkUlwPmLVY_RTzj2hJIS4NasXWKy1td7p'
+        l = nonce[:32]
+        s = nonce[32:]
+        result = ''
+        for i in range(0, 32):
+            result += s[i - (static_secret.index(l[i]) - 31)]
+
+        video_src_params['_s'] = random_seed
+        video_src_params['_t'] = result[:16]
+        encryption_key_stem = result[16:] + random_seed
+
+        [b64_info, handle] = self._download_webpage_handle(
             'http://videa.hu/videaplayer_get_xml.php', video_id,
-            query={'v': video_id})
+            query=video_src_params, fatal=True)
+
+        encrypted_info = compat_b64decode(b64_info)
+        key = encryption_key_stem + handle.info()['x-videa-xs']
+        info_str = self.rc4(encrypted_info, key).decode('utf8')
+        info = self._parse_xml(info_str, video_id)
 
         video = xpath_element(info, './/video', 'video', fatal=True)
         sources = xpath_element(info, './/video_sources', 'sources', fatal=True)
+        hash_values = xpath_element(info, './/hash_values', 'hash_values', fatal=True)
 
         title = xpath_text(video, './title', fatal=True)
 
@@ -77,6 +134,7 @@ class VideaIE(InfoExtractor):
             source_url = source.text
             if not source_url:
                 continue
+            source_url += '?md5=%s&expires=%s' % (hash_values.find('hash_value_%s' % source.get('name')).text, source.get('exp'))
             f = parse_codecs(source.get('codecs'))
             f.update({
                 'url': source_url,
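
The rc4 method added above is textbook RC4: a key-scheduling pass over S followed by a keystream that is XORed into the ciphertext. A self-contained Python 3 sketch of the same cipher, round-tripping a sample string to show the operation is its own inverse (no youtube-dl compat helpers needed):

    def rc4(data, key):
        # Key-scheduling algorithm (KSA).
        S = list(range(256))
        j = 0
        for i in range(256):
            j = (j + S[i] + key[i % len(key)]) % 256
            S[i], S[j] = S[j], S[i]
        # Keystream generation (PRGA) XORed into the data.
        out = bytearray()
        i = j = 0
        for byte in data:
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            S[i], S[j] = S[j], S[i]
            out.append(byte ^ S[(S[i] + S[j]) % 256])
        return bytes(out)

    key = b'example key'
    ciphertext = rc4(b'plaintext', key)
    print(rc4(ciphertext, key))  # b'plaintext'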

From f9f9699f2fa87ec54d59b1834cc56adb6147fec6 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 26 Nov 2020 12:56:49 +0100
Subject: [PATCH 257/340] [videa] improve extraction

---
 youtube_dl/extractor/videa.py | 91 +++++++++++++++++++----------------
 1 file changed, 50 insertions(+), 41 deletions(-)

diff --git a/youtube_dl/extractor/videa.py b/youtube_dl/extractor/videa.py
index a03614cc1..ab2c15cde 100644
--- a/youtube_dl/extractor/videa.py
+++ b/youtube_dl/extractor/videa.py
@@ -1,10 +1,9 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
 import random
+import re
 import string
-import struct
 
 from .common import InfoExtractor
 from ..utils import (
@@ -12,13 +11,14 @@ from ..utils import (
     int_or_none,
     mimetype2ext,
     parse_codecs,
+    update_url_query,
     xpath_element,
     xpath_text,
 )
 from ..compat import (
     compat_b64decode,
     compat_ord,
-    compat_parse_qs,
+    compat_struct_pack,
 )
 
 
@@ -28,7 +28,7 @@ class VideaIE(InfoExtractor):
                         videa(?:kid)?\.hu/
                         (?:
                             videok/(?:[^/]+/)*[^?#&]+-|
-                            player\?.*?\bv=|
+                            (?:videojs_)?player\?.*?\bv=|
                             player/v/
                         )
                         (?P<id>[^?#&]+)
@@ -62,6 +62,7 @@ class VideaIE(InfoExtractor):
         'url': 'https://videakid.hu/player/v/8YfIAjxwWGwT8HVQ?autoplay=1',
         'only_matching': True,
     }]
+    _STATIC_SECRET = 'xHb0ZvME5q8CBcoQi6AngerDu3FGO9fkUlwPmLVY_RTzj2hJIS4NasXWKy1td7p'
 
     @staticmethod
     def _extract_urls(webpage):
@@ -69,75 +70,84 @@ class VideaIE(InfoExtractor):
             r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//videa\.hu/player\?.*?\bv=.+?)\1',
             webpage)]
 
-    def rc4(self, ciphertext, key):
+    @staticmethod
+    def rc4(cipher_text, key):
         res = b''
 
-        keyLen = len(key)
+        key_len = len(key)
         S = list(range(256))
 
         j = 0
         for i in range(256):
-            j = (j + S[i] + ord(key[i % keyLen])) % 256
+            j = (j + S[i] + ord(key[i % key_len])) % 256
             S[i], S[j] = S[j], S[i]
 
         i = 0
         j = 0
-        for m in range(len(ciphertext)):
+        for m in range(len(cipher_text)):
             i = (i + 1) % 256
             j = (j + S[i]) % 256
             S[i], S[j] = S[j], S[i]
             k = S[(S[i] + S[j]) % 256]
-            res += struct.pack("B", k ^ compat_ord(ciphertext[m]))
+            res += compat_struct_pack('B', k ^ compat_ord(cipher_text[m]))
 
-        return res
+        return res.decode()
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id, fatal=True)
-        error = self._search_regex(r'<p class="error-text">([^<]+)</p>', webpage, 'error', default=None)
-        if error:
-            raise ExtractorError(error, expected=True)
+        query = {'v': video_id}
+        player_page = self._download_webpage(
+            'https://videa.hu/player', video_id, query=query)
 
-        video_src_params_raw = self._search_regex(r'<iframe[^>]+id="videa_player_iframe"[^>]+src="/player\?([^"]+)"', webpage, 'video_src_params')
-        video_src_params = compat_parse_qs(video_src_params_raw)
-        player_page = self._download_webpage("https://videa.hu/videojs_player?%s" % video_src_params_raw, video_id, fatal=True)
-        nonce = self._search_regex(r'_xt\s*=\s*"([^"]+)"', player_page, 'nonce')
-        random_seed = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(8))
-        static_secret = 'xHb0ZvME5q8CBcoQi6AngerDu3FGO9fkUlwPmLVY_RTzj2hJIS4NasXWKy1td7p'
+        nonce = self._search_regex(
+            r'_xt\s*=\s*"([^"]+)"', player_page, 'nonce')
         l = nonce[:32]
         s = nonce[32:]
         result = ''
         for i in range(0, 32):
-            result += s[i - (static_secret.index(l[i]) - 31)]
+            result += s[i - (self._STATIC_SECRET.index(l[i]) - 31)]
 
-        video_src_params['_s'] = random_seed
-        video_src_params['_t'] = result[:16]
-        encryption_key_stem = result[16:] + random_seed
+        random_seed = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
+        query['_s'] = random_seed
+        query['_t'] = result[:16]
 
-        [b64_info, handle] = self._download_webpage_handle(
-            'http://videa.hu/videaplayer_get_xml.php', video_id,
-            query=video_src_params, fatal=True)
+        b64_info, handle = self._download_webpage_handle(
+            'http://videa.hu/videaplayer_get_xml.php', video_id, query=query)
+        if b64_info.startswith('<?xml'):
+            info = self._parse_xml(b64_info, video_id)
+        else:
+            key = result[16:] + random_seed + handle.headers['x-videa-xs']
+            info = self._parse_xml(self.rc4(
+                compat_b64decode(b64_info), key), video_id)
 
-        encrypted_info = compat_b64decode(b64_info)
-        key = encryption_key_stem + handle.info()['x-videa-xs']
-        info_str = self.rc4(encrypted_info, key).decode('utf8')
-        info = self._parse_xml(info_str, video_id)
-
-        video = xpath_element(info, './/video', 'video', fatal=True)
-        sources = xpath_element(info, './/video_sources', 'sources', fatal=True)
-        hash_values = xpath_element(info, './/hash_values', 'hash_values', fatal=True)
+        video = xpath_element(info, './video', 'video')
+        if not video:
+            raise ExtractorError(xpath_element(
+                info, './error', fatal=True), expected=True)
+        sources = xpath_element(
+            info, './video_sources', 'sources', fatal=True)
+        hash_values = xpath_element(
+            info, './hash_values', 'hash values', fatal=True)
 
         title = xpath_text(video, './title', fatal=True)
 
         formats = []
         for source in sources.findall('./video_source'):
             source_url = source.text
-            if not source_url:
+            source_name = source.get('name')
+            source_exp = source.get('exp')
+            if not (source_url and source_name and source_exp):
                 continue
-            source_url += '?md5=%s&expires=%s' % (hash_values.find('hash_value_%s' % source.get('name')).text, source.get('exp'))
+            hash_value = xpath_text(hash_values, 'hash_value_' + source_name)
+            if not hash_value:
+                continue
+            source_url = update_url_query(source_url, {
+                'md5': hash_value,
+                'expires': source_exp,
+            })
             f = parse_codecs(source.get('codecs'))
             f.update({
-                'url': source_url,
+                'url': self._proto_relative_url(source_url),
                 'ext': mimetype2ext(source.get('mimetype')) or 'mp4',
                 'format_id': source.get('name'),
                 'width': int_or_none(source.get('width')),
@@ -146,8 +156,7 @@ class VideaIE(InfoExtractor):
             formats.append(f)
         self._sort_formats(formats)
 
-        thumbnail = xpath_text(video, './poster_src')
-        duration = int_or_none(xpath_text(video, './duration'))
+        thumbnail = self._proto_relative_url(xpath_text(video, './poster_src'))
 
         age_limit = None
         is_adult = xpath_text(video, './is_adult_content', default=None)
@@ -158,7 +167,7 @@ class VideaIE(InfoExtractor):
             'id': video_id,
             'title': title,
             'thumbnail': thumbnail,
-            'duration': duration,
+            'duration': int_or_none(xpath_text(video, './duration')),
             'age_limit': age_limit,
             'formats': formats,
         }
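
The cleanup above also switches from string concatenation to update_url_query when appending the md5 hash and expiry to each source URL. A rough stdlib-only equivalent of what that helper does (Python 3; parameter names copied from the hunk, values invented):

    from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

    def add_query(url, extra):
        # Merge extra parameters into the URL's existing query string.
        parts = urlsplit(url)
        query = dict(parse_qsl(parts.query))
        query.update(extra)
        return urlunsplit(parts._replace(query=urlencode(query)))

    print(add_query('https://example.com/video.mp4?token=abc',
                    {'md5': 'deadbeef', 'expires': '1606400000'}))
    # https://example.com/video.mp4?token=abc&md5=deadbeef&expires=1606400000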

From 20c50c65566be31607e59d7ca2467c664a29c843 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 28 Nov 2020 15:02:31 +0700
Subject: [PATCH 258/340] [youtube] Improve yt initial player response
 extraction (closes #27216)

---
 youtube_dl/extractor/youtube.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 1e3ff7d44..4616ac53d 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -283,6 +283,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     }
 
     _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
+    _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
 
     def _call_api(self, ep, query, video_id):
         data = self._DEFAULT_API_DATA.copy()
@@ -1068,7 +1069,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             },
         },
         {
-            # with '};' inside yt initial data (see https://github.com/ytdl-org/youtube-dl/issues/27093)
+            # with '};' inside yt initial data (see [1])
+            # see [2] for an example with '};' inside ytInitialPlayerResponse
+            # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
+            # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
             'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
             'info_dict': {
                 'id': 'CHqg6qOn4no',
@@ -1686,7 +1690,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         if not video_info and not player_response:
             player_response = extract_player_response(
                 self._search_regex(
-                    r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;', video_webpage,
+                    (r'%s\s*(?:var\s+meta|</script|\n)' % self._YT_INITIAL_PLAYER_RESPONSE_RE,
+                     self._YT_INITIAL_PLAYER_RESPONSE_RE), video_webpage,
                     'initial player response', default='{}'),
                 video_id)
 

From f04cfe24e0554ca7e08db94ef138d44d22493af0 Mon Sep 17 00:00:00 2001
From: JChris246 <43832407+JChris246@users.noreply.github.com>
Date: Sat, 28 Nov 2020 15:32:13 -0400
Subject: [PATCH 259/340] [pornhub] Fix like and dislike count extraction
 (closes #27227) (#27234)

---
 youtube_dl/extractor/pornhub.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 529f3f711..a50b6260c 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -346,9 +346,9 @@ class PornHubIE(PornHubBaseIE):
         view_count = self._extract_count(
             r'<span class="count">([\d,\.]+)</span> [Vv]iews', webpage, 'view')
         like_count = self._extract_count(
-            r'<span class="votesUp">([\d,\.]+)</span>', webpage, 'like')
+            r'<span[^>]+class="votesUp"[^>]*>([\d,\.]+)</span>', webpage, 'like')
         dislike_count = self._extract_count(
-            r'<span class="votesDown">([\d,\.]+)</span>', webpage, 'dislike')
+            r'<span[^>]+class="votesDown"[^>]*>([\d,\.]+)</span>', webpage, 'dislike')
         comment_count = self._extract_count(
             r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
 

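The change above only loosens two patterns; a tiny standalone check against synthetic markup shows why tolerating extra attributes matters once the site adds data-* attributes to the vote spans:

    import re

    webpage = '<span class="votesUp" data-rating="1234">1,234</span>'

    old = re.search(r'<span class="votesUp">([\d,\.]+)</span>', webpage)
    new = re.search(r'<span[^>]+class="votesUp"[^>]*>([\d,\.]+)</span>', webpage)
    print(old)           # None - the strict pattern no longer matches
    print(new.group(1))  # 1,234
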
From 9585b376db8cb0a3f0e7a866e5d64b351fb6ecf7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Nov 2020 03:55:49 +0700
Subject: [PATCH 260/340] [YoutubeDL] Write static debug to stderr and respect
 quiet for dynamic debug (closes #14579, closes #22593)

TODO: logging and verbosity need major refactoring (refs #10894)
---
 youtube_dl/YoutubeDL.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 855a73157..fdd68f616 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -1610,7 +1610,7 @@ class YoutubeDL(object):
         if req_format is None:
             req_format = self._default_format_spec(info_dict, download=download)
             if self.params.get('verbose'):
-                self.to_stdout('[debug] Default format spec: %s' % req_format)
+                self._write_string('[debug] Default format spec: %s\n' % req_format)
 
         format_selector = self.build_format_selector(req_format)
 
@@ -1871,7 +1871,7 @@ class YoutubeDL(object):
                     for ph in self._progress_hooks:
                         fd.add_progress_hook(ph)
                     if self.params.get('verbose'):
-                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
+                        self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
                     return fd.download(name, info)
 
                 if info_dict.get('requested_formats') is not None:

From 717d1d2d5acf88aba6e227870e15bbff0be89ff1 Mon Sep 17 00:00:00 2001
From: bopol <bopol@e.email>
Date: Sat, 28 Nov 2020 22:15:53 +0100
Subject: [PATCH 261/340] [ina] Add support for mobile URLs (#27229)

---
 youtube_dl/extractor/ina.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/ina.py b/youtube_dl/extractor/ina.py
index 12695af27..b3b2683cb 100644
--- a/youtube_dl/extractor/ina.py
+++ b/youtube_dl/extractor/ina.py
@@ -12,7 +12,7 @@ from ..utils import (
 
 
 class InaIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?ina\.fr/(?:video|audio)/(?P<id>[A-Z0-9_]+)'
+    _VALID_URL = r'https?://(?:(?:www|m)\.)?ina\.fr/(?:video|audio)/(?P<id>[A-Z0-9_]+)'
     _TESTS = [{
         'url': 'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
         'md5': 'a667021bf2b41f8dc6049479d9bb38a3',
@@ -31,6 +31,9 @@ class InaIE(InfoExtractor):
     }, {
         'url': 'https://www.ina.fr/video/P16173408-video.html',
         'only_matching': True,
+    }, {
+        'url': 'http://m.ina.fr/video/I12055569',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

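A quick standalone check of the widened pattern: both the desktop and the mobile host now yield the same id (URLs taken from the tests above):

    import re

    _VALID_URL = r'https?://(?:(?:www|m)\.)?ina\.fr/(?:video|audio)/(?P<id>[A-Z0-9_]+)'
    for url in (
            'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
            'http://m.ina.fr/video/I12055569'):
        print(re.match(_VALID_URL, url).group('id'))  # I12055569 both times
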
From fb626c05867deab04425bad0c0b16b55473841a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Nov 2020 08:09:20 +0700
Subject: [PATCH 262/340] [tiktok] Fix extraction (closes #20809, closes
 #22838, closes #22850, closes #25987, closes #26281, closes #26411, closes
 #26639, closes #26776, closes #27237)

---
 youtube_dl/extractor/tiktok.py | 122 +++++++++++++++++----------------
 1 file changed, 64 insertions(+), 58 deletions(-)

diff --git a/youtube_dl/extractor/tiktok.py b/youtube_dl/extractor/tiktok.py
index 66088b9ab..ea1beb8af 100644
--- a/youtube_dl/extractor/tiktok.py
+++ b/youtube_dl/extractor/tiktok.py
@@ -5,6 +5,7 @@ from .common import InfoExtractor
 from ..utils import (
     compat_str,
     ExtractorError,
+    float_or_none,
     int_or_none,
     str_or_none,
     try_get,
@@ -13,7 +14,7 @@ from ..utils import (
 
 
 class TikTokBaseIE(InfoExtractor):
-    def _extract_aweme(self, data):
+    def _extract_video(self, data, video_id=None):
         video = data['video']
         description = str_or_none(try_get(data, lambda x: x['desc']))
         width = int_or_none(try_get(data, lambda x: video['width']))
@@ -21,43 +22,54 @@ class TikTokBaseIE(InfoExtractor):
 
         format_urls = set()
         formats = []
-        for format_id in (
-                'play_addr_lowbr', 'play_addr', 'play_addr_h264',
-                'download_addr'):
-            for format in try_get(
-                    video, lambda x: x[format_id]['url_list'], list) or []:
-                format_url = url_or_none(format)
-                if not format_url:
-                    continue
-                if format_url in format_urls:
-                    continue
-                format_urls.add(format_url)
-                formats.append({
-                    'url': format_url,
-                    'ext': 'mp4',
-                    'height': height,
-                    'width': width,
-                })
+        for format_id in ('download', 'play'):
+            format_url = url_or_none(video.get('%sAddr' % format_id))
+            if not format_url:
+                continue
+            if format_url in format_urls:
+                continue
+            format_urls.add(format_url)
+            formats.append({
+                'url': format_url,
+                'ext': 'mp4',
+                'height': height,
+                'width': width,
+                'http_headers': {
+                    'Referer': 'https://www.tiktok.com/',
+                }
+            })
         self._sort_formats(formats)
 
-        thumbnail = url_or_none(try_get(
-            video, lambda x: x['cover']['url_list'][0], compat_str))
-        uploader = try_get(data, lambda x: x['author']['nickname'], compat_str)
-        timestamp = int_or_none(data.get('create_time'))
-        comment_count = int_or_none(data.get('comment_count')) or int_or_none(
-            try_get(data, lambda x: x['statistics']['comment_count']))
-        repost_count = int_or_none(try_get(
-            data, lambda x: x['statistics']['share_count']))
+        thumbnail = url_or_none(video.get('cover'))
+        duration = float_or_none(video.get('duration'))
 
-        aweme_id = data['aweme_id']
+        uploader = try_get(data, lambda x: x['author']['nickname'], compat_str)
+        uploader_id = try_get(data, lambda x: x['author']['id'], compat_str)
+
+        timestamp = int_or_none(data.get('createTime'))
+
+        def stats(key):
+            return int_or_none(try_get(
+                data, lambda x: x['stats']['%sCount' % key]))
+
+        view_count = stats('play')
+        like_count = stats('digg')
+        comment_count = stats('comment')
+        repost_count = stats('share')
+
+        aweme_id = data.get('id') or video_id
 
         return {
             'id': aweme_id,
             'title': uploader or aweme_id,
             'description': description,
             'thumbnail': thumbnail,
+            'duration': duration,
             'uploader': uploader,
+            'uploader_id': uploader_id,
             'timestamp': timestamp,
+            'view_count': view_count,
+            'like_count': like_count,
             'comment_count': comment_count,
             'repost_count': repost_count,
             'formats': formats,
@@ -65,62 +77,56 @@ class TikTokBaseIE(InfoExtractor):
 
 
 class TikTokIE(TikTokBaseIE):
-    _VALID_URL = r'''(?x)
-                        https?://
-                            (?:
-                                (?:m\.)?tiktok\.com/v|
-                                (?:www\.)?tiktok\.com/share/video
-                            )
-                            /(?P<id>\d+)
-                    '''
+    _VALID_URL = r'https?://(?:www\.)?tiktok\.com/@[^/]+/video/(?P<id>\d+)'
     _TESTS = [{
-        'url': 'https://m.tiktok.com/v/6606727368545406213.html',
-        'md5': 'd584b572e92fcd48888051f238022420',
+        'url': 'https://www.tiktok.com/@zureeal/video/6606727368545406213',
+        'md5': '163ceff303bb52de60e6887fe399e6cd',
         'info_dict': {
             'id': '6606727368545406213',
             'ext': 'mp4',
             'title': 'Zureeal',
             'description': '#bowsette#mario#cosplay#uk#lgbt#gaming#asian#bowsettecosplay',
-            'thumbnail': r're:^https?://.*~noop.image',
+            'thumbnail': r're:^https?://.*',
+            'duration': 15,
             'uploader': 'Zureeal',
+            'uploader_id': '188294915489964032',
             'timestamp': 1538248586,
             'upload_date': '20180929',
+            'view_count': int,
+            'like_count': int,
             'comment_count': int,
             'repost_count': int,
         }
-    }, {
-        'url': 'https://www.tiktok.com/share/video/6606727368545406213',
-        'only_matching': True,
     }]
 
+    def _real_initialize(self):
+        # Set up session (will set necessary cookies)

+        self._request_webpage(
+            'https://www.tiktok.com/', None, note='Setting up session')
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(
-            'https://m.tiktok.com/v/%s.html' % video_id, video_id)
+        webpage = self._download_webpage(url, video_id)
         data = self._parse_json(self._search_regex(
-            r'\bdata\s*=\s*({.+?})\s*;', webpage, 'data'), video_id)
-        return self._extract_aweme(data)
+            r'<script[^>]+\bid=["\']__NEXT_DATA__[^>]+>\s*({.+?})\s*</script',
+            webpage, 'data'), video_id)['props']['pageProps']['itemInfo']['itemStruct']
+        return self._extract_video(data, video_id)
 
 
 class TikTokUserIE(TikTokBaseIE):
-    _VALID_URL = r'''(?x)
-                        https?://
-                            (?:
-                                (?:m\.)?tiktok\.com/h5/share/usr|
-                                (?:www\.)?tiktok\.com/share/user
-                            )
-                            /(?P<id>\d+)
-                    '''
+    _VALID_URL = r'https://(?:www\.)?tiktok\.com/@(?P<id>[^/?#&]+)'
     _TESTS = [{
-        'url': 'https://m.tiktok.com/h5/share/usr/188294915489964032.html',
+        'url': 'https://www.tiktok.com/@zureeal',
         'info_dict': {
             'id': '188294915489964032',
         },
         'playlist_mincount': 24,
-    }, {
-        'url': 'https://www.tiktok.com/share/user/188294915489964032',
-        'only_matching': True,
     }]
+    _WORKING = False
+
+    @classmethod
+    def suitable(cls, url):
+        return False if TikTokIE.suitable(url) else super(TikTokUserIE, cls).suitable(url)
 
     def _real_extract(self, url):
         user_id = self._match_id(url)
@@ -130,7 +136,7 @@ class TikTokUserIE(TikTokBaseIE):
         entries = []
         for aweme in data['aweme_list']:
             try:
-                entry = self._extract_aweme(aweme)
+                entry = self._extract_video(aweme)
             except ExtractorError:
                 continue
             entry['extractor_key'] = TikTokIE.ie_key()

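A self-contained sketch of the new extraction path using synthetic data (real pages are far larger): the item data now sits in the Next.js __NEXT_DATA__ script tag under props.pageProps.itemInfo.itemStruct, and the play/download URLs are only served with a TikTok Referer header, which is why the patch attaches one to every format:

    import json
    import re

    webpage = (
        '<script id="__NEXT_DATA__" type="application/json">'
        '{"props": {"pageProps": {"itemInfo": {"itemStruct": {'
        '"id": "6606727368545406213", "createTime": 1538248586,'
        '"video": {"playAddr": "https://v16.example.invalid/play.mp4",'
        ' "width": 540, "height": 960, "duration": 15},'
        '"author": {"nickname": "Zureeal", "id": "188294915489964032"},'
        '"stats": {"playCount": 100, "diggCount": 10, "commentCount": 5,'
        ' "shareCount": 2}}}}}}</script>')

    data = json.loads(re.search(
        r'<script[^>]+\bid=["\']__NEXT_DATA__[^>]+>\s*({.+?})\s*</script',
        webpage).group(1))['props']['pageProps']['itemInfo']['itemStruct']
    print(data['video']['playAddr'], data['stats']['playCount'])
    # https://v16.example.invalid/play.mp4 100
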
From 43181707799fe89b477759a6a20ce0bb6a1e5d82 Mon Sep 17 00:00:00 2001
From: Michael Munch <mm.munk@gmail.com>
Date: Sun, 29 Nov 2020 07:44:36 +0100
Subject: [PATCH 263/340] [drtv] Extend _VALID_URL (#27243)

---
 youtube_dl/extractor/drtv.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/drtv.py b/youtube_dl/extractor/drtv.py
index 390e79f8c..c0036adb6 100644
--- a/youtube_dl/extractor/drtv.py
+++ b/youtube_dl/extractor/drtv.py
@@ -29,7 +29,7 @@ class DRTVIE(InfoExtractor):
                     https?://
                         (?:
                             (?:www\.)?dr\.dk/(?:tv/se|nyheder|radio(?:/ondemand)?)/(?:[^/]+/)*|
-                            (?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/(?:se|episode)/
+                            (?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/(?:se|episode|program)/
                         )
                         (?P<id>[\da-z_-]+)
                     '''
@@ -111,6 +111,9 @@ class DRTVIE(InfoExtractor):
     }, {
         'url': 'https://dr-massive.com/drtv/se/bonderoeven_71769',
         'only_matching': True,
+    }, {
+        'url': 'https://www.dr.dk/drtv/program/jagten_220924',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

From 16c822e91e6cbd433bd562c213538553574b2a94 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Nov 2020 13:49:12 +0700
Subject: [PATCH 264/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/ChangeLog b/ChangeLog
index b785e89ad..fc75cf7c2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,19 @@
+version <unreleased>
+
+Core
+* [YoutubeDL] Write static debug to stderr and respect quiet for dynamic debug
+  (#14579, #22593)
+
+Extractors
+* [drtv] Extend URL regular expression (#27243)
+* [tiktok] Fix extraction (#20809, #22838, #22850, #25987, #26281, #26411,
+  #26639, #26776, #27237)
++ [ina] Add support for mobile URLs (#27229)
+* [pornhub] Fix like and dislike count extraction (#27227, #27234)
+* [youtube] Improve yt initial player response extraction (#27216)
+* [videa] Fix extraction (#25650, #25973, #26301)
+
+
 version 2020.11.26
 
 Core
@@ -12,7 +28,7 @@ Extractors
 * [bbc] Fix BBC Three clip extraction
 * [bbc] Fix BBC News videos extraction
 + [medaltv] Add support for medal.tv (#27149)
-* [youtube] Imporve music metadata and license extraction (#26013)
+* [youtube] Improve music metadata and license extraction (#26013)
 * [nrk] Fix extraction
 * [cda] Fix extraction (#17803, #24458, #24518, #26381)
 

From b449b73dccf6fdd676770ea83cff322348cdf778 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Nov 2020 13:53:01 +0700
Subject: [PATCH 265/340] release 2020.11.29

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 2 +-
 youtube_dl/version.py                            | 2 +-
 8 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 98c075cb1..95b6f674f 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.26
+ [debug] youtube-dl version 2020.11.29
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 12423c0e1..a3b53f5f3 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 11522132a..476fdc3b7 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 9be368760..2ae00f75e 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.26
+ [debug] youtube-dl version 2020.11.29
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index a6e1eabe6..f9b7edb49 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.26**
+- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index fc75cf7c2..966f9379f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.11.29
 
 Core
 * [YoutubeDL] Write static debug to stderr and respect quiet for dynamic debug
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 8387dcce0..702930e71 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -912,7 +912,7 @@
  - **ThisAV**
  - **ThisOldHouse**
  - **TikTok**
- - **TikTokUser**
+ - **TikTokUser** (Currently broken)
  - **tinypic**: tinypic.com videos
  - **TMZ**
  - **TMZArticle**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index d8a53b96e..ebb8a200d 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.26'
+__version__ = '2020.11.29'

From d6ce649f15cff2e259a0f38b176b97fec293f0ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 29 Nov 2020 22:15:51 +0700
Subject: [PATCH 266/340] [yandexmusic:track] Fix extraction (closes #26449,
 closes #26669, closes #26747, closes #26748, closes #26762)

---
 youtube_dl/extractor/yandexmusic.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index 08d35e04c..5805caef2 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -109,8 +109,7 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
             'Downloading track location JSON',
             query={'format': 'json'})
         key = hashlib.md5(('XGRlBW9FXlekgbPrRHuSiA' + fd_data['path'][1:] + fd_data['s']).encode('utf-8')).hexdigest()
-        storage = track['storageDir'].split('.')
-        f_url = 'http://%s/get-mp3/%s/%s?track-id=%s ' % (fd_data['host'], key, fd_data['ts'] + fd_data['path'], storage[1])
+        f_url = 'http://%s/get-mp3/%s/%s?track-id=%s ' % (fd_data['host'], key, fd_data['ts'] + fd_data['path'], track['id'])
 
         thumbnail = None
         cover_uri = track.get('albums', [{}])[0].get('coverUri')

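For illustration, the download URL construction with placeholder fd_data values (the real ones come from the "Downloading track location JSON" request); the only change above is that the track id is now read straight from the track JSON instead of the second component of storageDir:

    import hashlib

    # placeholder values standing in for the real download-info response
    fd_data = {'host': 'storage.example.invalid', 'path': '/get/abcdef/track.mp3',
               's': 'a1b2c3d4', 'ts': '0005f0a1'}
    track = {'id': '4878838'}

    key = hashlib.md5(
        ('XGRlBW9FXlekgbPrRHuSiA' + fd_data['path'][1:] + fd_data['s'])
        .encode('utf-8')).hexdigest()
    f_url = 'http://%s/get-mp3/%s/%s?track-id=%s ' % (
        fd_data['host'], key, fd_data['ts'] + fd_data['path'], track['id'])
    print(f_url)
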
From 6da0e5e7a238a9a27260198da741a4014787dcae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 30 Nov 2020 00:25:06 +0700
Subject: [PATCH 267/340] [yandexmusic] Refactor and add support for artist's
 tracks and albums (closes #11887, closes #22284)

---
 youtube_dl/extractor/extractors.py  |   2 +
 youtube_dl/extractor/yandexmusic.py | 280 ++++++++++++++++++++--------
 2 files changed, 200 insertions(+), 82 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index fd19f0f0a..e5327d375 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1478,6 +1478,8 @@ from .yandexmusic import (
     YandexMusicTrackIE,
     YandexMusicAlbumIE,
     YandexMusicPlaylistIE,
+    YandexMusicArtistTracksIE,
+    YandexMusicArtistAlbumsIE,
 )
 from .yandexvideo import YandexVideoIE
 from .yapfiles import YapFilesIE
diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index 5805caef2..324ac8bba 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -46,57 +46,69 @@ class YandexMusicBaseIE(InfoExtractor):
         self._handle_error(response)
         return response
 
+    def _call_api(self, ep, tld, url, item_id, note, query):
+        return self._download_json(
+            'https://music.yandex.%s/handlers/%s.jsx' % (tld, ep),
+            item_id, note,
+            fatal=False,
+            headers={
+                'Referer': url,
+                'X-Requested-With': 'XMLHttpRequest',
+                'X-Retpath-Y': url,
+            },
+            query=query)
+
 
 class YandexMusicTrackIE(YandexMusicBaseIE):
     IE_NAME = 'yandexmusic:track'
     IE_DESC = 'Яндекс.Музыка - Трек'
-    _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)'
+    _VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)'
 
     _TESTS = [{
         'url': 'http://music.yandex.ru/album/540508/track/4878838',
-        'md5': 'f496818aa2f60b6c0062980d2e00dc20',
+        'md5': 'dec8b661f12027ceaba33318787fff76',
         'info_dict': {
             'id': '4878838',
             'ext': 'mp3',
-            'title': 'Carlo Ambrosio & Fabio Di Bari - Gypsy Eyes 1',
-            'filesize': 4628061,
+            'title': 'md5:c63e19341fdbe84e43425a30bc777856',
+            'filesize': int,
             'duration': 193.04,
-            'track': 'Gypsy Eyes 1',
-            'album': 'Gypsy Soul',
-            'album_artist': 'Carlo Ambrosio',
-            'artist': 'Carlo Ambrosio & Fabio Di Bari',
+            'track': 'md5:210508c6ffdfd67a493a6c378f22c3ff',
+            'album': 'md5:cd04fb13c4efeafdfa0a6a6aca36d01a',
+            'album_artist': 'md5:5f54c35462c07952df33d97cfb5fc200',
+            'artist': 'md5:e6fd86621825f14dc0b25db3acd68160',
             'release_year': 2009,
         },
-        'skip': 'Travis CI servers blocked by YandexMusic',
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
     }, {
         # multiple disks
         'url': 'http://music.yandex.ru/album/3840501/track/705105',
-        'md5': 'ebe7b4e2ac7ac03fe11c19727ca6153e',
+        'md5': '82a54e9e787301dd45aba093cf6e58c0',
         'info_dict': {
             'id': '705105',
             'ext': 'mp3',
-            'title': 'Hooverphonic - Sometimes',
-            'filesize': 5743386,
+            'title': 'md5:f86d4a9188279860a83000277024c1a6',
+            'filesize': int,
             'duration': 239.27,
-            'track': 'Sometimes',
-            'album': 'The Best of Hooverphonic',
-            'album_artist': 'Hooverphonic',
-            'artist': 'Hooverphonic',
+            'track': 'md5:40f887f0666ba1aa10b835aca44807d1',
+            'album': 'md5:624f5224b14f5c88a8e812fd7fbf1873',
+            'album_artist': 'md5:dd35f2af4e8927100cbe6f5e62e1fb12',
+            'artist': 'md5:dd35f2af4e8927100cbe6f5e62e1fb12',
             'release_year': 2016,
             'genre': 'pop',
             'disc_number': 2,
             'track_number': 9,
         },
-        'skip': 'Travis CI servers blocked by YandexMusic',
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
     }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        album_id, track_id = mobj.group('album_id'), mobj.group('id')
+        tld, album_id, track_id = mobj.group('tld'), mobj.group('album_id'), mobj.group('id')
 
-        track = self._download_json(
-            'http://music.yandex.ru/handlers/track.jsx?track=%s:%s' % (track_id, album_id),
-            track_id, 'Downloading track JSON')['track']
+        track = self._call_api(
+            'track', tld, url, track_id, 'Downloading track JSON',
+            {'track': '%s:%s' % (track_id, album_id)})['track']
         track_title = track['title']
 
         download_data = self._download_json(
@@ -179,42 +191,85 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
 
 
 class YandexMusicPlaylistBaseIE(YandexMusicBaseIE):
+    def _extract_tracks(self, source, item_id, url, tld):
+        tracks = source['tracks']
+        track_ids = [compat_str(track_id) for track_id in source['trackIds']]
+
+        # tracks dictionary shipped with playlist.jsx API is limited to 150 tracks,
+        # missing tracks should be retrieved manually.
+        if len(tracks) < len(track_ids):
+            present_track_ids = set([
+                compat_str(track['id'])
+                for track in tracks if track.get('id')])
+            missing_track_ids = [
+                track_id for track_id in track_ids
+                if track_id not in present_track_ids]
+            missing_tracks = self._call_api(
+                'track-entries', tld, url, item_id,
+                'Downloading missing tracks JSON', {
+                    'entries': ','.join(missing_track_ids),
+                    'lang': tld,
+                    'external-domain': 'music.yandex.%s' % tld,
+                    'overembed': 'false',
+                    'strict': 'true',
+                })
+            if missing_tracks:
+                tracks.extend(missing_tracks)
+
+        return tracks
+
     def _build_playlist(self, tracks):
-        return [
-            self.url_result(
-                'http://music.yandex.ru/album/%s/track/%s' % (track['albums'][0]['id'], track['id']))
-            for track in tracks if track.get('albums') and isinstance(track.get('albums'), list)]
+        entries = []
+        for track in tracks:
+            track_id = track.get('id') or track.get('realId')
+            if not track_id:
+                continue
+            albums = track.get('albums')
+            if not albums or not isinstance(albums, list):
+                continue
+            album = albums[0]
+            if not isinstance(album, dict):
+                continue
+            album_id = album.get('id')
+            if not album_id:
+                continue
+            entries.append(self.url_result(
+                'http://music.yandex.ru/album/%s/track/%s' % (album_id, track_id),
+                ie=YandexMusicTrackIE.ie_key(), video_id=track_id))
+        return entries
 
 
 class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE):
     IE_NAME = 'yandexmusic:album'
     IE_DESC = 'Яндекс.Музыка - Альбом'
-    _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<id>\d+)/?(\?|$)'
+    _VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/album/(?P<id>\d+)/?(\?|$)'
 
     _TESTS = [{
         'url': 'http://music.yandex.ru/album/540508',
         'info_dict': {
             'id': '540508',
-            'title': 'Carlo Ambrosio - Gypsy Soul (2009)',
+            'title': 'md5:7ed1c3567f28d14be9f61179116f5571',
         },
         'playlist_count': 50,
-        'skip': 'Travis CI servers blocked by YandexMusic',
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
     }, {
         'url': 'https://music.yandex.ru/album/3840501',
         'info_dict': {
             'id': '3840501',
-            'title': 'Hooverphonic - The Best of Hooverphonic (2016)',
+            'title': 'md5:36733472cdaa7dcb1fd9473f7da8e50f',
         },
         'playlist_count': 33,
-        'skip': 'Travis CI servers blocked by YandexMusic',
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
     }]
 
     def _real_extract(self, url):
-        album_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        tld = mobj.group('tld')
+        album_id = mobj.group('id')
 
-        album = self._download_json(
-            'http://music.yandex.ru/handlers/album.jsx?album=%s' % album_id,
-            album_id, 'Downloading album JSON')
+        album = self._call_api(
+            'album', tld, url, album_id, 'Downloading album JSON',
+            {'album': album_id})
 
         entries = self._build_playlist([track for volume in album['volumes'] for track in volume])
 
@@ -235,21 +290,24 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
         'url': 'http://music.yandex.ru/users/music.partners/playlists/1245',
         'info_dict': {
             'id': '1245',
-            'title': 'Что слушают Enter Shikari',
+            'title': 'md5:841559b3fe2b998eca88d0d2e22a3097',
             'description': 'md5:3b9f27b0efbe53f2ee1e844d07155cc9',
         },
-        'playlist_count': 6,
-        'skip': 'Travis CI servers blocked by YandexMusic',
+        'playlist_count': 5,
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
     }, {
-        # playlist exceeding the limit of 150 tracks shipped with webpage (see
-        # https://github.com/ytdl-org/youtube-dl/issues/6666)
         'url': 'https://music.yandex.ru/users/ya.playlist/playlists/1036',
+        'only_matching': True,
+    }, {
+        # playlist exceeding the limit of 150 tracks (see
+        # https://github.com/ytdl-org/youtube-dl/issues/6666)
+        'url': 'https://music.yandex.ru/users/mesiaz/playlists/1364',
         'info_dict': {
-            'id': '1036',
-            'title': 'Музыка 90-х',
+            'id': '1364',
+            'title': 'md5:b3b400f997d3f878a13ae0699653f7db',
         },
-        'playlist_mincount': 300,
-        'skip': 'Travis CI servers blocked by YandexMusic',
+        'playlist_mincount': 437,
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
     }]
 
     def _real_extract(self, url):
@@ -258,16 +316,8 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
         user = mobj.group('user')
         playlist_id = mobj.group('id')
 
-        playlist = self._download_json(
-            'https://music.yandex.%s/handlers/playlist.jsx' % tld,
-            playlist_id, 'Downloading missing tracks JSON',
-            fatal=False,
-            headers={
-                'Referer': url,
-                'X-Requested-With': 'XMLHttpRequest',
-                'X-Retpath-Y': url,
-            },
-            query={
+        playlist = self._call_api(
+            'playlist', tld, url, playlist_id, 'Downloading playlist JSON', {
                 'owner': user,
                 'kinds': playlist_id,
                 'light': 'true',
@@ -276,37 +326,103 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
                 'overembed': 'false',
             })['playlist']
 
-        tracks = playlist['tracks']
-        track_ids = [compat_str(track_id) for track_id in playlist['trackIds']]
-
-        # tracks dictionary shipped with playlist.jsx API is limited to 150 tracks,
-        # missing tracks should be retrieved manually.
-        if len(tracks) < len(track_ids):
-            present_track_ids = set([
-                compat_str(track['id'])
-                for track in tracks if track.get('id')])
-            missing_track_ids = [
-                track_id for track_id in track_ids
-                if track_id not in present_track_ids]
-            missing_tracks = self._download_json(
-                'https://music.yandex.%s/handlers/track-entries.jsx' % tld,
-                playlist_id, 'Downloading missing tracks JSON',
-                fatal=False,
-                headers={
-                    'Referer': url,
-                    'X-Requested-With': 'XMLHttpRequest',
-                },
-                query={
-                    'entries': ','.join(missing_track_ids),
-                    'lang': tld,
-                    'external-domain': 'music.yandex.%s' % tld,
-                    'overembed': 'false',
-                    'strict': 'true',
-                })
-            if missing_tracks:
-                tracks.extend(missing_tracks)
+        tracks = self._extract_tracks(playlist, playlist_id, url, tld)
 
         return self.playlist_result(
             self._build_playlist(tracks),
             compat_str(playlist_id),
             playlist.get('title'), playlist.get('description'))
+
+
+class YandexMusicArtistBaseIE(YandexMusicPlaylistBaseIE):
+    def _call_artist(self, tld, url, artist_id):
+        return self._call_api(
+            'artist', tld, url, artist_id,
+            'Downloading artist %s JSON' % self._ARTIST_WHAT, {
+                'artist': artist_id,
+                'what': self._ARTIST_WHAT,
+                'sort': self._ARTIST_SORT or '',
+                'dir': '',
+                'period': '',
+                'lang': tld,
+                'external-domain': 'music.yandex.%s' % tld,
+                'overembed': 'false',
+            })
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        tld = mobj.group('tld')
+        artist_id = mobj.group('id')
+        data = self._call_artist(tld, url, artist_id)
+        tracks = self._extract_tracks(data, artist_id, url, tld)
+        title = try_get(data, lambda x: x['artist']['name'], compat_str)
+        return self.playlist_result(
+            self._build_playlist(tracks), artist_id, title)
+
+
+class YandexMusicArtistTracksIE(YandexMusicArtistBaseIE):
+    IE_NAME = 'yandexmusic:artist:tracks'
+    IE_DESC = 'Яндекс.Музыка - Артист - Треки'
+    _VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/artist/(?P<id>\d+)/tracks'
+
+    _TESTS = [{
+        'url': 'https://music.yandex.ru/artist/617526/tracks',
+        'info_dict': {
+            'id': '617526',
+            'title': 'md5:131aef29d45fd5a965ca613e708c040b',
+        },
+        'playlist_count': 507,
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
+    }]
+
+    _ARTIST_SORT = ''
+    _ARTIST_WHAT = 'tracks'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        tld = mobj.group('tld')
+        artist_id = mobj.group('id')
+        data = self._call_artist(tld, url, artist_id)
+        tracks = self._extract_tracks(data, artist_id, url, tld)
+        artist = try_get(data, lambda x: x['artist']['name'], compat_str)
+        title = '%s - %s' % (artist or artist_id, 'Треки')
+        return self.playlist_result(
+            self._build_playlist(tracks), artist_id, title)
+
+
+class YandexMusicArtistAlbumsIE(YandexMusicArtistBaseIE):
+    IE_NAME = 'yandexmusic:artist:albums'
+    IE_DESC = 'Яндекс.Музыка - Артист - Альбомы'
+    _VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/artist/(?P<id>\d+)/albums'
+
+    _TESTS = [{
+        'url': 'https://music.yandex.ru/artist/617526/albums',
+        'info_dict': {
+            'id': '617526',
+            'title': 'md5:55dc58d5c85699b7fb41ee926700236c',
+        },
+        'playlist_count': 8,
+        # 'skip': 'Travis CI servers blocked by YandexMusic',
+    }]
+
+    _ARTIST_SORT = 'year'
+    _ARTIST_WHAT = 'albums'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        tld = mobj.group('tld')
+        artist_id = mobj.group('id')
+        data = self._call_artist(tld, url, artist_id)
+        entries = []
+        for album in data['albums']:
+            if not isinstance(album, dict):
+                continue
+            album_id = album.get('id')
+            if not album_id:
+                continue
+            entries.append(self.url_result(
+                'http://music.yandex.ru/album/%s' % album_id,
+                ie=YandexMusicAlbumIE.ie_key(), video_id=album_id))
+        artist = try_get(data, lambda x: x['artist']['name'], compat_str)
+        title = '%s - %s' % (artist or artist_id, 'Альбомы')
+        return self.playlist_result(entries, artist_id, title)

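The refactored _extract_tracks keeps the old 150-track workaround; a pure-Python sketch of that bookkeeping with the track-entries API call replaced by a stub:

    # the playlist JSON ships at most 150 track objects, the remaining ids
    # have to be resolved through a second (track-entries) request
    def extract_tracks(source, fetch_entries):
        tracks = source['tracks']
        track_ids = [str(track_id) for track_id in source['trackIds']]
        if len(tracks) < len(track_ids):
            present_ids = set(str(t['id']) for t in tracks if t.get('id'))
            missing_ids = [tid for tid in track_ids if tid not in present_ids]
            tracks.extend(fetch_entries(missing_ids) or [])
        return tracks

    source = {'tracks': [{'id': 1}, {'id': 2}], 'trackIds': [1, 2, 3, 4]}
    print(extract_tracks(source, lambda ids: [{'id': int(i)} for i in ids]))
    # [{'id': 1}, {'id': 2}, {'id': 3}, {'id': 4}]
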
From cfeba5d17f5f621b129571213e7034c44c719e76 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 30 Nov 2020 09:45:44 +0100
Subject: [PATCH 268/340] [mediaset] add support for movie URLs (closes #27240)

---
 youtube_dl/extractor/mediaset.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/mediaset.py b/youtube_dl/extractor/mediaset.py
index 933df1495..2c16fc9e2 100644
--- a/youtube_dl/extractor/mediaset.py
+++ b/youtube_dl/extractor/mediaset.py
@@ -23,7 +23,7 @@ class MediasetIE(ThePlatformBaseIE):
                         https?://
                             (?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/
                             (?:
-                                (?:video|on-demand)/(?:[^/]+/)+[^/]+_|
+                                (?:video|on-demand|movie)/(?:[^/]+/)+[^/]+_|
                                 player/index\.html\?.*?\bprogramGuid=
                             )
                     )(?P<id>[0-9A-Z]{16,})
@@ -88,6 +88,9 @@ class MediasetIE(ThePlatformBaseIE):
     }, {
         'url': 'https://www.mediasetplay.mediaset.it/video/grandefratellovip/benedetta-una-doccia-gelata_F309344401044C135',
         'only_matching': True,
+    }, {
+        'url': 'https://www.mediasetplay.mediaset.it/movie/herculeslaleggendahainizio/hercules-la-leggenda-ha-inizio_F305927501000102',
+        'only_matching': True,
     }]
 
     @staticmethod

From 59d63d8d4aa82c7b1472d5b6a0f9f4e9c2893212 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 Dec 2020 00:49:03 +0700
Subject: [PATCH 269/340] [youtube] Improve age-gated videos extraction (closes
 #27259)

---
 youtube_dl/extractor/youtube.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 4616ac53d..6fdc379cd 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -602,7 +602,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
             }
         },
-        # Normal age-gate video (No vevo, embed allowed)
+        # Normal age-gate video (No vevo, embed allowed), available via embed page
         {
             'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
             'info_dict': {
@@ -618,6 +618,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'age_limit': 18,
             },
         },
+        {
+            # Age-gated video only available with authentication (unavailable
+            # via embed page workaround)
+            'url': 'XgnwCQzjau8',
+            'only_matching': True,
+        },
         # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
         # YouTube Red ad is not captured for creator
         {
@@ -1637,8 +1643,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         # Get video info
         video_info = {}
         embed_webpage = None
-        if (self._og_search_property('restrictions:age', video_webpage, default=None) == '18+'
-                or re.search(r'player-age-gate-content">', video_webpage) is not None):
+
+        if re.search(r'["\']status["\']\s*:\s*["\']LOGIN_REQUIRED', video_webpage) is not None:
             age_gate = True
             # We simulate the access to the video from www.youtube.com/v/{video_id}
             # this can be viewed without login into Youtube

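A tiny check of the new age-gate trigger against a synthetic player-response snippet (the og:restrictions and player-age-gate-content markers the old detection relied on had stopped being reliable, hence the replacement):

    import re

    snippet = '"playabilityStatus": {"status": "LOGIN_REQUIRED", "reason": "..."}'
    print(bool(re.search(
        r'["\']status["\']\s*:\s*["\']LOGIN_REQUIRED', snippet)))  # True
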
From be19ae11fde9eb8cb28ff967ded15b457b5f80ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roman=20Ber=C3=A1nek?= <zavorka@users.noreply.github.com>
Date: Mon, 30 Nov 2020 19:14:29 +0100
Subject: [PATCH 270/340] [cspan] Pass Referer header with format's video URL
 (#26032) (closes #25729)

---
 youtube_dl/extractor/cspan.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/cspan.py b/youtube_dl/extractor/cspan.py
index 67d6df4b0..3356cc280 100644
--- a/youtube_dl/extractor/cspan.py
+++ b/youtube_dl/extractor/cspan.py
@@ -165,6 +165,8 @@ class CSpanIE(InfoExtractor):
                 formats = self._extract_m3u8_formats(
                     path, video_id, 'mp4', entry_protocol='m3u8_native',
                     m3u8_id='hls') if determine_ext(path) == 'm3u8' else [{'url': path, }]
+            for f in formats:
+                f.setdefault('http_headers', {})['Referer'] = url
             self._sort_formats(formats)
             entries.append({
                 'id': '%s_%d' % (video_id, partnum + 1),

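The loop above is what the follow-up commit factors out into an add_referer helper; written as a standalone function (with a placeholder format list and page URL) it is simply:

    def add_referer(formats, url):
        # every format dict carries an http_headers dict with the page URL
        # as Referer, which is then sent with each media request
        for f in formats:
            f.setdefault('http_headers', {})['Referer'] = url

    formats = [{'url': 'https://example.invalid/master.m3u8'}]
    add_referer(formats, 'https://page.example.invalid/video')
    print(formats[0]['http_headers'])
    # {'Referer': 'https://page.example.invalid/video'}
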
From 3e4e338133463e57125ade82fe5dfd57bb42dc1a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 Dec 2020 01:53:12 +0700
Subject: [PATCH 271/340] [cspan] Extract info from jwplayer data (closes
 #3672, closes #3734, closes #10638, closes #13030, closes #18806, closes
 #23148, closes #24461, closes #26171, closes #26800, closes #27263)

---
 youtube_dl/extractor/cspan.py | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/cspan.py b/youtube_dl/extractor/cspan.py
index 3356cc280..766942146 100644
--- a/youtube_dl/extractor/cspan.py
+++ b/youtube_dl/extractor/cspan.py
@@ -10,6 +10,8 @@ from ..utils import (
     find_xpath_attr,
     get_element_by_class,
     int_or_none,
+    js_to_json,
+    merge_dicts,
     smuggle_url,
     unescapeHTML,
 )
@@ -98,6 +100,26 @@ class CSpanIE(InfoExtractor):
                     bc_attr['data-bcid'])
                 return self.url_result(smuggle_url(bc_url, {'source_url': url}))
 
+        def add_referer(formats):
+            for f in formats:
+                f.setdefault('http_headers', {})['Referer'] = url
+
+        # As of 01.12.2020 this path looks to cover all cases making the rest
+        # of the code unnecessary
+        jwsetup = self._parse_json(
+            self._search_regex(
+                r'(?s)jwsetup\s*=\s*({.+?})\s*;', webpage, 'jwsetup',
+                default='{}'),
+            video_id, transform_source=js_to_json, fatal=False)
+        if jwsetup:
+            info = self._parse_jwplayer_data(
+                jwsetup, video_id, require_title=False, m3u8_id='hls',
+                base_url=url)
+            add_referer(info['formats'])
+            ld_info = self._search_json_ld(webpage, video_id, default={})
+            return merge_dicts(info, ld_info)
+
+        # Obsolete
         # We first look for clipid, because clipprog always appears before
         patterns = [r'id=\'clip(%s)\'\s*value=\'([0-9]+)\'' % t for t in ('id', 'prog')]
         results = list(filter(None, (re.search(p, webpage) for p in patterns)))
@@ -165,8 +187,7 @@ class CSpanIE(InfoExtractor):
                 formats = self._extract_m3u8_formats(
                     path, video_id, 'mp4', entry_protocol='m3u8_native',
                     m3u8_id='hls') if determine_ext(path) == 'm3u8' else [{'url': path, }]
-            for f in formats:
-                f.setdefault('http_headers', {})['Referer'] = url
+            add_referer(formats)
             self._sort_formats(formats)
             entries.append({
                 'id': '%s_%d' % (video_id, partnum + 1),

From 132aece1ed824aafd849ef63dcb44c835eab6ee9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 1 Dec 2020 04:44:10 +0700
Subject: [PATCH 272/340] [youtube:tab] Extract channels only from channels tab
 (closes #27266)

---
 youtube_dl/extractor/youtube.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 6fdc379cd..b796f58b2 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2796,12 +2796,17 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             # TODO
             pass
 
-    def _shelf_entries(self, shelf_renderer):
+    def _shelf_entries(self, shelf_renderer, skip_channels=False):
         ep = try_get(
             shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
             compat_str)
         shelf_url = urljoin('https://www.youtube.com', ep)
         if shelf_url:
+            # Skipping links to other channels; note that checking for
+            # endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
+            # will not work
+            if skip_channels and '/channels?' in shelf_url:
+                return
             title = try_get(
                 shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
             yield self.url_result(shelf_url, video_title=title)
@@ -2912,9 +2917,13 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             }
 
     def _entries(self, tab, identity_token):
-        slr_renderer = try_get(tab, lambda x: x['sectionListRenderer'], dict)
+        tab_content = try_get(tab, lambda x: x['content'], dict)
+        if not tab_content:
+            return
+        slr_renderer = try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
         if not slr_renderer:
             return
+        is_channels_tab = tab.get('title') == 'Channels'
         continuation = None
         slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
         for slr_content in slr_contents:
@@ -2941,7 +2950,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                     continue
                 renderer = isr_content.get('shelfRenderer')
                 if renderer:
-                    for entry in self._shelf_entries(renderer):
+                    for entry in self._shelf_entries(renderer, not is_channels_tab):
                         yield entry
                     continue
                 renderer = isr_content.get('backstagePostThreadRenderer')
@@ -3071,7 +3080,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             description = None
             playlist_id = item_id
         playlist = self.playlist_result(
-            self._entries(selected_tab['content'], identity_token),
+            self._entries(selected_tab, identity_token),
             playlist_id=playlist_id, playlist_title=title,
             playlist_description=description)
         playlist.update(self._extract_uploader(data))

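A stub-level sketch of the new behaviour with made-up shelf URLs: outside the Channels tab, shelves whose endpoint points at another channel's /channels listing are dropped, everything else is still yielded (the real _shelf_entries works on a single shelf renderer rather than a list of URLs):

    def shelf_entries(shelf_urls, skip_channels=False):
        for shelf_url in shelf_urls:
            # links to other channels only survive on the Channels tab itself
            if skip_channels and '/channels?' in shelf_url:
                continue
            yield shelf_url

    urls = ['https://www.youtube.com/c/somechannel/channels?view=0',
            'https://www.youtube.com/playlist?list=PLexample']
    print(list(shelf_entries(urls, skip_channels=True)))
    # ['https://www.youtube.com/playlist?list=PLexample']
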
From d3e142b3faca04ab21163595d7dcf308d52e5253 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 1 Dec 2020 10:11:51 +0100
Subject: [PATCH 273/340] [toggle] Add support for new MeWatch URLs (closes
 #27256)

---
 youtube_dl/extractor/extractors.py |  5 +-
 youtube_dl/extractor/toggle.py     | 74 +++++++++++++++++-------------
 2 files changed, 47 insertions(+), 32 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index e5327d375..048e72f60 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1185,7 +1185,10 @@ from .tnaflix import (
     EMPFlixIE,
     MovieFapIE,
 )
-from .toggle import ToggleIE
+from .toggle import (
+    ToggleIE,
+    MeWatchIE,
+)
 from .tonline import TOnlineIE
 from .toongoggles import ToonGogglesIE
 from .toutv import TouTvIE
diff --git a/youtube_dl/extractor/toggle.py b/youtube_dl/extractor/toggle.py
index ca2e36efe..cababa69e 100644
--- a/youtube_dl/extractor/toggle.py
+++ b/youtube_dl/extractor/toggle.py
@@ -11,13 +11,13 @@ from ..utils import (
     float_or_none,
     int_or_none,
     parse_iso8601,
-    sanitized_Request,
+    strip_or_none,
 )
 
 
 class ToggleIE(InfoExtractor):
     IE_NAME = 'toggle'
-    _VALID_URL = r'https?://(?:(?:www\.)?mewatch|video\.toggle)\.sg/(?:en|zh)/(?:[^/]+/){2,}(?P<id>[0-9]+)'
+    _VALID_URL = r'(?:https?://(?:(?:www\.)?mewatch|video\.toggle)\.sg/(?:en|zh)/(?:[^/]+/){2,}|toggle:)(?P<id>[0-9]+)'
     _TESTS = [{
         'url': 'http://www.mewatch.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115',
         'info_dict': {
@@ -96,16 +96,6 @@ class ToggleIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        webpage = self._download_webpage(
-            url, video_id, note='Downloading video page')
-
-        api_user = self._search_regex(
-            r'apiUser\s*:\s*(["\'])(?P<user>.+?)\1', webpage, 'apiUser',
-            default=self._API_USER, group='user')
-        api_pass = self._search_regex(
-            r'apiPass\s*:\s*(["\'])(?P<pass>.+?)\1', webpage, 'apiPass',
-            default=self._API_PASS, group='pass')
-
         params = {
             'initObj': {
                 'Locale': {
@@ -118,17 +108,16 @@ class ToggleIE(InfoExtractor):
                 'SiteGuid': 0,
                 'DomainID': '0',
                 'UDID': '',
-                'ApiUser': api_user,
-                'ApiPass': api_pass
+                'ApiUser': self._API_USER,
+                'ApiPass': self._API_PASS
             },
             'MediaID': video_id,
             'mediaType': 0,
         }
 
-        req = sanitized_Request(
+        info = self._download_json(
             'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo',
-            json.dumps(params).encode('utf-8'))
-        info = self._download_json(req, video_id, 'Downloading video info json')
+            video_id, 'Downloading video info json', data=json.dumps(params).encode('utf-8'))
 
         title = info['MediaName']
 
@@ -172,14 +161,6 @@ class ToggleIE(InfoExtractor):
             raise ExtractorError('No downloadable videos found', expected=True)
         self._sort_formats(formats)
 
-        duration = int_or_none(info.get('Duration'))
-        description = info.get('Description')
-        created_at = parse_iso8601(info.get('CreationDate') or None)
-
-        average_rating = float_or_none(info.get('Rating'))
-        view_count = int_or_none(info.get('ViewCounter') or info.get('view_counter'))
-        like_count = int_or_none(info.get('LikeCounter') or info.get('like_counter'))
-
         thumbnails = []
         for picture in info.get('Pictures', []):
             if not isinstance(picture, dict):
@@ -199,15 +180,46 @@ class ToggleIE(InfoExtractor):
                 })
             thumbnails.append(thumbnail)
 
+        def counter(prefix):
+            return int_or_none(
+                info.get(prefix + 'Counter') or info.get(prefix.lower() + '_counter'))
+
         return {
             'id': video_id,
             'title': title,
-            'description': description,
-            'duration': duration,
-            'timestamp': created_at,
-            'average_rating': average_rating,
-            'view_count': view_count,
-            'like_count': like_count,
+            'description': strip_or_none(info.get('Description')),
+            'duration': int_or_none(info.get('Duration')),
+            'timestamp': parse_iso8601(info.get('CreationDate') or None),
+            'average_rating': float_or_none(info.get('Rating')),
+            'view_count': counter('View'),
+            'like_count': counter('Like'),
             'thumbnails': thumbnails,
             'formats': formats,
         }
+
+
+class MeWatchIE(InfoExtractor):
+    IE_NAME = 'mewatch'
+    _VALID_URL = r'https?://(?:www\.)?mewatch\.sg/watch/[0-9a-zA-Z-]+-(?P<id>[0-9]+)'
+    _TESTS = [{
+        'url': 'https://www.mewatch.sg/watch/Recipe-Of-Life-E1-179371',
+        'info_dict': {
+            'id': '1008625',
+            'ext': 'mp4',
+            'title': 'Recipe Of Life 味之道',
+            'timestamp': 1603306526,
+            'description': 'md5:6e88cde8af2068444fc8e1bc3ebf257c',
+            'upload_date': '20201021',
+        },
+        'params': {
+            'skip_download': 'm3u8 download',
+        },
+    }]
+
+    def _real_extract(self, url):
+        item_id = self._match_id(url)
+        custom_id = self._download_json(
+            'https://cdn.mewatch.sg/api/items/' + item_id,
+            item_id, query={'segments': 'all'})['customId']
+        return self.url_result(
+            'toggle:' + custom_id, ToggleIE.ie_key(), custom_id)
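
The new MeWatchIE above never touches formats itself: it resolves the numeric ID at the end of a mewatch.sg watch URL to Toggle's customId through the public items API and then defers to ToggleIE via the new "toggle:<id>" URL form. A rough standalone sketch of that lookup (the helper name and the direct urllib call are illustrative, not part of the extractor):

# Standalone illustration of the MeWatch -> Toggle ID resolution added above.
# Assumes the cdn.mewatch.sg items API returns JSON containing a "customId"
# field, which is what the extractor relies on.
import json
import re
import urllib.request

def mewatch_to_toggle_url(page_url):
    # e.g. https://www.mewatch.sg/watch/Recipe-Of-Life-E1-179371 -> 179371
    item_id = re.search(r'-(\d+)(?:[?#]|$)', page_url).group(1)
    api_url = 'https://cdn.mewatch.sg/api/items/%s?segments=all' % item_id
    with urllib.request.urlopen(api_url) as resp:
        custom_id = json.loads(resp.read().decode('utf-8'))['customId']
    # ToggleIE now also accepts bare toggle:<id> URLs (see the _VALID_URL change)
    return 'toggle:' + custom_id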

From 95ac4de229921d7e59781179c7f7a9aaba34b5d5 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 1 Dec 2020 10:38:53 +0100
Subject: [PATCH 274/340] [toggle] Detect DRM protected videos (closes
 #16479) (closes #20805)

---
 youtube_dl/extractor/toggle.py | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/youtube_dl/extractor/toggle.py b/youtube_dl/extractor/toggle.py
index cababa69e..91b8023b8 100644
--- a/youtube_dl/extractor/toggle.py
+++ b/youtube_dl/extractor/toggle.py
@@ -84,12 +84,6 @@ class ToggleIE(InfoExtractor):
         'only_matching': True,
     }]
 
-    _FORMAT_PREFERENCES = {
-        'wvm-STBMain': -10,
-        'wvm-iPadMain': -20,
-        'wvm-iPhoneMain': -30,
-        'wvm-Android': -40,
-    }
     _API_USER = 'tvpapi_147'
     _API_PASS = '11111'
 
@@ -130,11 +124,16 @@ class ToggleIE(InfoExtractor):
             vid_format = vid_format.replace(' ', '')
             # if geo-restricted, m3u8 is inaccessible, but mp4 is okay
             if ext == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(
+                m3u8_formats = self._extract_m3u8_formats(
                     video_url, video_id, ext='mp4', m3u8_id=vid_format,
                     note='Downloading %s m3u8 information' % vid_format,
                     errnote='Failed to download %s m3u8 information' % vid_format,
-                    fatal=False))
+                    fatal=False)
+                for f in m3u8_formats:
+                    # Apple FairPlay Streaming
+                    if '/fpshls/' in f['url']:
+                        continue
+                    formats.append(f)
             elif ext == 'mpd':
                 formats.extend(self._extract_mpd_formats(
                     video_url, video_id, mpd_id=vid_format,
@@ -147,16 +146,17 @@ class ToggleIE(InfoExtractor):
                     note='Downloading %s ISM manifest' % vid_format,
                     errnote='Failed to download %s ISM manifest' % vid_format,
                     fatal=False))
-            elif ext in ('mp4', 'wvm'):
-                # wvm are drm-protected files
+            elif ext == 'mp4':
                 formats.append({
                     'ext': ext,
                     'url': video_url,
                     'format_id': vid_format,
-                    'preference': self._FORMAT_PREFERENCES.get(ext + '-' + vid_format) or -1,
-                    'format_note': 'DRM-protected video' if ext == 'wvm' else None
                 })
         if not formats:
+            for meta in (info.get('Metas') or []):
+                if meta.get('Key') == 'Encryption' and meta.get('Value') == '1':
+                    raise ExtractorError(
+                        'This video is DRM protected.', expected=True)
             # Most likely because geo-blocked
             raise ExtractorError('No downloadable videos found', expected=True)
         self._sort_formats(formats)
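
The DRM handling above is two-pronged: Apple FairPlay HLS renditions (recognisable by /fpshls/ in the playlist URL) are skipped while collecting formats, and if nothing playable is left the Metas array of the API response is inspected for an Encryption flag so the user gets an explicit DRM error rather than the generic "No downloadable videos found". A minimal sketch of that final check (data shape taken from the diff, function name illustrative):

# True when the Toggle/MeWatch media info marks the item as encrypted.
def is_drm_protected(info):
    for meta in info.get('Metas') or []:
        if meta.get('Key') == 'Encryption' and meta.get('Value') == '1':
            return True
    return False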

From 37d979ad33717c2b98d4f259d109c3b1c11906ae Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 1 Dec 2020 12:25:02 +0100
Subject: [PATCH 275/340] [tva] Add support for qub.ca (closes #27235)

---
 youtube_dl/extractor/tva.py | 65 +++++++++++++++++++++++++++----------
 1 file changed, 48 insertions(+), 17 deletions(-)

diff --git a/youtube_dl/extractor/tva.py b/youtube_dl/extractor/tva.py
index 443f46e8a..52a4ddf32 100644
--- a/youtube_dl/extractor/tva.py
+++ b/youtube_dl/extractor/tva.py
@@ -4,7 +4,9 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
     float_or_none,
+    int_or_none,
     smuggle_url,
+    strip_or_none,
 )
 
 
@@ -23,7 +25,8 @@ class TVAIE(InfoExtractor):
         'params': {
             # m3u8 download
             'skip_download': True,
-        }
+        },
+        'skip': 'HTTP Error 404: Not Found',
     }, {
         'url': 'https://video.tva.ca/details/_5596811470001',
         'only_matching': True,
@@ -32,26 +35,54 @@ class TVAIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        video_data = self._download_json(
-            'https://videos.tva.ca/proxy/item/_' + video_id, video_id, headers={
-                'Accept': 'application/json',
-            }, query={
-                'appId': '5955fc5f23eec60006c951f1',
-            })
-
-        def get_attribute(key):
-            for attribute in video_data.get('attributes', []):
-                if attribute.get('key') == key:
-                    return attribute.get('value')
-            return None
 
         return {
             '_type': 'url_transparent',
             'id': video_id,
-            'title': get_attribute('title'),
             'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['CA']}),
-            'description': get_attribute('description'),
-            'thumbnail': get_attribute('image-background') or get_attribute('image-landscape'),
-            'duration': float_or_none(get_attribute('video-duration'), 1000),
             'ie_key': 'BrightcoveNew',
         }
+
+
+class QubIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?qub\.ca/(?:[^/]+/)*[0-9a-z-]+-(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://www.qub.ca/tvaplus/tva/alerte-amber/saison-1/episode-01-1000036619',
+        'md5': '949490fd0e7aee11d0543777611fbd53',
+        'info_dict': {
+            'id': '6084352463001',
+            'ext': 'mp4',
+            'title': 'Épisode 01',
+            'uploader_id': '5481942443001',
+            'upload_date': '20190907',
+            'timestamp': 1567899756,
+            'description': 'md5:9c0d7fbb90939420c651fd977df90145',
+        },
+    }, {
+        'url': 'https://www.qub.ca/tele/video/lcn-ca-vous-regarde-rev-30s-ap369664-1009357943',
+        'only_matching': True,
+    }]
+    # reference_id also works with old account_id(5481942443001)
+    # BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5813221784001/default_default/index.html?videoId=ref:%s'
+
+    def _real_extract(self, url):
+        entity_id = self._match_id(url)
+        entity = self._download_json(
+            'https://www.qub.ca/proxy/pfu/content-delivery-service/v1/entities',
+            entity_id, query={'id': entity_id})
+        video_id = entity['videoId']
+        episode = strip_or_none(entity.get('name'))
+
+        return {
+            '_type': 'url_transparent',
+            'id': video_id,
+            'title': episode,
+            # 'url': self.BRIGHTCOVE_URL_TEMPLATE % entity['referenceId'],
+            'url': 'https://videos.tva.ca/details/_' + video_id,
+            'description': entity.get('longDescription'),
+            'duration': float_or_none(entity.get('durationMillis'), 1000),
+            'episode': episode,
+            'episode_number': int_or_none(entity.get('episodeNumber')),
+            # 'ie_key': 'BrightcoveNew',
+            'ie_key': TVAIE.ie_key(),
+        }

From 09d923f2ebad2a24e11abbe86c47ec1efa2b7dc5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 2 Dec 2020 01:22:43 +0700
Subject: [PATCH 276/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 966f9379f..02dc12645 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+version <unreleased>
+
+Extractors
++ [tva] Add support for qub.ca (#27235)
++ [toggle] Detect DRM protected videos (closes #16479)(closes #20805)
++ [toggle] Add support for new MeWatch URLs (#27256)
+* [youtube:tab] Extract channels only from channels tab (#27266)
++ [cspan] Extract info from jwplayer data (#3672, #3734, #10638, #13030,
+  #18806, #23148, #24461, #26171, #26800, #27263)
+* [cspan] Pass Referer header with format's video URL (#26032, #25729)
+* [youtube] Improve age-gated videos extraction (#27259)
++ [mediaset] Add support for movie URLs (#27240)
+* [yandexmusic] Refactor
++ [yandexmusic] Add support for artist's tracks and albums (#11887, #22284)
+* [yandexmusic:track] Fix extraction (#26449, #26669, #26747, #26748, #26762)
+
+
 version 2020.11.29
 
 Core

From 2bb70750a9bb27aea34cab0e17327f549bae5734 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 2 Dec 2020 01:37:40 +0700
Subject: [PATCH 277/340] release 2020.12.02

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 3 +++
 youtube_dl/version.py                            | 2 +-
 8 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 95b6f674f..5f0e33eaf 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.29
+ [debug] youtube-dl version 2020.12.02
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index a3b53f5f3..631c60958 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 476fdc3b7..19d1b53e2 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 2ae00f75e..6d2f3e16d 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.11.29
+ [debug] youtube-dl version 2020.12.02
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index f9b7edb49..82009b505 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.11.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.11.29**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index 02dc12645..65406eafd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.12.02
 
 Extractors
 + [tva] Add support for qub.ca (#27235)
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 702930e71..e5b470041 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -486,6 +486,7 @@
  - **META**
  - **metacafe**
  - **Metacritic**
+ - **mewatch**
  - **Mgoon**
  - **MGTV**: 芒果TV
  - **MiaoPai**
@@ -1132,6 +1133,8 @@
  - **yahoo:japannews**: Yahoo! Japan News
  - **YandexDisk**
  - **yandexmusic:album**: Яндекс.Музыка - Альбом
+ - **yandexmusic:artist:albums**: Яндекс.Музыка - Артист - Альбомы
+ - **yandexmusic:artist:tracks**: Яндекс.Музыка - Артист - Треки
  - **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
  - **yandexmusic:track**: Яндекс.Музыка - Трек
  - **YandexVideo**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index ebb8a200d..0b0a6c785 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.29'
+__version__ = '2020.12.02'

From c0820dd52ad2796b8ef159a0f6ab21ac28d4bf67 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 2 Dec 2020 00:41:00 +0100
Subject: [PATCH 278/340] [extractors] Add QubIE import

---
 youtube_dl/extractor/extractors.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 048e72f60..eb5a75f30 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1221,7 +1221,10 @@ from .tv2dk import (
 from .tv2hu import TV2HuIE
 from .tv4 import TV4IE
 from .tv5mondeplus import TV5MondePlusIE
-from .tva import TVAIE
+from .tva import (
+    TVAIE,
+    QubIE,
+)
 from .tvanouvelles import (
     TVANouvellesIE,
     TVANouvellesArticleIE,

From 4ded9c0f0018f45034070092e4899668f58b4690 Mon Sep 17 00:00:00 2001
From: opusforlife2 <53176348+opusforlife2@users.noreply.github.com>
Date: Wed, 2 Dec 2020 18:30:08 +0000
Subject: [PATCH 279/340] [pornhub] Add support for pornhub.org (#27276)

Most ISPs block the other two TLDs through deep packet inspection
---
 youtube_dl/extractor/pornhub.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index a50b6260c..69a9797aa 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -53,7 +53,7 @@ class PornHubIE(PornHubBaseIE):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
-                            (?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
+                            (?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                             (?:www\.)?thumbzilla\.com/video/
                         )
                         (?P<id>[\da-z]+)
@@ -152,6 +152,9 @@ class PornHubIE(PornHubBaseIE):
     }, {
         'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
         'only_matching': True,
+    }, {
+        'url': 'https://www.pornhub.org/view_video.php?viewkey=203640933',
+        'only_matching': True,
     }, {
         'url': 'https://www.pornhubpremium.com/view_video.php?viewkey=ph5e4acdae54a82',
         'only_matching': True,
@@ -160,7 +163,7 @@ class PornHubIE(PornHubBaseIE):
     @staticmethod
     def _extract_urls(webpage):
         return re.findall(
-            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net)/embed/[\da-z]+)',
+            r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub\.(?:com|net|org)/embed/[\da-z]+)',
             webpage)
 
     def _extract_count(self, pattern, webpage, name):
@@ -422,7 +425,7 @@ class PornHubPlaylistBaseIE(PornHubBaseIE):
 
 
 class PornHubUserIE(PornHubPlaylistBaseIE):
-    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/?#&]+))(?:[?#&]|/(?!videos)|$)'
+    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/?#&]+))(?:[?#&]|/(?!videos)|$)'
     _TESTS = [{
         'url': 'https://www.pornhub.com/model/zoe_ph',
         'playlist_mincount': 118,
@@ -490,7 +493,7 @@ class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
 
 
 class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
-    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net))/(?P<id>(?:[^/]+/)*[^/?#&]+)'
+    _VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?P<id>(?:[^/]+/)*[^/?#&]+)'
     _TESTS = [{
         'url': 'https://www.pornhub.com/model/zoe_ph/videos',
         'only_matching': True,
@@ -605,7 +608,7 @@ class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
 
 
 class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
-    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos/upload)'
+    _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos/upload)'
     _TESTS = [{
         'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
         'info_dict': {

From 64554c12e121a5dc0ab3f715fbf2452e5969a722 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 2 Dec 2020 21:36:51 +0100
Subject: [PATCH 280/340] [tver] Add new extractor (closes #26662) (closes
 #27284)

---
 youtube_dl/extractor/extractors.py |  2 +
 youtube_dl/extractor/fujitv.py     | 35 ++++++++++++++++
 youtube_dl/extractor/tver.py       | 67 ++++++++++++++++++++++++++++++
 3 files changed, 104 insertions(+)
 create mode 100644 youtube_dl/extractor/fujitv.py
 create mode 100644 youtube_dl/extractor/tver.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index eb5a75f30..03aff1d52 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -393,6 +393,7 @@ from .frontendmasters import (
     FrontendMastersLessonIE,
     FrontendMastersCourseIE
 )
+from .fujitv import FujiTVFODPlus7IE
 from .funimation import FunimationIE
 from .funk import FunkIE
 from .fusion import FusionIE
@@ -1233,6 +1234,7 @@ from .tvc import (
     TVCIE,
     TVCArticleIE,
 )
+from .tver import TVerIE
 from .tvigle import TvigleIE
 from .tvland import TVLandIE
 from .tvn24 import TVN24IE
diff --git a/youtube_dl/extractor/fujitv.py b/youtube_dl/extractor/fujitv.py
new file mode 100644
index 000000000..39685e075
--- /dev/null
+++ b/youtube_dl/extractor/fujitv.py
@@ -0,0 +1,35 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class FujiTVFODPlus7IE(InfoExtractor):
+    _VALID_URL = r'https?://i\.fod\.fujitv\.co\.jp/plus7/web/[0-9a-z]{4}/(?P<id>[0-9a-z]+)'
+    _BASE_URL = 'http://i.fod.fujitv.co.jp/'
+    _BITRATE_MAP = {
+        300: (320, 180),
+        800: (640, 360),
+        1200: (1280, 720),
+        2000: (1280, 720),
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        formats = self._extract_m3u8_formats(
+            self._BASE_URL + 'abr/pc_html5/%s.m3u8' % video_id, video_id)
+        for f in formats:
+            wh = self._BITRATE_MAP.get(f.get('tbr'))
+            if wh:
+                f.update({
+                    'width': wh[0],
+                    'height': wh[1],
+                })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': video_id,
+            'formats': formats,
+            'thumbnail': self._BASE_URL + 'pc/image/wbtn/wbtn_%s.jpg' % video_id,
+        }
diff --git a/youtube_dl/extractor/tver.py b/youtube_dl/extractor/tver.py
new file mode 100644
index 000000000..c5299722d
--- /dev/null
+++ b/youtube_dl/extractor/tver.py
@@ -0,0 +1,67 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    int_or_none,
+    remove_start,
+    smuggle_url,
+    try_get,
+)
+
+
+class TVerIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?tver\.jp/(?P<path>(?:corner|episode|feature)/(?P<id>f?\d+))'
+    # videos are only available for 7 days
+    _TESTS = [{
+        'url': 'https://tver.jp/corner/f0062178',
+        'only_matching': True,
+    }, {
+        'url': 'https://tver.jp/feature/f0062413',
+        'only_matching': True,
+    }, {
+        'url': 'https://tver.jp/episode/79622438',
+        'only_matching': True,
+    }]
+    _TOKEN = None
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
+
+    def _real_initialize(self):
+        self._TOKEN = self._download_json(
+            'https://tver.jp/api/access_token.php', None)['token']
+
+    def _real_extract(self, url):
+        path, video_id = re.match(self._VALID_URL, url).groups()
+        main = self._download_json(
+            'https://api.tver.jp/v4/' + path, video_id,
+            query={'token': self._TOKEN})['main']
+        p_id = main['publisher_id']
+        service = remove_start(main['service'], 'ts_')
+        info = {
+            '_type': 'url_transparent',
+            'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),
+            'episode': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
+        }
+
+        if service == 'cx':
+            info.update({
+                'title': main.get('subtitle') or main['title'],
+                'url': 'https://i.fod.fujitv.co.jp/plus7/web/%s/%s.html' % (p_id[:4], p_id),
+                'ie_key': 'FujiTVFODPlus7',
+            })
+        else:
+            r_id = main['reference_id']
+            if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
+                r_id = 'ref:' + r_id
+            bc_url = smuggle_url(
+                self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),
+                {'geo_countries': ['JP']})
+            info.update({
+                'url': bc_url,
+                'ie_key': 'BrightcoveNew',
+            })
+
+        return info
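
TVerIE is essentially a dispatcher: it fetches an anonymous access token, asks the v4 API about the corner/episode/feature path, and then defers either to the new FujiTVFODPlus7IE (Fuji TV content, service "cx") or to Brightcove via the publisher and reference IDs. A condensed sketch of that routing decision (function name illustrative; field names and the service whitelist are taken from the diff above):

# Given the "main" object returned by https://api.tver.jp/v4/<path>,
# return the URL the url_transparent result should point at.
def route_tver_main(main):
    publisher_id = main['publisher_id']
    service = main['service']
    if service.startswith('ts_'):
        service = service[len('ts_'):]
    if service == 'cx':
        # Fuji TV On Demand, handled by the new FujiTVFODPlus7IE
        return 'https://i.fod.fujitv.co.jp/plus7/web/%s/%s.html' % (
            publisher_id[:4], publisher_id)
    reference_id = main['reference_id']
    if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
        reference_id = 'ref:' + reference_id
    return 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s' % (
        publisher_id, reference_id)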

From 664dd8ba854238207fa1421a4f353606406dcded Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 2 Dec 2020 21:37:14 +0100
Subject: [PATCH 281/340] [extractor/common] improve Akamai HTTP format
 extraction

- Allow m3u8 manifest without an additional audio format
- Fix extraction for qualities starting with a number
Solution provided by @nixxo based on: https://stackoverflow.com/a/5984688
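
The second point is subtler than it looks: the HTTP variant URL is built with re.sub, and the replacement string interpolates the quality right after the first group reference. With a plain \1 and a quality such as 300, Python's re reads \1300 as a reference to group 13 followed by a literal 00 and raises "invalid group reference"; \g<1> keeps the group number unambiguous. A self-contained illustration (host and path are made up, the pattern is the csmil regex from the hunk below):

import re

m3u8_url = 'https://example-host/i/path/video_,300,600,900,.mp4.csmil/master.m3u8'
REPL_REGEX = r'https://[^/]+/i/([^,]+),([^/]+),([^/]+).csmil/.+'

# The comma-separated quality list sits in group 2 of the csmil URL.
qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')  # ['300', '600', '900']

# This raises re.error ("invalid group reference 13"):
#   re.sub(REPL_REGEX, r'https://http-host/\1%s\3' % qualities[0], m3u8_url)

# This works and yields https://http-host/path/video_300.mp4:
print(re.sub(REPL_REGEX, r'https://http-host/\g<1>%s\3' % qualities[0], m3u8_url))
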
---
 youtube_dl/extractor/common.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 16aff885c..4463e06b3 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2623,7 +2623,7 @@ class InfoExtractor(object):
             REPL_REGEX = r'https://[^/]+/i/([^,]+),([^/]+),([^/]+).csmil/.+'
             qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
             qualities_length = len(qualities)
-            if len(formats) in (qualities_length + 1, qualities_length * 2 + 1):
+            if len(formats) in (qualities_length, qualities_length + 1, qualities_length * 2, qualities_length * 2 + 1):
                 i = 0
                 http_formats = []
                 for f in formats:
@@ -2632,7 +2632,7 @@ class InfoExtractor(object):
                             http_f = f.copy()
                             del http_f['manifest_url']
                             http_url = re.sub(
-                                REPL_REGEX, protocol + r'://%s/\1%s\3' % (http_host, qualities[i]), f['url'])
+                                REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
                             http_f.update({
                                 'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
                                 'url': http_url,

From ea89680aeae7c26af355ac23038e3a45f116649e Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 2 Dec 2020 22:49:51 +0100
Subject: [PATCH 282/340] [tver] correct episode_number key

---
 youtube_dl/extractor/tver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/tver.py b/youtube_dl/extractor/tver.py
index c5299722d..931d4d650 100644
--- a/youtube_dl/extractor/tver.py
+++ b/youtube_dl/extractor/tver.py
@@ -43,7 +43,7 @@ class TVerIE(InfoExtractor):
         info = {
             '_type': 'url_transparent',
             'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),
-            'episode': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
+            'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
         }
 
         if service == 'cx':

From da4304609dd627b577d7b69c0a05851497fa384c Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 3 Dec 2020 00:33:55 +0100
Subject: [PATCH 283/340] [extractor/common] improve Akamai HTTP formats
 extraction

---
 youtube_dl/extractor/common.py | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 4463e06b3..a21afefeb 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2614,20 +2614,20 @@ class InfoExtractor(object):
         hls_host = hosts.get('hls')
         if hls_host:
             m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
-        formats.extend(self._extract_m3u8_formats(
+        m3u8_formats = self._extract_m3u8_formats(
             m3u8_url, video_id, 'mp4', 'm3u8_native',
-            m3u8_id='hls', fatal=False))
+            m3u8_id='hls', fatal=False)
+        formats.extend(m3u8_formats)
 
         http_host = hosts.get('http')
-        if http_host and 'hdnea=' not in manifest_url:
-            REPL_REGEX = r'https://[^/]+/i/([^,]+),([^/]+),([^/]+).csmil/.+'
+        if http_host and m3u8_formats and 'hdnea=' not in m3u8_url:
+            REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
             qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
             qualities_length = len(qualities)
-            if len(formats) in (qualities_length, qualities_length + 1, qualities_length * 2, qualities_length * 2 + 1):
+            if len(m3u8_formats) in (qualities_length, qualities_length + 1):
                 i = 0
-                http_formats = []
-                for f in formats:
-                    if f['protocol'] == 'm3u8_native' and f['vcodec'] != 'none':
+                for f in m3u8_formats:
+                    if f['vcodec'] != 'none':
                         for protocol in ('http', 'https'):
                             http_f = f.copy()
                             del http_f['manifest_url']
@@ -2638,9 +2638,8 @@ class InfoExtractor(object):
                                 'url': http_url,
                                 'protocol': protocol,
                             })
-                            http_formats.append(http_f)
+                            formats.append(http_f)
                         i += 1
-                formats.extend(http_formats)
 
         return formats
 

From 00254473691185e567006da00524eedbee9597aa Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Thu, 3 Dec 2020 00:35:47 +0100
Subject: [PATCH 284/340] [gamespot] Extract DASH and HTTP formats

---
 youtube_dl/extractor/gamespot.py | 110 +++++++------------------------
 1 file changed, 25 insertions(+), 85 deletions(-)

diff --git a/youtube_dl/extractor/gamespot.py b/youtube_dl/extractor/gamespot.py
index 4236a5ed8..7a1beae3c 100644
--- a/youtube_dl/extractor/gamespot.py
+++ b/youtube_dl/extractor/gamespot.py
@@ -1,16 +1,7 @@
 from __future__ import unicode_literals
 
-import re
-
 from .once import OnceIE
-from ..compat import (
-    compat_urllib_parse_unquote,
-)
-from ..utils import (
-    unescapeHTML,
-    url_basename,
-    dict_get,
-)
+from ..compat import compat_urllib_parse_unquote
 
 
 class GameSpotIE(OnceIE):
@@ -24,17 +15,16 @@ class GameSpotIE(OnceIE):
             'title': 'Arma 3 - Community Guide: SITREP I',
             'description': 'Check out this video where some of the basics of Arma 3 is explained.',
         },
+        'skip': 'manifest URL give HTTP Error 404: Not Found',
     }, {
         'url': 'http://www.gamespot.com/videos/the-witcher-3-wild-hunt-xbox-one-now-playing/2300-6424837/',
+        'md5': '173ea87ad762cf5d3bf6163dceb255a6',
         'info_dict': {
             'id': 'gs-2300-6424837',
             'ext': 'mp4',
             'title': 'Now Playing - The Witcher 3: Wild Hunt',
             'description': 'Join us as we take a look at the early hours of The Witcher 3: Wild Hunt and more.',
         },
-        'params': {
-            'skip_download': True,  # m3u8 downloads
-        },
     }, {
         'url': 'https://www.gamespot.com/videos/embed/6439218/',
         'only_matching': True,
@@ -49,90 +39,40 @@ class GameSpotIE(OnceIE):
     def _real_extract(self, url):
         page_id = self._match_id(url)
         webpage = self._download_webpage(url, page_id)
-        data_video_json = self._search_regex(
-            r'data-video=["\'](.*?)["\']', webpage, 'data video')
-        data_video = self._parse_json(unescapeHTML(data_video_json), page_id)
+        data_video = self._parse_json(self._html_search_regex(
+            r'data-video=(["\'])({.*?})\1', webpage,
+            'video data', group=2), page_id)
+        title = compat_urllib_parse_unquote(data_video['title'])
         streams = data_video['videoStreams']
-
-        manifest_url = None
         formats = []
-        f4m_url = streams.get('f4m_stream')
-        if f4m_url:
-            manifest_url = f4m_url
-            formats.extend(self._extract_f4m_formats(
-                f4m_url + '?hdcore=3.7.0', page_id, f4m_id='hds', fatal=False))
-        m3u8_url = dict_get(streams, ('m3u8_stream', 'adaptive_stream'))
+
+        m3u8_url = streams.get('adaptive_stream')
         if m3u8_url:
-            manifest_url = m3u8_url
             m3u8_formats = self._extract_m3u8_formats(
                 m3u8_url, page_id, 'mp4', 'm3u8_native',
                 m3u8_id='hls', fatal=False)
-            formats.extend(m3u8_formats)
-        progressive_url = dict_get(
-            streams, ('progressive_hd', 'progressive_high', 'progressive_low', 'other_lr'))
-        if progressive_url and manifest_url:
-            qualities_basename = self._search_regex(
-                r'/([^/]+)\.csmil/',
-                manifest_url, 'qualities basename', default=None)
-            if qualities_basename:
-                QUALITIES_RE = r'((,\d+)+,?)'
-                qualities = self._search_regex(
-                    QUALITIES_RE, qualities_basename,
-                    'qualities', default=None)
-                if qualities:
-                    qualities = list(map(lambda q: int(q), qualities.strip(',').split(',')))
-                    qualities.sort()
-                    http_template = re.sub(QUALITIES_RE, r'%d', qualities_basename)
-                    http_url_basename = url_basename(progressive_url)
-                    if m3u8_formats:
-                        self._sort_formats(m3u8_formats)
-                        m3u8_formats = list(filter(
-                            lambda f: f.get('vcodec') != 'none', m3u8_formats))
-                    if len(qualities) == len(m3u8_formats):
-                        for q, m3u8_format in zip(qualities, m3u8_formats):
-                            f = m3u8_format.copy()
-                            f.update({
-                                'url': progressive_url.replace(
-                                    http_url_basename, http_template % q),
-                                'format_id': f['format_id'].replace('hls', 'http'),
-                                'protocol': 'http',
-                            })
-                            formats.append(f)
-                    else:
-                        for q in qualities:
-                            formats.append({
-                                'url': progressive_url.replace(
-                                    http_url_basename, http_template % q),
-                                'ext': 'mp4',
-                                'format_id': 'http-%d' % q,
-                                'tbr': q,
-                            })
+            for f in m3u8_formats:
+                formats.append(f)
+                http_f = f.copy()
+                del http_f['manifest_url']
+                http_f.update({
+                    'format_id': f['format_id'].replace('hls-', 'http-'),
+                    'protocol': 'http',
+                    'url': f['url'].replace('.m3u8', '.mp4'),
+                })
+                formats.append(http_f)
 
-        onceux_json = self._search_regex(
-            r'data-onceux-options=["\'](.*?)["\']', webpage, 'data video', default=None)
-        if onceux_json:
-            onceux_url = self._parse_json(unescapeHTML(onceux_json), page_id).get('metadataUri')
-            if onceux_url:
-                formats.extend(self._extract_once_formats(re.sub(
-                    r'https?://[^/]+', 'http://once.unicornmedia.com', onceux_url),
-                    http_formats_preference=-1))
+        mpd_url = streams.get('adaptive_dash')
+        if mpd_url:
+            formats.extend(self._extract_mpd_formats(
+                mpd_url, page_id, mpd_id='dash', fatal=False))
 
-        if not formats:
-            for quality in ['sd', 'hd']:
-                # It's actually a link to a flv file
-                flv_url = streams.get('f4m_{0}'.format(quality))
-                if flv_url is not None:
-                    formats.append({
-                        'url': flv_url,
-                        'ext': 'flv',
-                        'format_id': quality,
-                    })
         self._sort_formats(formats)
 
         return {
-            'id': data_video['guid'],
+            'id': data_video.get('guid') or page_id,
             'display_id': page_id,
-            'title': compat_urllib_parse_unquote(data_video['title']),
+            'title': title,
             'formats': formats,
             'description': self._html_search_meta('description', webpage),
             'thumbnail': self._og_search_thumbnail(webpage),
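
Instead of reconstructing progressive URLs from the csmil quality list, the rewritten extractor mirrors every HLS rendition into a plain HTTP format by swapping the .m3u8 extension for .mp4, on the assumption that GameSpot serves a matching MP4 at the same path. A dict-level restatement of that loop (illustrative helper, not extractor code):

def hls_to_http_format(hls_format):
    # hls_format is one entry returned by _extract_m3u8_formats()
    http_format = dict(hls_format)
    http_format.pop('manifest_url', None)
    http_format.update({
        'format_id': hls_format['format_id'].replace('hls-', 'http-'),
        'protocol': 'http',
        'url': hls_format['url'].replace('.m3u8', '.mp4'),
    })
    return http_format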

From beab2f88c9c83c2dba3d539f06133345542b7f03 Mon Sep 17 00:00:00 2001
From: Matthew Rayermann <matthew.rayermann@gmail.com>
Date: Wed, 2 Dec 2020 23:47:56 -0800
Subject: [PATCH 285/340] [nhk] Add audio clip test to NHK extractor (#27269)

---
 youtube_dl/extractor/nhk.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/nhk.py b/youtube_dl/extractor/nhk.py
index de6a707c4..6a61a47d2 100644
--- a/youtube_dl/extractor/nhk.py
+++ b/youtube_dl/extractor/nhk.py
@@ -10,7 +10,7 @@ class NhkVodIE(InfoExtractor):
     # Content available only for a limited period of time. Visit
     # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
     _TESTS = [{
-        # clip
+        # video clip
         'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',
         'md5': '256a1be14f48d960a7e61e2532d95ec3',
         'info_dict': {
@@ -21,6 +21,19 @@ class NhkVodIE(InfoExtractor):
             'timestamp': 1565965194,
             'upload_date': '20190816',
         },
+    }, {
+        # audio clip
+        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/',
+        'info_dict': {
+            'id': 'r_inventions-20201104-1-en',
+            'ext': 'm4a',
+            'title': "Japan's Top Inventions - Miniature Video Cameras",
+            'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b',
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
     }, {
         'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',
         'only_matching': True,

From 1c78cb118c306541984c7f5e2c40fd4e3ed1c94c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 3 Dec 2020 23:25:36 +0700
Subject: [PATCH 286/340] [travis] Disable download jobs

Until youtube-dl gets OSS approval from Travis or moves to GitHub Actions
[ci skip]
---
 .travis.yml | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 51afd469a..d828d027d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,29 +12,29 @@ python:
 dist: trusty
 env:
   - YTDL_TEST_SET=core
-  - YTDL_TEST_SET=download
+#  - YTDL_TEST_SET=download
 jobs:
   include:
     - python: 3.7
       dist: xenial
       env: YTDL_TEST_SET=core
-    - python: 3.7
-      dist: xenial
-      env: YTDL_TEST_SET=download
+#    - python: 3.7
+#      dist: xenial
+#      env: YTDL_TEST_SET=download
     - python: 3.8
       dist: xenial
       env: YTDL_TEST_SET=core
-    - python: 3.8
-      dist: xenial
-      env: YTDL_TEST_SET=download
+#    - python: 3.8
+#      dist: xenial
+#      env: YTDL_TEST_SET=download
     - python: 3.8-dev
       dist: xenial
       env: YTDL_TEST_SET=core
-    - python: 3.8-dev
-      dist: xenial
-      env: YTDL_TEST_SET=download
+#    - python: 3.8-dev
+#      dist: xenial
+#      env: YTDL_TEST_SET=download
     - env: JYTHON=true; YTDL_TEST_SET=core
-    - env: JYTHON=true; YTDL_TEST_SET=download
+#    - env: JYTHON=true; YTDL_TEST_SET=download
     - name: flake8
       python: 3.8
       dist: xenial
@@ -42,9 +42,9 @@ jobs:
       script: flake8 .
   fast_finish: true
   allow_failures:
-    - env: YTDL_TEST_SET=download
+#    - env: YTDL_TEST_SET=download
     - env: JYTHON=true; YTDL_TEST_SET=core
-    - env: JYTHON=true; YTDL_TEST_SET=download
+#    - env: JYTHON=true; YTDL_TEST_SET=download
 before_install:
   - if [ "$JYTHON" == "true" ]; then ./devscripts/install_jython.sh; export PATH="$HOME/jython/bin:$PATH"; fi
 script: ./devscripts/run_tests.sh

From 2e4726423544041887641df18e2f4c4c5e3f4f6d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 4 Dec 2020 14:16:03 +0100
Subject: [PATCH 287/340] [zdf] extract webm formats (closes #26659)

---
 youtube_dl/extractor/zdf.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py
index 656864b2e..5ed2946c2 100644
--- a/youtube_dl/extractor/zdf.py
+++ b/youtube_dl/extractor/zdf.py
@@ -40,7 +40,7 @@ class ZDFBaseIE(InfoExtractor):
 
 class ZDFIE(ZDFBaseIE):
     _VALID_URL = r'https?://www\.zdf\.de/(?:[^/]+/)*(?P<id>[^/?]+)\.html'
-    _QUALITIES = ('auto', 'low', 'med', 'high', 'veryhigh')
+    _QUALITIES = ('auto', 'low', 'med', 'high', 'veryhigh', 'hd')
     _GEO_COUNTRIES = ['DE']
 
     _TESTS = [{
@@ -119,7 +119,7 @@ class ZDFIE(ZDFBaseIE):
         if not ptmd_path:
             ptmd_path = t[
                 'http://zdf.de/rels/streams/ptmd-template'].replace(
-                '{playerId}', 'portal')
+                '{playerId}', 'ngplayer_2_4')
 
         ptmd = self._call_api(
             urljoin(url, ptmd_path), player, url, video_id, 'metadata')

From c5636e9bcaf37f2190b1e7d73785fde399001c29 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 4 Dec 2020 23:38:42 +0700
Subject: [PATCH 288/340] [nrktv] Relax _VALID_URL (closes #27299, closes
 #26185)

---
 youtube_dl/extractor/nrk.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 4a395546f..0c4b126ed 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -146,7 +146,7 @@ class NRKTVIE(NRKBaseIE):
     _VALID_URL = r'''(?x)
                         https?://
                             (?:tv|radio)\.nrk(?:super)?\.no/
-                            (?:serie(?:/[^/]+){1,2}|program)/
+                            (?:serie(?:/[^/]+){1,}|program)/
                             (?![Ee]pisodes)%s
                             (?:/\d{2}-\d{2}-\d{4})?
                             (?:\#del=(?P<part_id>\d+))?
@@ -275,6 +275,9 @@ class NRKTVIE(NRKBaseIE):
     }, {
         'url': 'https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller',
         'only_matching': True,
+    }, {
+        'url': 'https://radio.nrk.no/serie/dagsnytt/sesong/201507/NPUB21019315',
+        'only_matching': True,
     }]
 
     _api_host = None

From e91df0c5501bd5df9987e310a37df51129ee1cf2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Fri, 4 Dec 2020 23:56:50 +0700
Subject: [PATCH 289/340] [pornhub] Handle HTTP errors gracefully (closes
 #26414)

---
 youtube_dl/extractor/pornhub.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 69a9797aa..9ad92a8ec 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -31,7 +31,12 @@ class PornHubBaseIE(InfoExtractor):
         def dl(*args, **kwargs):
             return super(PornHubBaseIE, self)._download_webpage_handle(*args, **kwargs)
 
-        webpage, urlh = dl(*args, **kwargs)
+        ret = dl(*args, **kwargs)
+
+        if not ret:
+            return ret
+
+        webpage, urlh = ret
 
         if any(re.search(p, webpage) for p in (
                 r'<body\b[^>]+\bonload=["\']go\(\)',

From 5e95e18ce949e759e8e26de76c386c44e50b2abd Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 4 Dec 2020 18:04:38 +0100
Subject: [PATCH 290/340] [nrk] improve format extraction and geo-restriction
 detection (closes #24221)

---
 youtube_dl/extractor/nrk.py | 43 ++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 22 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 0c4b126ed..19d820f61 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -24,6 +24,11 @@ from ..utils import (
 class NRKBaseIE(InfoExtractor):
     _GEO_COUNTRIES = ['NO']
 
+    def _extract_nrk_formats(self, asset_url, video_id):
+        return self._extract_m3u8_formats(
+            re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url),
+            video_id, 'mp4', 'm3u8_native', fatal=False)
+
 
 class NRKIE(NRKBaseIE):
     _VALID_URL = r'''(?x)
@@ -94,9 +99,7 @@ class NRKIE(NRKBaseIE):
             if not format_url:
                 continue
             if asset.get('format') == 'HLS' or determine_ext(format_url) == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(
-                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                    m3u8_id='hls', fatal=False))
+                formats.extend(self._extract_nrk_formats(format_url, video_id))
         self._sort_formats(formats)
 
         data = self._download_json(
@@ -298,6 +301,7 @@ class NRKTVIE(NRKBaseIE):
         title = data.get('fullTitle') or data.get('mainTitle') or data['title']
         video_id = data.get('id') or video_id
 
+        urls = []
         entries = []
 
         conviva = data.get('convivaStatistics') or {}
@@ -314,19 +318,13 @@ class NRKTVIE(NRKBaseIE):
                         else ('%s-%d' % (video_id, idx), '%s (Part %d)' % (title, idx)))
             for num, asset in enumerate(media_assets, 1):
                 asset_url = asset.get('url')
-                if not asset_url:
+                if not asset_url or asset_url in urls:
                     continue
-                formats = self._extract_akamai_formats(asset_url, video_id)
+                formats = extract_nrk_formats(asset_url, video_id)
                 if not formats:
                     continue
                 self._sort_formats(formats)
 
-                # Some f4m streams may not work with hdcore in fragments' URLs
-                for f in formats:
-                    extra_param = f.get('extra_param_to_segment_url')
-                    if extra_param and 'hdcore' in extra_param:
-                        del f['extra_param_to_segment_url']
-
                 entry_id, entry_title = video_id_and_title(num)
                 duration = parse_duration(asset.get('duration'))
                 subtitles = {}
@@ -346,16 +344,17 @@ class NRKTVIE(NRKBaseIE):
 
         if not entries:
             media_url = data.get('mediaUrl')
-            if media_url:
-                formats = self._extract_akamai_formats(media_url, video_id)
-                self._sort_formats(formats)
-                duration = parse_duration(data.get('duration'))
-                entries = [{
-                    'id': video_id,
-                    'title': make_title(title),
-                    'duration': duration,
-                    'formats': formats,
-                }]
+            if media_url and media_url not in urls:
+                formats = extract_nrk_formats(media_url, video_id)
+                if formats:
+                    self._sort_formats(formats)
+                    duration = parse_duration(data.get('duration'))
+                    entries = [{
+                        'id': video_id,
+                        'title': make_title(title),
+                        'duration': duration,
+                        'formats': formats,
+                    }]
 
         if not entries:
             MESSAGES = {
@@ -366,7 +365,7 @@ class NRKTVIE(NRKBaseIE):
             }
             message_type = data.get('messageType', '')
             # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
-            if 'IsGeoBlocked' in message_type:
+            if 'IsGeoBlocked' in message_type or try_get(data, lambda x: x['usageRights']['isGeoBlocked']) is Trues:
                 self.raise_geo_restricted(
                     msg=MESSAGES.get('ProgramIsGeoBlocked'),
                     countries=self._GEO_COUNTRIES)
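
The new _extract_nrk_formats helper strips the bw_low=/bw_high= bitrate caps and the no_audio_only flag from the manifest URL before handing it to the m3u8 extractor, presumably so the master playlist lists the full set of renditions rather than a capped subset. For example (URL and parameter values are made up):

import re

asset_url = ('https://nrk-od.example.net/master.m3u8'
             '?bw_low=10&bw_high=2500&no_audio_only&foo=1')
cleaned = re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url)
print(cleaned)  # https://nrk-od.example.net/master.m3u8?foo=1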

From 957c65b9eaeaf181d76c6aa815a8f946c090129b Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 4 Dec 2020 18:05:27 +0100
Subject: [PATCH 291/340] [nrk] fix typo

---
 youtube_dl/extractor/nrk.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 19d820f61..0f69579c5 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -365,7 +365,7 @@ class NRKTVIE(NRKBaseIE):
             }
             message_type = data.get('messageType', '')
             # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
-            if 'IsGeoBlocked' in message_type or try_get(data, lambda x: x['usageRights']['isGeoBlocked']) is Trues:
+            if 'IsGeoBlocked' in message_type or try_get(data, lambda x: x['usageRights']['isGeoBlocked']) is True:
                 self.raise_geo_restricted(
                     msg=MESSAGES.get('ProgramIsGeoBlocked'),
                     countries=self._GEO_COUNTRIES)

From fe0c28f9569bb99f3bddd59b7f4b2be919368f22 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Fri, 4 Dec 2020 18:08:08 +0100
Subject: [PATCH 292/340] [nrk] fix call to moved method

---
 youtube_dl/extractor/nrk.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 0f69579c5..8595f55b1 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -320,7 +320,7 @@ class NRKTVIE(NRKBaseIE):
                 asset_url = asset.get('url')
                 if not asset_url or asset_url in urls:
                     continue
-                formats = extract_nrk_formats(asset_url, video_id)
+                formats = self._extract_nrk_formats(asset_url, video_id)
                 if not formats:
                     continue
                 self._sort_formats(formats)
@@ -345,7 +345,7 @@ class NRKTVIE(NRKBaseIE):
         if not entries:
             media_url = data.get('mediaUrl')
             if media_url and media_url not in urls:
-                formats = extract_nrk_formats(media_url, video_id)
+                formats = self._extract_nrk_formats(media_url, video_id)
                 if formats:
                     self._sort_formats(formats)
                     duration = parse_duration(data.get('duration'))

From 41c92b8d02c33d6018991f9e5d3a9b616e325355 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 01:19:37 +0700
Subject: [PATCH 293/340] [nrktv:season] Improve extraction

---
 youtube_dl/extractor/nrk.py | 99 +++++++++++++++++++++++++++++++------
 1 file changed, 83 insertions(+), 16 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 8595f55b1..4d5f4c5ba 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import itertools
 import re
 
 from .common import InfoExtractor
@@ -17,6 +18,7 @@ from ..utils import (
     parse_age_limit,
     parse_duration,
     try_get,
+    urljoin,
     url_or_none,
 )
 
@@ -547,44 +549,109 @@ class NRKTVSerieBaseIE(InfoExtractor):
             return []
         entries = []
         for episode in entry_list:
-            nrk_id = episode.get('prfId')
+            nrk_id = episode.get('prfId') or episode.get('episodeId')
             if not nrk_id or not isinstance(nrk_id, compat_str):
                 continue
+            if not re.match(NRKTVIE._EPISODE_RE, nrk_id):
+                continue
             entries.append(self.url_result(
                 'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id))
         return entries
 
 
 class NRKTVSeasonIE(NRKTVSerieBaseIE):
-    _VALID_URL = r'https?://tv\.nrk\.no/serie/[^/]+/sesong/(?P<id>\d+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?P<domain>tv|radio)\.nrk\.no/serie/(?P<serie>[^/]+)/(?:sesong/)?(?P<id>\d+)'
+    _TESTS = [{
         'url': 'https://tv.nrk.no/serie/backstage/sesong/1',
         'info_dict': {
-            'id': '1',
+            'id': 'backstage/1',
             'title': 'Sesong 1',
         },
         'playlist_mincount': 30,
-    }
+    }, {
+        # no /sesong/ in path
+        'url': 'https://tv.nrk.no/serie/lindmo/2016',
+        'info_dict': {
+            'id': 'lindmo/2016',
+            'title': '2016',
+        },
+        'playlist_mincount': 29,
+    }, {
+        # weird nested _embedded in catalog JSON response
+        'url': 'https://radio.nrk.no/serie/dickie-dick-dickens/sesong/1',
+        'info_dict': {
+            'id': 'dickie-dick-dickens/1',
+            'title': 'Sesong 1',
+        },
+        'playlist_mincount': 11,
+    }, {
+        # 841 entries, multi page
+        'url': 'https://radio.nrk.no/serie/dagsnytt/sesong/201509',
+        'info_dict': {
+            'id': 'dagsnytt/201509',
+            'title': 'September 2015',
+        },
+        'playlist_mincount': 841,
+    }, {
+        # 180 entries, single page
+        'url': 'https://tv.nrk.no/serie/spangas/sesong/1',
+        'only_matching': True,
+    }]
 
     @classmethod
     def suitable(cls, url):
         return (False if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url)
                 else super(NRKTVSeasonIE, cls).suitable(url))
 
+    _ASSETS_KEYS = ('episodes', 'instalments',)
+
+    def _entries(self, data, display_id):
+        for page_num in itertools.count(1):
+            embedded = data.get('_embedded')
+            if not isinstance(embedded, dict):
+                break
+            # Extract entries
+            for asset_key in self._ASSETS_KEYS:
+                entries = try_get(
+                    embedded,
+                    (lambda x: x[asset_key]['_embedded'][asset_key],
+                     lambda x: x[asset_key]),
+                    list)
+                for e in self._extract_entries(entries):
+                    yield e
+            # Find next URL
+            for asset_key in self._ASSETS_KEYS:
+                next_url = urljoin(
+                    'https://psapi.nrk.no/',
+                    try_get(
+                        data,
+                        (lambda x: x['_links']['next']['href'],
+                         lambda x: x['_embedded'][asset_key]['_links']['next']['href']),
+                        compat_str))
+                if next_url:
+                    break
+            if not next_url:
+                break
+            data = self._download_json(
+                next_url, display_id,
+                'Downloading season JSON page %d' % page_num, fatal=False)
+            if not data:
+                break
+
     def _real_extract(self, url):
-        display_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        domain = mobj.group('domain')
+        serie = mobj.group('serie')
+        season_id = mobj.group('id')
+        display_id = '%s/%s' % (serie, season_id)
 
-        webpage = self._download_webpage(url, display_id)
-
-        series = self._extract_series(webpage, display_id)
-
-        season = next(
-            s for s in series['seasons']
-            if int(display_id) == s.get('seasonNumber'))
-
-        title = try_get(season, lambda x: x['titles']['title'], compat_str)
+        data = self._download_json(
+            'https://psapi.nrk.no/%s/catalog/series/%s/seasons/%s'
+            % (domain, serie, season_id), display_id, query={'pageSize': 50})
+        title = try_get(data, lambda x: x['titles']['title'], compat_str) or display_id
         return self.playlist_result(
-            self._extract_episodes(season), display_id, title)
+            self._entries(data, display_id),
+            display_id, title)
 
 
 class NRKTVSeriesIE(NRKTVSerieBaseIE):
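
The season extractor above pages through the psapi catalog by following HAL-style _links.next.href pointers. A simplified, hypothetical sketch of that loop, with the JSON download stubbed out and only the flat '_embedded.episodes' shape handled (the real code also checks 'instalments' and a nested '_embedded' level), could be:

    from youtube_dl.compat import compat_str
    from youtube_dl.utils import try_get, urljoin

    def iter_episode_ids(download_json, data):
        # download_json(url) stands in for InfoExtractor._download_json
        while data:
            for episode in try_get(
                    data, lambda x: x['_embedded']['episodes'], list) or []:
                nrk_id = episode.get('prfId') or episode.get('episodeId')
                if isinstance(nrk_id, compat_str):
                    yield nrk_id
            # Relative next-page paths are resolved against the API root
            next_url = urljoin(
                'https://psapi.nrk.no/',
                try_get(data, lambda x: x['_links']['next']['href'], compat_str))
            if not next_url:
                break
            data = download_json(next_url)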

From 02b04785ee22036e53e7883ba3217d17cc181916 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 02:21:07 +0700
Subject: [PATCH 294/340] [nrktv:series] Improve extraction

---
 youtube_dl/extractor/nrk.py | 138 +++++++++++++++++++++---------------
 1 file changed, 82 insertions(+), 56 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 4d5f4c5ba..7cfbe7856 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -558,6 +558,46 @@ class NRKTVSerieBaseIE(InfoExtractor):
                 'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id))
         return entries
 
+    _ASSETS_KEYS = ('episodes', 'instalments',)
+
+    def _extract_assets_key(self, embedded):
+        for asset_key in self._ASSETS_KEYS:
+            if embedded.get(asset_key):
+                return asset_key
+
+    def _entries(self, data, display_id):
+        for page_num in itertools.count(1):
+            embedded = data.get('_embedded')
+            if not isinstance(embedded, dict):
+                break
+            assets_key = self._extract_assets_key(embedded)
+            if not assets_key:
+                break
+            # Extract entries
+            entries = try_get(
+                embedded,
+                (lambda x: x[assets_key]['_embedded'][assets_key],
+                 lambda x: x[assets_key]),
+                list)
+            for e in self._extract_entries(entries):
+                yield e
+            # Find next URL
+            next_url = urljoin(
+                'https://psapi.nrk.no/',
+                try_get(
+                    data,
+                    (lambda x: x['_links']['next']['href'],
+                     lambda x: x['_embedded'][assets_key]['_links']['next']['href']),
+                    compat_str))
+            if not next_url:
+                break
+            data = self._download_json(
+                next_url, display_id,
+                'Downloading %s JSON page %d' % (assets_key, page_num),
+                fatal=False)
+            if not data:
+                break
+
 
 class NRKTVSeasonIE(NRKTVSerieBaseIE):
     _VALID_URL = r'https?://(?P<domain>tv|radio)\.nrk\.no/serie/(?P<serie>[^/]+)/(?:sesong/)?(?P<id>\d+)'
@@ -603,41 +643,6 @@ class NRKTVSeasonIE(NRKTVSerieBaseIE):
         return (False if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url)
                 else super(NRKTVSeasonIE, cls).suitable(url))
 
-    _ASSETS_KEYS = ('episodes', 'instalments',)
-
-    def _entries(self, data, display_id):
-        for page_num in itertools.count(1):
-            embedded = data.get('_embedded')
-            if not isinstance(embedded, dict):
-                break
-            # Extract entries
-            for asset_key in self._ASSETS_KEYS:
-                entries = try_get(
-                    embedded,
-                    (lambda x: x[asset_key]['_embedded'][asset_key],
-                     lambda x: x[asset_key]),
-                    list)
-                for e in self._extract_entries(entries):
-                    yield e
-            # Find next URL
-            for asset_key in self._ASSETS_KEYS:
-                next_url = urljoin(
-                    'https://psapi.nrk.no/',
-                    try_get(
-                        data,
-                        (lambda x: x['_links']['next']['href'],
-                         lambda x: x['_embedded'][asset_key]['_links']['next']['href']),
-                        compat_str))
-                if next_url:
-                    break
-            if not next_url:
-                break
-            data = self._download_json(
-                next_url, display_id,
-                'Downloading season JSON page %d' % page_num, fatal=False)
-            if not data:
-                break
-
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         domain = mobj.group('domain')
@@ -648,6 +653,7 @@ class NRKTVSeasonIE(NRKTVSerieBaseIE):
         data = self._download_json(
             'https://psapi.nrk.no/%s/catalog/series/%s/seasons/%s'
             % (domain, serie, season_id), display_id, query={'pageSize': 50})
+
         title = try_get(data, lambda x: x['titles']['title'], compat_str) or display_id
         return self.playlist_result(
             self._entries(data, display_id),
@@ -655,26 +661,9 @@ class NRKTVSeasonIE(NRKTVSerieBaseIE):
 
 
 class NRKTVSeriesIE(NRKTVSerieBaseIE):
-    _VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/serie/(?P<id>[^/]+)'
+    _VALID_URL = r'https?://(?P<domain>tv|radio)\.nrk(?:super)?\.no/serie/(?P<id>[^/]+)'
     _ITEM_RE = r'(?:data-season=["\']|id=["\']season-)(?P<id>\d+)'
     _TESTS = [{
-        'url': 'https://tv.nrk.no/serie/blank',
-        'info_dict': {
-            'id': 'blank',
-            'title': 'Blank',
-            'description': 'md5:7664b4e7e77dc6810cd3bca367c25b6e',
-        },
-        'playlist_mincount': 30,
-    }, {
-        # new layout, seasons
-        'url': 'https://tv.nrk.no/serie/backstage',
-        'info_dict': {
-            'id': 'backstage',
-            'title': 'Backstage',
-            'description': 'md5:c3ec3a35736fca0f9e1207b5511143d3',
-        },
-        'playlist_mincount': 60,
-    }, {
         # new layout, instalments
         'url': 'https://tv.nrk.no/serie/groenn-glede',
         'info_dict': {
@@ -682,7 +671,30 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
             'title': 'Grønn glede',
             'description': 'md5:7576e92ae7f65da6993cf90ee29e4608',
         },
-        'playlist_mincount': 10,
+        'playlist_mincount': 90,
+    }, {
+        # new layout, instalments, more entries
+        'url': 'https://tv.nrk.no/serie/lindmo',
+        'only_matching': True,
+    }, {
+        'url': 'https://tv.nrk.no/serie/blank',
+        'info_dict': {
+            'id': 'blank',
+            'title': 'Blank',
+            'description': 'md5:7664b4e7e77dc6810cd3bca367c25b6e',
+        },
+        'playlist_mincount': 30,
+        'expected_warnings': ['HTTP Error 404: Not Found'],
+    }, {
+        # new layout, seasons
+        'url': 'https://tv.nrk.no/serie/backstage',
+        'info_dict': {
+            'id': 'backstage',
+            'title': 'Backstage',
+            'description': 'md5:63692ceb96813d9a207e9910483d948b',
+        },
+        'playlist_mincount': 60,
+        'expected_warnings': ['HTTP Error 404: Not Found'],
     }, {
         # old layout
         'url': 'https://tv.nrksuper.no/serie/labyrint',
@@ -711,16 +723,30 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
             else super(NRKTVSeriesIE, cls).suitable(url))
 
     def _real_extract(self, url):
-        series_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        domain = mobj.group('domain')
+        series_id = mobj.group('id')
+
+        title = description = None
 
         webpage = self._download_webpage(url, series_id)
 
-        # New layout (e.g. https://tv.nrk.no/serie/backstage)
         series = self._extract_series(webpage, series_id, fatal=False)
         if series:
             title = try_get(series, lambda x: x['titles']['title'], compat_str)
             description = try_get(
                 series, lambda x: x['titles']['subtitle'], compat_str)
+
+        data = self._download_json(
+            'https://psapi.nrk.no/%s/catalog/series/%s/instalments'
+            % (domain, series_id), series_id, query={'pageSize': 50},
+            fatal=False)
+        if data:
+            return self.playlist_result(
+                self._entries(data, series_id), series_id, title, description)
+
+        # New layout (e.g. https://tv.nrk.no/serie/backstage)
+        if series:
             entries = []
             entries.extend(self._extract_seasons(series.get('seasons')))
             entries.extend(self._extract_entries(series.get('instalments')))

From 5d769860c32cd3c5a4719a46a8c11863d3ce0bc5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 02:43:33 +0700
Subject: [PATCH 295/340] [nrktv:series] Improve extraction (closes #21926)

---
 youtube_dl/extractor/nrk.py | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 7cfbe7856..4a82b11fd 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -521,7 +521,8 @@ class NRKTVSerieBaseIE(InfoExtractor):
         config = self._parse_json(
             self._search_regex(
                 (r'INITIAL_DATA(?:_V\d)?_*\s*=\s*({.+?})\s*;',
-                 r'({.+?})\s*,\s*"[^"]+"\s*\)\s*</script>'),
+                 r'({.+?})\s*,\s*"[^"]+"\s*\)\s*</script>',
+                 r'PRELOADED_STATE_*\s*=\s*({.+?})\s*\n'),
                 webpage, 'config', default='{}' if not fatal else NO_DEFAULT),
             display_id, fatal=False, transform_source=js_to_json)
         if not config:
@@ -531,12 +532,26 @@ class NRKTVSerieBaseIE(InfoExtractor):
             (lambda x: x['initialState']['series'], lambda x: x['series']),
             dict)
 
-    def _extract_seasons(self, seasons):
+    def _extract_seasons(self, domain, series_id, seasons):
+        if isinstance(seasons, dict):
+            seasons = seasons.get('seasons')
         if not isinstance(seasons, list):
             return []
         entries = []
         for season in seasons:
-            entries.extend(self._extract_episodes(season))
+            if not isinstance(season, dict):
+                continue
+            episodes = self._extract_episodes(season)
+            if episodes:
+                entries.extend(episodes)
+                continue
+            season_name = season.get('name')
+            if season_name and isinstance(season_name, compat_str):
+                entries.append(self.url_result(
+                    'https://%s.nrk.no/serie/%s/sesong/%s'
+                    % (domain, series_id, season_name),
+                    ie=NRKTVSeasonIE.ie_key(),
+                    video_title=season.get('title')))
         return entries
 
     def _extract_episodes(self, season):
@@ -713,6 +728,13 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
     }, {
         'url': 'https://tv.nrk.no/serie/postmann-pat',
         'only_matching': True,
+    }, {
+        'url': 'https://radio.nrk.no/serie/dickie-dick-dickens',
+        'info_dict': {
+            'id': 'dickie-dick-dickens',
+        },
+        'playlist_mincount': 8,
+        'expected_warnings': ['HTTP Error 404: Not Found'],
     }]
 
     @classmethod
@@ -748,7 +770,7 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
         # New layout (e.g. https://tv.nrk.no/serie/backstage)
         if series:
             entries = []
-            entries.extend(self._extract_seasons(series.get('seasons')))
+            entries.extend(self._extract_seasons(domain, series_id, series.get('seasons')))
             entries.extend(self._extract_entries(series.get('instalments')))
             entries.extend(self._extract_episodes(series.get('extraMaterial')))
             return self.playlist_result(entries, series_id, title, description)

From e8c0af04b79f84ab3b697498423f8dc27caca24a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 02:46:28 +0700
Subject: [PATCH 296/340] [nrktv] Relax _VALID_URL

---
 youtube_dl/extractor/nrk.py | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 4a82b11fd..08e331893 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -148,14 +148,7 @@ class NRKIE(NRKBaseIE):
 class NRKTVIE(NRKBaseIE):
     IE_DESC = 'NRK TV and NRK Radio'
     _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
-    _VALID_URL = r'''(?x)
-                        https?://
-                            (?:tv|radio)\.nrk(?:super)?\.no/
-                            (?:serie(?:/[^/]+){1,}|program)/
-                            (?![Ee]pisodes)%s
-                            (?:/\d{2}-\d{2}-\d{4})?
-                            (?:\#del=(?P<part_id>\d+))?
-                    ''' % _EPISODE_RE
+    _VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*%s' % _EPISODE_RE
     _API_HOSTS = ('psapi-ne.nrk.no', 'psapi-we.nrk.no')
     _TESTS = [{
         'url': 'https://tv.nrk.no/program/MDDP12000117',
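
As a quick, hypothetical sanity check, the relaxed pattern accepts any path depth as long as the URL ends in the four-letter/eight-digit programme id; the URLs below are taken from tests elsewhere in this series:

    import re

    _EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
    _VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*%s' % _EPISODE_RE

    for u in ('https://tv.nrk.no/program/MDDP12000117',
              'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
              'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015'):
        print(re.match(_VALID_URL, u).group('id'))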

From c67b33888f4b8ea21189a966f087f795c6ea52bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 03:00:10 +0700
Subject: [PATCH 297/340] [nrk] Improve error extraction

---
 youtube_dl/extractor/nrk.py | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 08e331893..f5e964753 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -31,6 +31,22 @@ class NRKBaseIE(InfoExtractor):
             re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url),
             video_id, 'mp4', 'm3u8_native', fatal=False)
 
+    def _raise_error(self, data):
+        MESSAGES = {
+            'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet',
+            'ProgramRightsHasExpired': 'Programmet har gått ut',
+            'NoProgramRights': 'Ikke tilgjengelig',
+            'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
+        }
+        message_type = data.get('messageType', '')
+        # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
+        if 'IsGeoBlocked' in message_type or try_get(data, lambda x: x['usageRights']['isGeoBlocked']) is True:
+            self.raise_geo_restricted(
+                msg=MESSAGES.get('ProgramIsGeoBlocked'),
+                countries=self._GEO_COUNTRIES)
+        message = data.get('endUserMessage') or MESSAGES.get(message_type, message_type)
+        raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
+
 
 class NRKIE(NRKBaseIE):
     _VALID_URL = r'''(?x)
@@ -89,6 +105,9 @@ class NRKIE(NRKBaseIE):
             'http://psapi.nrk.no/playback/manifest/%s' % video_id,
             video_id, 'Downloading manifest JSON')
 
+        if manifest.get('playability') == 'nonPlayable':
+            self._raise_error(manifest['nonPlayable'])
+
         playable = manifest['playable']
 
         formats = []
@@ -352,22 +371,7 @@ class NRKTVIE(NRKBaseIE):
                     }]
 
         if not entries:
-            MESSAGES = {
-                'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet',
-                'ProgramRightsHasExpired': 'Programmet har gått ut',
-                'NoProgramRights': 'Ikke tilgjengelig',
-                'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
-            }
-            message_type = data.get('messageType', '')
-            # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
-            if 'IsGeoBlocked' in message_type or try_get(data, lambda x: x['usageRights']['isGeoBlocked']) is True:
-                self.raise_geo_restricted(
-                    msg=MESSAGES.get('ProgramIsGeoBlocked'),
-                    countries=self._GEO_COUNTRIES)
-            raise ExtractorError(
-                '%s said: %s' % (self.IE_NAME, MESSAGES.get(
-                    message_type, message_type)),
-                expected=True)
+            self._raise_error(data)
 
         series = conviva.get('seriesName') or data.get('seriesTitle')
         episode = conviva.get('episodeName') or data.get('episodeNumberOrDate')

From 58f7ada235801aae3fdcb959df04a4bfdb790415 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 03:24:56 +0700
Subject: [PATCH 298/340] [teachable:course] Improve extraction (closes #24507,
 closes #27286)

---
 youtube_dl/extractor/teachable.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/teachable.py b/youtube_dl/extractor/teachable.py
index a75369dbe..6f264bddc 100644
--- a/youtube_dl/extractor/teachable.py
+++ b/youtube_dl/extractor/teachable.py
@@ -269,7 +269,7 @@ class TeachableCourseIE(TeachableBaseIE):
                 r'(?s)(?P<li><li[^>]+class=(["\'])(?:(?!\2).)*?section-item[^>]+>.+?</li>)',
                 webpage):
             li = mobj.group('li')
-            if 'fa-youtube-play' not in li:
+            if 'fa-youtube-play' not in li and not re.search(r'\d{1,2}:\d{2}', li):
                 continue
             lecture_url = self._search_regex(
                 r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1', li,
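
The new condition treats a section item as a lecture when it either carries the play icon or contains an mm:ss-looking duration. A tiny hypothetical illustration of that heuristic (the <li> snippets are made up):

    import re

    def looks_like_lecture(li_html):
        # A play icon or an mm:ss duration inside the <li> marks a lecture
        return ('fa-youtube-play' in li_html
                or re.search(r'\d{1,2}:\d{2}', li_html) is not None)

    print(looks_like_lecture('<li class="section-item">Intro  04:37</li>'))  # True
    print(looks_like_lecture('<li class="section-item">Course notes</li>'))  # False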

From e7f93fbd85549896102fe4e870ec840046f9025a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 03:31:07 +0700
Subject: [PATCH 299/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 65406eafd..aaca9748b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,24 @@
+version <unreleased>
+
+Core
+* [extractor/common] Improve Akamai HTTP format extraction
+    * Allow m3u8 manifest without an additional audio format
+    * Fix extraction for qualities starting with a number
+
+Extractors
+* [teachable:course] Improve extraction (#24507, #27286)
+* [nrk] Improve error extraction
+* [nrktv:series] Improve extraction (#21926)
+* [nrktv:season] Improve extraction
+* [nrk] Improve format extraction and geo-restriction detection (#24221)
+* [pornhub] Handle HTTP errors gracefully (#26414)
+* [nrktv] Relax URL regular expression (#27299, #26185)
++ [zdf] Extract webm formats (#26659)
++ [gamespot] Extract DASH and HTTP formats
++ [tver] Add support for tver.jp (#26662, #27284)
++ [pornhub] Add support for pornhub.org (#27276)
+
+
 version 2020.12.02
 
 Extractors

From e1476196692131fa7218f9cf91571b40bb4facb8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 03:40:00 +0700
Subject: [PATCH 300/340] release 2020.12.05

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 3 +++
 youtube_dl/version.py                            | 2 +-
 8 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 5f0e33eaf..4cbf763be 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.12.02
+ [debug] youtube-dl version 2020.12.05
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 631c60958..5e20c5505 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 19d1b53e2..23c309dc0 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 6d2f3e16d..43363cfa0 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.12.02
+ [debug] youtube-dl version 2020.12.05
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 82009b505..8b03ed273 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.02. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.02**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index aaca9748b..d9b8336ab 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.12.05
 
 Core
 * [extractor/common] Improve Akamai HTTP format extraction
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index e5b470041..651a754d9 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -308,6 +308,7 @@
  - **FrontendMasters**
  - **FrontendMastersCourse**
  - **FrontendMastersLesson**
+ - **FujiTVFODPlus7**
  - **Funimation**
  - **Funk**
  - **Fusion**
@@ -710,6 +711,7 @@
  - **qqmusic:singer**: QQ音乐 - 歌手
  - **qqmusic:toplist**: QQ音乐 - 排行榜
  - **QuantumTV**
+ - **Qub**
  - **Quickline**
  - **QuicklineLive**
  - **R7**
@@ -952,6 +954,7 @@
  - **TVANouvellesArticle**
  - **TVC**
  - **TVCArticle**
+ - **TVer**
  - **tvigle**: Интернет-телевидение Tvigle.ru
  - **tvland.com**
  - **TVN24**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 0b0a6c785..9e876731b 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.12.02'
+__version__ = '2020.12.05'

From 51579d87e4435d5e661329dcefd0422d263bb56b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 04:41:33 +0700
Subject: [PATCH 301/340] [peertube] Recognize audio-only formats (closes
 #27295)

---
 youtube_dl/extractor/peertube.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/peertube.py b/youtube_dl/extractor/peertube.py
index 48fb95416..2d00791d2 100644
--- a/youtube_dl/extractor/peertube.py
+++ b/youtube_dl/extractor/peertube.py
@@ -541,6 +541,8 @@ class PeerTubeIE(InfoExtractor):
                 'format_id': format_id,
                 'filesize': file_size,
             })
+            if format_id == '0p':
+                f['vcodec'] = 'none'
             formats.append(f)
         self._sort_formats(formats)
 

From 13ec444a982fa0c3b2e61666792069d35111ff31 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sat, 5 Dec 2020 04:45:30 +0700
Subject: [PATCH 302/340] [peertube] Extract fps

---
 youtube_dl/extractor/peertube.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/peertube.py b/youtube_dl/extractor/peertube.py
index 2d00791d2..c39d12728 100644
--- a/youtube_dl/extractor/peertube.py
+++ b/youtube_dl/extractor/peertube.py
@@ -543,6 +543,8 @@ class PeerTubeIE(InfoExtractor):
             })
             if format_id == '0p':
                 f['vcodec'] = 'none'
+            else:
+                f['fps'] = int_or_none(file_.get('fps'))
             formats.append(f)
         self._sort_formats(formats)
 

From 1b26bfd425eaf5f7e314a6ebc810d62e8608ce5a Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 5 Dec 2020 00:32:59 +0100
Subject: [PATCH 303/340] [nrk] improve extraction

- improve format extraction for old akamai formats
- update some of the tests
- add is_live value to entry info dict
- request instalments only when they're available
- fix skole extraction
---
 youtube_dl/extractor/nrk.py | 252 ++++++++++++------------------------
 1 file changed, 81 insertions(+), 171 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index f5e964753..8b31a6ad2 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -13,8 +13,6 @@ from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
-    js_to_json,
-    NO_DEFAULT,
     parse_age_limit,
     parse_duration,
     try_get,
@@ -24,9 +22,10 @@ from ..utils import (
 
 
 class NRKBaseIE(InfoExtractor):
-    _GEO_COUNTRIES = ['NO']
-
     def _extract_nrk_formats(self, asset_url, video_id):
+        if re.match(r'https?://[^/]+\.akamaihd\.net/i/', asset_url):
+            return self._extract_akamai_formats(
+                re.sub(r'(?:b=\d+-\d+|__a__=off)&?', '', asset_url), video_id)
         return self._extract_m3u8_formats(
             re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url),
             video_id, 'mp4', 'm3u8_native', fatal=False)
@@ -47,6 +46,12 @@ class NRKBaseIE(InfoExtractor):
         message = data.get('endUserMessage') or MESSAGES.get(message_type, message_type)
         raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
 
+    def _call_api(self, path, video_id, item=None, note=None, fatal=True, query=None):
+        return self._download_json(
+            urljoin('http://psapi.nrk.no/', path),
+            video_id, note or 'Downloading %s JSON' % item,
+            fatal=fatal, query=query)
+
 
 class NRKIE(NRKBaseIE):
     _VALID_URL = r'''(?x)
@@ -64,7 +69,7 @@ class NRKIE(NRKBaseIE):
     _TESTS = [{
         # video
         'url': 'http://www.nrk.no/video/PS*150533',
-        'md5': '706f34cdf1322577589e369e522b50ef',
+        'md5': 'f46be075326e23ad0e524edfcb06aeb6',
         'info_dict': {
             'id': '150533',
             'ext': 'mp4',
@@ -78,7 +83,7 @@ class NRKIE(NRKBaseIE):
         # MD5 is unstable
         'info_dict': {
             'id': '154915',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'Slik høres internett ut når du er blind',
             'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
             'duration': 20,
@@ -101,9 +106,9 @@ class NRKIE(NRKBaseIE):
     }]
 
     def _extract_from_playback(self, video_id):
-        manifest = self._download_json(
-            'http://psapi.nrk.no/playback/manifest/%s' % video_id,
-            video_id, 'Downloading manifest JSON')
+        path_templ = 'playback/%s/' + video_id
+        call_playback_api = lambda x: self._call_api(path_templ % x, video_id, x)
+        manifest = call_playback_api('manifest')
 
         if manifest.get('playability') == 'nonPlayable':
             self._raise_error(manifest['nonPlayable'])
@@ -123,9 +128,7 @@ class NRKIE(NRKBaseIE):
                 formats.extend(self._extract_nrk_formats(format_url, video_id))
         self._sort_formats(formats)
 
-        data = self._download_json(
-            'http://psapi.nrk.no/playback/metadata/%s' % video_id,
-            video_id, 'Downloading metadata JSON')
+        data = call_playback_api('metadata')
 
         preplay = data['preplay']
         titles = preplay['titles']
@@ -171,18 +174,18 @@ class NRKTVIE(NRKBaseIE):
     _API_HOSTS = ('psapi-ne.nrk.no', 'psapi-we.nrk.no')
     _TESTS = [{
         'url': 'https://tv.nrk.no/program/MDDP12000117',
-        'md5': '8270824df46ec629b66aeaa5796b36fb',
+        'md5': 'c4a5960f1b00b40d47db65c1064e0ab1',
         'info_dict': {
             'id': 'MDDP12000117AA',
             'ext': 'mp4',
             'title': 'Alarm Trolltunga',
             'description': 'md5:46923a6e6510eefcce23d5ef2a58f2ce',
-            'duration': 2223,
+            'duration': 2223.44,
             'age_limit': 6,
         },
     }, {
         'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
-        'md5': '9a167e54d04671eb6317a37b7bc8a280',
+        'md5': '8d40dab61cea8ab0114e090b029a0565',
         'info_dict': {
             'id': 'MUHH48000314AA',
             'ext': 'mp4',
@@ -200,7 +203,7 @@ class NRKTVIE(NRKBaseIE):
             'ext': 'mp4',
             'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
             'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
-            'duration': 4605,
+            'duration': 4605.08,
             'series': 'Kunnskapskanalen',
             'episode': '24.05.2014',
         },
@@ -223,39 +226,13 @@ class NRKTVIE(NRKBaseIE):
         'skip': 'particular part is not supported currently',
     }, {
         'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
-        'playlist': [{
-            'info_dict': {
-                'id': 'MSPO40010515AH',
-                'ext': 'mp4',
-                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 1)',
-                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
-                'duration': 772,
-                'series': 'Tour de Ski',
-                'episode': '06.01.2015',
-            },
-            'params': {
-                'skip_download': True,
-            },
-        }, {
-            'info_dict': {
-                'id': 'MSPO40010515BH',
-                'ext': 'mp4',
-                'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015 (Part 2)',
-                'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
-                'duration': 6175,
-                'series': 'Tour de Ski',
-                'episode': '06.01.2015',
-            },
-            'params': {
-                'skip_download': True,
-            },
-        }],
         'info_dict': {
-            'id': 'MSPO40010515',
+            'id': 'MSPO40010515AH',
+            'ext': 'mp4',
             'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
-            'description': 'md5:1f97a41f05a9486ee00c56f35f82993d',
+            'description': 'md5:c03aba1e917561eface5214020551b7a',
         },
-        'expected_warnings': ['Video is geo restricted'],
+        'skip': 'Video is geo restricted',
     }, {
         'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
         'info_dict': {
@@ -286,6 +263,7 @@ class NRKTVIE(NRKBaseIE):
         'params': {
             'skip_download': True,
         },
+        'skip': 'ProgramRightsHasExpired',
     }, {
         'url': 'https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#',
         'only_matching': True,
@@ -354,6 +332,7 @@ class NRKTVIE(NRKBaseIE):
                     'duration': duration,
                     'subtitles': subtitles,
                     'formats': formats,
+                    'is_live': live,
                 })
 
         if not entries:
@@ -368,6 +347,7 @@ class NRKTVIE(NRKBaseIE):
                         'title': make_title(title),
                         'duration': duration,
                         'formats': formats,
+                        'is_live': live,
                     }]
 
         if not entries:
@@ -513,49 +493,7 @@ class NRKTVEpisodeIE(InfoExtractor):
         return info
 
 
-class NRKTVSerieBaseIE(InfoExtractor):
-    def _extract_series(self, webpage, display_id, fatal=True):
-        config = self._parse_json(
-            self._search_regex(
-                (r'INITIAL_DATA(?:_V\d)?_*\s*=\s*({.+?})\s*;',
-                 r'({.+?})\s*,\s*"[^"]+"\s*\)\s*</script>',
-                 r'PRELOADED_STATE_*\s*=\s*({.+?})\s*\n'),
-                webpage, 'config', default='{}' if not fatal else NO_DEFAULT),
-            display_id, fatal=False, transform_source=js_to_json)
-        if not config:
-            return
-        return try_get(
-            config,
-            (lambda x: x['initialState']['series'], lambda x: x['series']),
-            dict)
-
-    def _extract_seasons(self, domain, series_id, seasons):
-        if isinstance(seasons, dict):
-            seasons = seasons.get('seasons')
-        if not isinstance(seasons, list):
-            return []
-        entries = []
-        for season in seasons:
-            if not isinstance(season, dict):
-                continue
-            episodes = self._extract_episodes(season)
-            if episodes:
-                entries.extend(episodes)
-                continue
-            season_name = season.get('name')
-            if season_name and isinstance(season_name, compat_str):
-                entries.append(self.url_result(
-                    'https://%s.nrk.no/serie/%s/sesong/%s'
-                    % (domain, series_id, season_name),
-                    ie=NRKTVSeasonIE.ie_key(),
-                    video_title=season.get('title')))
-        return entries
-
-    def _extract_episodes(self, season):
-        if not isinstance(season, dict):
-            return []
-        return self._extract_entries(season.get('episodes'))
-
+class NRKTVSerieBaseIE(NRKBaseIE):
     def _extract_entries(self, entry_list):
         if not isinstance(entry_list, list):
             return []
@@ -579,7 +517,7 @@ class NRKTVSerieBaseIE(InfoExtractor):
 
     def _entries(self, data, display_id):
         for page_num in itertools.count(1):
-            embedded = data.get('_embedded')
+            embedded = data.get('_embedded') or data
             if not isinstance(embedded, dict):
                 break
             assets_key = self._extract_assets_key(embedded)
@@ -594,18 +532,16 @@ class NRKTVSerieBaseIE(InfoExtractor):
             for e in self._extract_entries(entries):
                 yield e
             # Find next URL
-            next_url = urljoin(
-                'https://psapi.nrk.no/',
-                try_get(
-                    data,
-                    (lambda x: x['_links']['next']['href'],
-                     lambda x: x['_embedded'][assets_key]['_links']['next']['href']),
-                    compat_str))
-            if not next_url:
+            next_url_path = try_get(
+                data,
+                (lambda x: x['_links']['next']['href'],
+                 lambda x: x['_embedded'][assets_key]['_links']['next']['href']),
+                compat_str)
+            if not next_url_path:
                 break
-            data = self._download_json(
-                next_url, display_id,
-                'Downloading %s JSON page %d' % (assets_key, page_num),
+            data = self._call_api(
+                next_url_path, display_id,
+                note='Downloading %s JSON page %d' % (assets_key, page_num),
                 fatal=False)
             if not data:
                 break
@@ -656,15 +592,12 @@ class NRKTVSeasonIE(NRKTVSerieBaseIE):
                 else super(NRKTVSeasonIE, cls).suitable(url))
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        domain = mobj.group('domain')
-        serie = mobj.group('serie')
-        season_id = mobj.group('id')
+        domain, serie, season_id = re.match(self._VALID_URL, url).groups()
         display_id = '%s/%s' % (serie, season_id)
 
-        data = self._download_json(
-            'https://psapi.nrk.no/%s/catalog/series/%s/seasons/%s'
-            % (domain, serie, season_id), display_id, query={'pageSize': 50})
+        data = self._call_api(
+            '%s/catalog/series/%s/seasons/%s' % (domain, serie, season_id),
+            display_id, 'season', query={'pageSize': 50})
 
         title = try_get(data, lambda x: x['titles']['title'], compat_str) or display_id
         return self.playlist_result(
@@ -673,8 +606,7 @@ class NRKTVSeasonIE(NRKTVSerieBaseIE):
 
 
 class NRKTVSeriesIE(NRKTVSerieBaseIE):
-    _VALID_URL = r'https?://(?P<domain>tv|radio)\.nrk(?:super)?\.no/serie/(?P<id>[^/]+)'
-    _ITEM_RE = r'(?:data-season=["\']|id=["\']season-)(?P<id>\d+)'
+    _VALID_URL = r'https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/serie/(?P<id>[^/]+)'
     _TESTS = [{
         # new layout, instalments
         'url': 'https://tv.nrk.no/serie/groenn-glede',
@@ -696,7 +628,6 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
             'description': 'md5:7664b4e7e77dc6810cd3bca367c25b6e',
         },
         'playlist_mincount': 30,
-        'expected_warnings': ['HTTP Error 404: Not Found'],
     }, {
         # new layout, seasons
         'url': 'https://tv.nrk.no/serie/backstage',
@@ -706,14 +637,13 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
             'description': 'md5:63692ceb96813d9a207e9910483d948b',
         },
         'playlist_mincount': 60,
-        'expected_warnings': ['HTTP Error 404: Not Found'],
     }, {
         # old layout
         'url': 'https://tv.nrksuper.no/serie/labyrint',
         'info_dict': {
             'id': 'labyrint',
             'title': 'Labyrint',
-            'description': 'md5:318b597330fdac5959247c9b69fdb1ec',
+            'description': 'I Daidalos sin undersjøiske Labyrint venter spennende oppgaver, skumle robotskapninger og slim.',
         },
         'playlist_mincount': 3,
     }, {
@@ -729,9 +659,13 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
         'url': 'https://radio.nrk.no/serie/dickie-dick-dickens',
         'info_dict': {
             'id': 'dickie-dick-dickens',
+            'title': 'Dickie Dick Dickens',
+            'description': 'md5:19e67411ffe57f7dce08a943d7a0b91f',
         },
         'playlist_mincount': 8,
-        'expected_warnings': ['HTTP Error 404: Not Found'],
+    }, {
+        'url': 'https://nrksuper.no/serie/labyrint',
+        'only_matching': True,
     }]
 
     @classmethod
@@ -742,57 +676,39 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
             else super(NRKTVSeriesIE, cls).suitable(url))
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        domain = mobj.group('domain')
-        series_id = mobj.group('id')
+        site, series_id = re.match(self._VALID_URL, url).groups()
+        domain = 'radio' if site == 'radio.nrk' else 'tv'
 
-        title = description = None
+        series = self._call_api(
+            '%s/catalog/series/%s' % (domain, series_id), series_id, 'serie')
+        titles = try_get(series, [
+            lambda x: x['titles'],
+            lambda x: x[x['type']]['titles'],
+            lambda x: x[x['seriesType']]['titles'],
+        ]) or {}
 
-        webpage = self._download_webpage(url, series_id)
+        entries = []
+        entries.extend(self._entries(series, series_id))
+        embedded = series.get('_embedded') or {}
+        linked_seasons = try_get(series, lambda x: x['_links']['seasons']) or []
+        embedded_seasons = embedded.get('seasons') or []
+        if len(linked_seasons) > len(embedded_seasons):
+            for season in linked_seasons:
+                season_name = season.get('name')
+                if season_name and isinstance(season_name, compat_str):
+                    entries.append(self.url_result(
+                        'https://%s.nrk.no/serie/%s/sesong/%s'
+                        % (domain, series_id, season_name),
+                        ie=NRKTVSeasonIE.ie_key(),
+                        video_title=season.get('title')))
+        else:
+            for season in embedded_seasons:
+                entries.extend(self._entries(season, series_id))
+        entries.extend(self._entries(
+            embedded.get('extraMaterial') or {}, series_id))
 
-        series = self._extract_series(webpage, series_id, fatal=False)
-        if series:
-            title = try_get(series, lambda x: x['titles']['title'], compat_str)
-            description = try_get(
-                series, lambda x: x['titles']['subtitle'], compat_str)
-
-        data = self._download_json(
-            'https://psapi.nrk.no/%s/catalog/series/%s/instalments'
-            % (domain, series_id), series_id, query={'pageSize': 50},
-            fatal=False)
-        if data:
-            return self.playlist_result(
-                self._entries(data, series_id), series_id, title, description)
-
-        # New layout (e.g. https://tv.nrk.no/serie/backstage)
-        if series:
-            entries = []
-            entries.extend(self._extract_seasons(domain, series_id, series.get('seasons')))
-            entries.extend(self._extract_entries(series.get('instalments')))
-            entries.extend(self._extract_episodes(series.get('extraMaterial')))
-            return self.playlist_result(entries, series_id, title, description)
-
-        # Old layout (e.g. https://tv.nrksuper.no/serie/labyrint)
-        entries = [
-            self.url_result(
-                'https://tv.nrk.no/program/Episodes/{series}/{season}'.format(
-                    series=series_id, season=season_id))
-            for season_id in re.findall(self._ITEM_RE, webpage)
-        ]
-
-        title = self._html_search_meta(
-            'seriestitle', webpage,
-            'title', default=None) or self._og_search_title(
-            webpage, fatal=False)
-        if title:
-            title = self._search_regex(
-                r'NRK (?:Super )?TV\s*[-–]\s*(.+)', title, 'title', default=title)
-
-        description = self._html_search_meta(
-            'series_description', webpage,
-            'description', default=None) or self._og_search_description(webpage)
-
-        return self.playlist_result(entries, series_id, title, description)
+        return self.playlist_result(
+            entries, series_id, titles.get('title'), titles.get('subtitle'))
 
 
 class NRKTVDirekteIE(NRKTVIE):
@@ -896,14 +812,8 @@ class NRKSkoleIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        webpage = self._download_webpage(
-            'https://mimir.nrk.no/plugin/1.0/static?mediaId=%s' % video_id,
-            video_id)
-
-        nrk_id = self._parse_json(
-            self._search_regex(
-                r'<script[^>]+type=["\']application/json["\'][^>]*>({.+?})</script>',
-                webpage, 'application json'),
-            video_id)['activeMedia']['psId']
+        nrk_id = self._download_json(
+            'https://nrkno-skole-prod.kube.nrk.no/skole/api/media/%s' % video_id,
+            video_id)['psId']
 
         return self.url_result('nrk:%s' % nrk_id)
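
The "old akamai formats" bullet above mostly amounts to a URL cleanup before delegating to the generic Akamai/HLS extractors. A small hypothetical helper isolating just that cleanup (the hostname in the example is invented), assuming the same regexes as the diff:

    import re

    def clean_nrk_asset_url(asset_url):
        # AkamaiHD URLs drop the bitrate clamp and audio toggle; plain HLS
        # URLs drop the bandwidth / audio-only hints instead.
        if re.match(r'https?://[^/]+\.akamaihd\.net/i/', asset_url):
            return re.sub(r'(?:b=\d+-\d+|__a__=off)&?', '', asset_url)
        return re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url)

    print(clean_nrk_asset_url(
        'https://example.akamaihd.net/i/no/clip/master.m3u8?b=292-1500&__a__=off'))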

From 4c93b2fd155cc15588017c14c033bcea1a1ff931 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 5 Dec 2020 09:13:42 +0100
Subject: [PATCH 304/340] [nrk] improve format extraction

---
 youtube_dl/extractor/nrk.py | 40 ++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 12 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 8b31a6ad2..289a0a3a4 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 
 import itertools
+import random
 import re
 
 from .common import InfoExtractor
@@ -22,13 +23,26 @@ from ..utils import (
 
 
 class NRKBaseIE(InfoExtractor):
+    _GEO_COUNTRIES = ['NO']
+    _CDN_REPL_REGEX = r'''(?x)://
+        (?:
+            nrkod\d{1,2}-httpcache0-47115-cacheod0\.dna\.ip-only\.net/47115-cacheod0|
+            nrk-od-no\.telenorcdn\.net|
+            minicdn-od\.nrk\.no/od/nrkhd-osl-rr\.netwerk\.no/no
+        )/'''
+
     def _extract_nrk_formats(self, asset_url, video_id):
         if re.match(r'https?://[^/]+\.akamaihd\.net/i/', asset_url):
             return self._extract_akamai_formats(
                 re.sub(r'(?:b=\d+-\d+|__a__=off)&?', '', asset_url), video_id)
-        return self._extract_m3u8_formats(
-            re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url),
-            video_id, 'mp4', 'm3u8_native', fatal=False)
+        asset_url = re.sub(r'(?:bw_(?:low|high)=\d+|no_audio_only)&?', '', asset_url)
+        formats = self._extract_m3u8_formats(
+            asset_url, video_id, 'mp4', 'm3u8_native', fatal=False)
+        if not formats and re.search(self._CDN_REPL_REGEX, asset_url):
+            formats = self._extract_m3u8_formats(
+                re.sub(self._CDN_REPL_REGEX, '://nrk-od-%02d.akamaized.net/no/' % random.randint(0, 99), asset_url),
+                video_id, 'mp4', 'm3u8_native', fatal=False)
+        return formats
 
     def _raise_error(self, data):
         MESSAGES = {
@@ -107,8 +121,10 @@ class NRKIE(NRKBaseIE):
 
     def _extract_from_playback(self, video_id):
         path_templ = 'playback/%s/' + video_id
-        call_playback_api = lambda x: self._call_api(path_templ % x, video_id, x)
-        manifest = call_playback_api('manifest')
+        def call_playback_api(item, query=None):
+            return self._call_api(path_templ % item, video_id, item, query=query)
+        # known values for preferredCdn: akamai, iponly, minicdn and telenor
+        manifest = call_playback_api('manifest', {'preferredCdn': 'akamai'})
 
         if manifest.get('playability') == 'nonPlayable':
             self._raise_error(manifest['nonPlayable'])
@@ -195,7 +211,6 @@ class NRKTVIE(NRKBaseIE):
             'series': '20 spørsmål',
             'episode': '23.05.2014',
         },
-        'skip': 'NoProgramRights',
     }, {
         'url': 'https://tv.nrk.no/program/mdfp15000514',
         'info_dict': {
@@ -214,15 +229,15 @@ class NRKTVIE(NRKBaseIE):
         # single playlist video
         'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
         'info_dict': {
-            'id': 'MSPO40010515-part2',
-            'ext': 'flv',
-            'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
-            'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
+            'id': 'MSPO40010515AH',
+            'ext': 'mp4',
+            'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
+            'description': 'md5:c03aba1e917561eface5214020551b7a',
         },
         'params': {
             'skip_download': True,
         },
-        'expected_warnings': ['Video is geo restricted'],
+        'expected_warnings': ['Failed to download m3u8 information'],
         'skip': 'particular part is not supported currently',
     }, {
         'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
@@ -232,7 +247,7 @@ class NRKTVIE(NRKBaseIE):
             'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
             'description': 'md5:c03aba1e917561eface5214020551b7a',
         },
-        'skip': 'Video is geo restricted',
+        'expected_warnings': ['Failed to download m3u8 information'],
     }, {
         'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
         'info_dict': {
@@ -312,6 +327,7 @@ class NRKTVIE(NRKBaseIE):
                 asset_url = asset.get('url')
                 if not asset_url or asset_url in urls:
                     continue
+                urls.append(asset_url)
                 formats = self._extract_nrk_formats(asset_url, video_id)
                 if not formats:
                     continue

From 6bf95b15ee311d673e63ed5bc98d0b74040f24f0 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 5 Dec 2020 15:35:29 +0100
Subject: [PATCH 305/340] [nrk] reduce the number of instalments requests

---
 youtube_dl/extractor/nrk.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 289a0a3a4..24993b1c8 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -121,6 +121,7 @@ class NRKIE(NRKBaseIE):
 
     def _extract_from_playback(self, video_id):
         path_templ = 'playback/%s/' + video_id
+
         def call_playback_api(item, query=None):
             return self._call_api(path_templ % item, video_id, item, query=query)
         # known values for preferredCdn: akamai, iponly, minicdn and telenor
@@ -696,7 +697,8 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
         domain = 'radio' if site == 'radio.nrk' else 'tv'
 
         series = self._call_api(
-            '%s/catalog/series/%s' % (domain, series_id), series_id, 'serie')
+            '%s/catalog/series/%s' % (domain, series_id),
+            series_id, 'serie', query={'embeddedInstalmentsPageSize': 50})
         titles = try_get(series, [
             lambda x: x['titles'],
             lambda x: x[x['type']]['titles'],

From 082da364166b5e33e1aa3ff643a177c0456b43e6 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sat, 5 Dec 2020 16:24:49 +0100
Subject: [PATCH 306/340] [nrk] reduce requests for Radio series
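
The `size_prefix + 'ageSize'` construction in the hunk just picks the query
parameter name per catalog; a tiny illustration (page size of 50 as in the patch):

    def instalments_query(is_radio):
        size_prefix = 'p' if is_radio else 'embeddedInstalmentsP'
        return {size_prefix + 'ageSize': 50}

    assert instalments_query(True) == {'pageSize': 50}                      # radio
    assert instalments_query(False) == {'embeddedInstalmentsPageSize': 50}  # tv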

---
 youtube_dl/extractor/nrk.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 24993b1c8..fdf2d7407 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -694,11 +694,13 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
 
     def _real_extract(self, url):
         site, series_id = re.match(self._VALID_URL, url).groups()
-        domain = 'radio' if site == 'radio.nrk' else 'tv'
+        is_radio = site == 'radio.nrk'
+        domain = 'radio' if is_radio else 'tv'
 
+        size_prefix = 'p' if is_radio else 'embeddedInstalmentsP'
         series = self._call_api(
             '%s/catalog/series/%s' % (domain, series_id),
-            series_id, 'serie', query={'embeddedInstalmentsPageSize': 50})
+            series_id, 'serie', query={size_prefix + 'ageSize': 50})
         titles = try_get(series, [
             lambda x: x['titles'],
             lambda x: x[x['type']]['titles'],

From cc017e07ca1ce70740f45620f2bceb1b0ac25eb2 Mon Sep 17 00:00:00 2001
From: renalid <renalid@gmail.com>
Date: Sun, 6 Dec 2020 16:12:25 +0100
Subject: [PATCH 307/340] [generic] Extract RSS video description (#27177)

---
 youtube_dl/extractor/generic.py | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index f10f11244..8ed2789d0 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -198,11 +198,19 @@ class GenericIE(InfoExtractor):
         {
             'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
             'info_dict': {
-                'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
-                'ext': 'm4v',
-                'upload_date': '20150228',
-                'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
-            }
+                'id': 'http://podcastfeeds.nbcnews.com/nbcnews/video/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
+                'title': 'MSNBC Rachel Maddow (video)',
+                'description': 're:.*her unique approach to storytelling.*',
+            },
+            'playlist': [{
+                'info_dict': {
+                    'ext': 'mov',
+                    'id': 'pdv_maddow_netcast_mov-12-03-2020-223726',
+                    'title': 'MSNBC Rachel Maddow (video) - 12-03-2020-223726',
+                    'description': 're:.*her unique approach to storytelling.*',
+                    'upload_date': '20201204',
+                },
+            }],
         },
         # RSS feed with enclosures and unsupported link URLs
         {
@@ -2199,6 +2207,7 @@ class GenericIE(InfoExtractor):
                 '_type': 'url_transparent',
                 'url': next_url,
                 'title': it.find('title').text,
+                'description': xpath_text(it, 'description', default=None),
             })
 
         return {

From 5e822c252627905036b164cdbc1220dc7b1f5d08 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Dec 2020 22:30:30 +0700
Subject: [PATCH 308/340] [generic] Extract RSS video timestamp

---
 youtube_dl/extractor/generic.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 8ed2789d0..d0e0d6919 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -30,6 +30,7 @@ from ..utils import (
     smuggle_url,
     unescapeHTML,
     unified_strdate,
+    unified_timestamp,
     unsmuggle_url,
     UnsupportedError,
     xpath_text,
@@ -2208,6 +2209,8 @@ class GenericIE(InfoExtractor):
                 'url': next_url,
                 'title': it.find('title').text,
                 'description': xpath_text(it, 'description', default=None),
+                'timestamp': unified_timestamp(
+                    xpath_text(it, 'pubDate', default=None)),
             })
 
         return {

From f2c704e1126d614a7cd3283ed94a2226587d7a33 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Sun, 6 Dec 2020 23:08:03 +0700
Subject: [PATCH 309/340] [generic] Extract RSS video itunes metadata
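
The new itunes() helper reads namespaced child elements of each RSS <item>. A
self-contained sketch of the same lookup using only the standard library (the
sample feed item below is made up for illustration):

    import xml.etree.ElementTree as ET

    ITUNES_NS = 'http://www.itunes.com/dtds/podcast-1.0.dtd'

    item = ET.fromstring('''
    <item xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd">
      <title>Episode 15</title>
      <itunes:duration>3723</itunes:duration>
      <itunes:episode>15</itunes:episode>
      <itunes:season>2</itunes:season>
      <itunes:explicit>false</itunes:explicit>
    </item>''')

    def itunes(key):
        # Equivalent of xpath_text(it, xpath_with_ns('./itunes:' + key, NS_MAP)).
        el = item.find('./{%s}%s' % (ITUNES_NS, key))
        return el.text if el is not None else None

    print(itunes('duration'), itunes('episode'), itunes('season'))  # 3723 15 2
    print(itunes('explicit'))  # false -> age_limit 0 in the patch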

---
 youtube_dl/extractor/generic.py | 36 ++++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index d0e0d6919..d2ba07839 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -20,12 +20,14 @@ from ..utils import (
     ExtractorError,
     float_or_none,
     HEADRequest,
+    int_or_none,
     is_html,
     js_to_json,
     KNOWN_EXTENSIONS,
     merge_dicts,
     mimetype2ext,
     orderedSet,
+    parse_duration,
     sanitized_Request,
     smuggle_url,
     unescapeHTML,
@@ -33,7 +35,9 @@ from ..utils import (
     unified_timestamp,
     unsmuggle_url,
     UnsupportedError,
+    url_or_none,
     xpath_text,
+    xpath_with_ns,
 )
 from .commonprotocols import RtmpIE
 from .brightcove import (
@@ -206,10 +210,12 @@ class GenericIE(InfoExtractor):
             'playlist': [{
                 'info_dict': {
                     'ext': 'mov',
-                    'id': 'pdv_maddow_netcast_mov-12-03-2020-223726',
-                    'title': 'MSNBC Rachel Maddow (video) - 12-03-2020-223726',
+                    'id': 'pdv_maddow_netcast_mov-12-04-2020-224335',
+                    'title': 're:MSNBC Rachel Maddow',
                     'description': 're:.*her unique approach to storytelling.*',
-                    'upload_date': '20201204',
+                    'timestamp': int,
+                    'upload_date': compat_str,
+                    'duration': float,
                 },
             }],
         },
@@ -2189,6 +2195,10 @@ class GenericIE(InfoExtractor):
         playlist_desc_el = doc.find('./channel/description')
         playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
 
+        NS_MAP = {
+            'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
+        }
+
         entries = []
         for it in doc.findall('./channel/item'):
             next_url = None
@@ -2204,6 +2214,20 @@ class GenericIE(InfoExtractor):
             if not next_url:
                 continue
 
+            def itunes(key):
+                return xpath_text(
+                    it, xpath_with_ns('./itunes:%s' % key, NS_MAP),
+                    default=None)
+
+            duration = itunes('duration')
+            explicit = itunes('explicit')
+            if explicit == 'true':
+                age_limit = 18
+            elif explicit == 'false':
+                age_limit = 0
+            else:
+                age_limit = None
+
             entries.append({
                 '_type': 'url_transparent',
                 'url': next_url,
@@ -2211,6 +2235,12 @@ class GenericIE(InfoExtractor):
                 'description': xpath_text(it, 'description', default=None),
                 'timestamp': unified_timestamp(
                     xpath_text(it, 'pubDate', default=None)),
+                'duration': int_or_none(duration) or parse_duration(duration),
+                'thumbnail': url_or_none(itunes('image')),
+                'episode': itunes('title'),
+                'episode_number': int_or_none(itunes('episode')),
+                'season_number': int_or_none(itunes('season')),
+                'age_limit': age_limit,
             })
 
         return {

From 6ad0d8781e2b156a4599abcebaa0b91b7e3131f4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 00:45:16 +0700
Subject: [PATCH 310/340] [extractor/common] Fix media type extraction for
 HTML5 media tags in start/end form
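
The fix adds one more capturing group, so re.findall() now yields 4-tuples and
the plain tag kind ('video'/'audio') is read from its own group instead of the
possibly prefixed tag name. A quick illustration with a made-up snippet:

    import re

    OLD = r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>'
    NEW = r'(?s)(<(?P<tag>(?:amp-)?(video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>'

    html = '<amp-video src="clip.mp4"></amp-video>'

    print(re.findall(OLD, html))
    # [('<amp-video src="clip.mp4">', 'amp-video', '')] -> media_type was 'amp-video'
    print(re.findall(NEW, html))
    # [('<amp-video src="clip.mp4">', 'amp-video', 'video', '')] -> media_type is 'video'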

---
 youtube_dl/extractor/common.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index a21afefeb..864596e66 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2521,8 +2521,8 @@ class InfoExtractor(object):
             # Allowing more characters may end up in significant slow down (see
             # https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
             # http://www.porntrex.com/maps/videositemap.xml).
-            r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
-        for media_tag, media_type, media_content in media_tags:
+            r'(?s)(<(?P<tag>(?:amp-)?(video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
+        for media_tag, _, media_type, media_content in media_tags:
             media_info = {
                 'formats': [],
                 'subtitles': {},

From 06bf2ac20f2fb64df9f6e4cd1dba267b25effd22 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 00:55:49 +0700
Subject: [PATCH 311/340] [extractor/common] Eliminate media tag name regex
 duplication

---
 youtube_dl/extractor/common.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 864596e66..877873ebd 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2513,15 +2513,16 @@ class InfoExtractor(object):
         # amp-video and amp-audio are very similar to their HTML5 counterparts
         # so we will include them right here (see
         # https://www.ampproject.org/docs/reference/components/amp-video)
+        _MEDIA_TAG_NAME_RE = r'(?:amp-)?(video|audio)'
         media_tags = [(media_tag, media_type, '')
                       for media_tag, media_type
-                      in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
+                      in re.findall(r'(?s)(<%s[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
         media_tags.extend(re.findall(
             # We only allow video|audio followed by a whitespace or '>'.
             # Allowing more characters may end up in significant slow down (see
             # https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
             # http://www.porntrex.com/maps/videositemap.xml).
-            r'(?s)(<(?P<tag>(?:amp-)?(video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
+            r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
         for media_tag, _, media_type, media_content in media_tags:
             media_info = {
                 'formats': [],

From 91dd25fe1e18aa3f617005799a8f5018a551c7dd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 00:59:25 +0700
Subject: [PATCH 312/340] [extractor/common] Add support for dl8-* media tags
 (closes #27283)
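
With the widened prefix the same regex now also recognises the dl8-* player tags
(dl8-video/dl8-audio and their dl8-live-* variants). A quick check of which tag
names match (illustrative, Python 3):

    import re

    _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'

    for tag in ('video', 'amp-audio', 'dl8-video', 'dl8-live-video', 'x-video'):
        print(tag, bool(re.fullmatch(_MEDIA_TAG_NAME_RE, tag)))
    # video True, amp-audio True, dl8-video True, dl8-live-video True, x-video False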

---
 youtube_dl/extractor/common.py  | 3 ++-
 youtube_dl/extractor/generic.py | 4 +++-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 877873ebd..dd07a1cae 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2513,7 +2513,8 @@ class InfoExtractor(object):
         # amp-video and amp-audio are very similar to their HTML5 counterparts
         # so we will include them right here (see
         # https://www.ampproject.org/docs/reference/components/amp-video)
-        _MEDIA_TAG_NAME_RE = r'(?:amp-)?(video|audio)'
+        # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
+        _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
         media_tags = [(media_tag, media_type, '')
                       for media_tag, media_type
                       in re.findall(r'(?s)(<%s[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index d2ba07839..85dc1d02d 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -2466,7 +2466,9 @@ class GenericIE(InfoExtractor):
         # Sometimes embedded video player is hidden behind percent encoding
         # (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448)
         # Unescaping the whole page allows to handle those cases in a generic way
-        webpage = compat_urllib_parse_unquote(webpage)
+        # FIXME: unescaping the whole page may break URLs, commenting out for now.
+        # There should probably be a second run of the generic extractor on the unescaped webpage.
+        # webpage = compat_urllib_parse_unquote(webpage)
 
         # Unescape squarespace embeds to be detected by generic extractor,
         # see https://github.com/ytdl-org/youtube-dl/issues/21294

From dccf4932e15392a35a44c24a2732cc0ea6deada4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 01:15:30 +0700
Subject: [PATCH 313/340] [extractor/common] Extract timestamp from
 Last-Modified header
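
Last-Modified carries an RFC 1123-style date, so switching from upload_date to a
proper timestamp preserves the time of day as well. The idea, shown here with the
standard library rather than youtube-dl's unified_timestamp() (header value assumed):

    from email.utils import mktime_tz, parsedate_tz

    last_modified = 'Mon, 07 Dec 2020 01:15:30 GMT'
    print(mktime_tz(parsedate_tz(last_modified)))  # 1607303730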

---
 youtube_dl/extractor/generic.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 85dc1d02d..055ca838f 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -2360,7 +2360,7 @@ class GenericIE(InfoExtractor):
         info_dict = {
             'id': video_id,
             'title': self._generic_title(url),
-            'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
+            'timestamp': unified_timestamp(head_response.headers.get('Last-Modified'))
         }
 
         # Check for direct link to a video

From 2bf0634d16b5732548652093d77dbd1aa76e8189 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 01:48:10 +0700
Subject: [PATCH 314/340] [youtube] Improve youtu.be extraction in non-existing
 playlists (closes #27324)
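
The new YoutubeYtBeIE simply re-routes youtu.be links that carry a list= parameter
to a regular watch URL handled by YoutubeTabIE. A standalone sketch of that URL
translation (video and playlist IDs taken from the existing test case):

    from urllib.parse import urlencode  # Python 3 stdlib; the extractor itself uses update_url_query()

    def ytbe_to_watch_url(video_id, playlist_id):
        return 'https://www.youtube.com/watch?' + urlencode({
            'v': video_id,
            'list': playlist_id,
            'feature': 'youtu.be',
        })

    print(ytbe_to_watch_url('yeWKywCrFtk', 'PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5'))
    # https://www.youtube.com/watch?v=yeWKywCrFtk&list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5&feature=youtu.be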

---
 youtube_dl/extractor/extractors.py |  1 +
 youtube_dl/extractor/youtube.py    | 55 +++++++++++++++++++-----------
 2 files changed, 36 insertions(+), 20 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 03aff1d52..575b4b36f 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1520,6 +1520,7 @@ from .youtube import (
     YoutubeSubscriptionsIE,
     YoutubeTruncatedIDIE,
     YoutubeTruncatedURLIE,
+    YoutubeYtBeIE,
     YoutubeYtUserIE,
     YoutubeWatchLaterIE,
 )
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index b796f58b2..b8219a0ad 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3139,8 +3139,7 @@ class YoutubePlaylistIE(InfoExtractor):
                         (?:
                             (?:
                                 youtube(?:kids)?\.com|
-                                invidio\.us|
-                                youtu\.be
+                                invidio\.us
                             )
                             /.*?\?.*?\blist=
                         )?
@@ -3185,6 +3184,32 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
         }
     }, {
+        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
+        'only_matching': True,
+    }, {
+        # music album playlist
+        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
+        'only_matching': True,
+    }]
+
+    @classmethod
+    def suitable(cls, url):
+        return False if YoutubeTabIE.suitable(url) else super(
+            YoutubePlaylistIE, cls).suitable(url)
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        if not qs:
+            qs = {'list': playlist_id}
+        return self.url_result(
+            update_url_query('https://www.youtube.com/playlist', qs),
+            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
+
+
+class YoutubeYtBeIE(InfoExtractor):
+    _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
+    _TESTS = [{
         'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
         'info_dict': {
             'id': 'yeWKywCrFtk',
@@ -3207,28 +3232,18 @@ class YoutubePlaylistIE(InfoExtractor):
     }, {
         'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
         'only_matching': True,
-    }, {
-        'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
-        'only_matching': True,
-    }, {
-        # music album playlist
-        'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
-        'only_matching': True,
     }]
 
-    @classmethod
-    def suitable(cls, url):
-        return False if YoutubeTabIE.suitable(url) else super(
-            YoutubePlaylistIE, cls).suitable(url)
-
     def _real_extract(self, url):
-        playlist_id = self._match_id(url)
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
-        if not qs:
-            qs = {'list': playlist_id}
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        playlist_id = mobj.group('playlist_id')
         return self.url_result(
-            update_url_query('https://www.youtube.com/playlist', qs),
-            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
+            update_url_query('https://www.youtube.com/watch', {
+                'v': video_id,
+                'list': playlist_id,
+                'feature': 'youtu.be',
+            }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
 
 
 class YoutubeYtUserIE(InfoExtractor):

From fa604d9083805aa2ff398f31d56dc3508b77a37c Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 6 Dec 2020 19:38:27 +0100
Subject: [PATCH 315/340] [aenetworks] Fix extraction

- Fix Fastly format extraction
- Add support for play and watch subdomains
- Extract series metadata

closes #23363
closes #23390
closes #26795
closes #26985
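
The rewritten extractor resolves a page's canonical path straight through the
feeds API instead of scraping the webpage. A rough sketch of the lookup URL it
builds (brand/canonical values assumed from the test URLs; only the URL
construction is shown, the actual fetch goes through _download_json):

    from urllib.parse import urlencode

    brand = 'aetv'
    canonical = 'shows/duck-dynasty/season-9/episode-1'
    api_url = ('https://feeds.video.aetnd.com/api/v2/%s/videos?' % brand
               + urlencode({'filter[canonical]': '/' + canonical}))
    print(api_url)
    # brackets and slashes are percent-encoded; the first result carries id,
    # publicUrl (theplatform), seriesName and tvSeasonNumber/tvSeasonEpisodeNumber
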
---
 youtube_dl/extractor/aenetworks.py | 277 +++++++++++++++++------------
 youtube_dl/extractor/extractors.py |   2 +
 2 files changed, 165 insertions(+), 114 deletions(-)

diff --git a/youtube_dl/extractor/aenetworks.py b/youtube_dl/extractor/aenetworks.py
index 611b948f5..3d0cf1208 100644
--- a/youtube_dl/extractor/aenetworks.py
+++ b/youtube_dl/extractor/aenetworks.py
@@ -5,20 +5,30 @@ import re
 
 from .theplatform import ThePlatformIE
 from ..utils import (
-    extract_attributes,
     ExtractorError,
     int_or_none,
-    smuggle_url,
     update_url_query,
-)
-from ..compat import (
-    compat_urlparse,
+    urlencode_postdata,
 )
 
 
 class AENetworksBaseIE(ThePlatformIE):
+    _BASE_URL_REGEX = r'''(?x)https?://
+        (?:(?:www|play|watch)\.)?
+        (?P<domain>
+            (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
+            fyi\.tv
+        )/'''
     _THEPLATFORM_KEY = 'crazyjava'
     _THEPLATFORM_SECRET = 's3cr3t'
+    _DOMAIN_MAP = {
+        'history.com': ('HISTORY', 'history'),
+        'aetv.com': ('AETV', 'aetv'),
+        'mylifetime.com': ('LIFETIME', 'lifetime'),
+        'lifetimemovieclub.com': ('LIFETIMEMOVIECLUB', 'lmc'),
+        'fyi.tv': ('FYI', 'fyi'),
+        'historyvault.com': (None, 'historyvault'),
+    }
 
     def _extract_aen_smil(self, smil_url, video_id, auth=None):
         query = {'mbr': 'true'}
@@ -31,7 +41,7 @@ class AENetworksBaseIE(ThePlatformIE):
             'assetTypes': 'high_video_s3'
         }, {
             'assetTypes': 'high_video_s3',
-            'switch': 'hls_ingest_fastly'
+            'switch': 'hls_high_fastly',
         }]
         formats = []
         subtitles = {}
@@ -61,20 +71,13 @@ class AENetworksBaseIE(ThePlatformIE):
 class AENetworksIE(AENetworksBaseIE):
     IE_NAME = 'aenetworks'
     IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault'
-    _VALID_URL = r'''(?x)
-                    https?://
-                        (?:www\.)?
-                        (?P<domain>
-                            (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
-                            fyi\.tv
-                        )/
-                        (?:
-                            shows/(?P<show_path>[^/]+(?:/[^/]+){0,2})|
-                            movies/(?P<movie_display_id>[^/]+)(?:/full-movie)?|
-                            specials/(?P<special_display_id>[^/]+)/(?:full-special|preview-)|
-                            collections/[^/]+/(?P<collection_display_id>[^/]+)
-                        )
-                    '''
+    _VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'''(?P<id>
+        shows/[^/]+/season-\d+/episode-\d+|
+        (?:
+            (?:movie|special)s/[^/]+|
+            (?:shows/[^/]+/)?videos
+        )/[^/?#&]+
+    )'''
     _TESTS = [{
         'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
         'info_dict': {
@@ -91,22 +94,23 @@ class AENetworksIE(AENetworksBaseIE):
             'skip_download': True,
         },
         'add_ie': ['ThePlatform'],
-    }, {
-        'url': 'http://www.history.com/shows/ancient-aliens/season-1',
-        'info_dict': {
-            'id': '71889446852',
-        },
-        'playlist_mincount': 5,
-    }, {
-        'url': 'http://www.mylifetime.com/shows/atlanta-plastic',
-        'info_dict': {
-            'id': 'SERIES4317',
-            'title': 'Atlanta Plastic',
-        },
-        'playlist_mincount': 2,
+        'skip': 'This video is only available for users of participating TV providers.',
     }, {
         'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
-        'only_matching': True
+        'info_dict': {
+            'id': '600587331957',
+            'ext': 'mp4',
+            'title': 'Inlawful Entry',
+            'description': 'md5:57c12115a2b384d883fe64ca50529e08',
+            'timestamp': 1452634428,
+            'upload_date': '20160112',
+            'uploader': 'AENE-NEW',
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+        'add_ie': ['ThePlatform'],
     }, {
         'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
         'only_matching': True
@@ -117,80 +121,152 @@ class AENetworksIE(AENetworksBaseIE):
         'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
         'only_matching': True
     }, {
-        'url': 'https://www.lifetimemovieclub.com/movies/a-killer-among-us',
+        'url': 'https://watch.lifetimemovieclub.com/movies/10-year-reunion/full-movie',
         'only_matching': True
     }, {
         'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
         'only_matching': True
-    }, {
-        'url': 'https://www.historyvault.com/collections/america-the-story-of-us/westward',
-        'only_matching': True
     }, {
         'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story',
         'only_matching': True
+    }, {
+        'url': 'http://www.history.com/videos/history-of-valentines-day',
+        'only_matching': True
+    }, {
+        'url': 'https://play.aetv.com/shows/duck-dynasty/videos/best-of-duck-dynasty-getting-quack-in-shape',
+        'only_matching': True
     }]
-    _DOMAIN_TO_REQUESTOR_ID = {
-        'history.com': 'HISTORY',
-        'aetv.com': 'AETV',
-        'mylifetime.com': 'LIFETIME',
-        'lifetimemovieclub.com': 'LIFETIMEMOVIECLUB',
-        'fyi.tv': 'FYI',
-    }
 
     def _real_extract(self, url):
-        domain, show_path, movie_display_id, special_display_id, collection_display_id = re.match(self._VALID_URL, url).groups()
-        display_id = show_path or movie_display_id or special_display_id or collection_display_id
-        webpage = self._download_webpage(url, display_id, headers=self.geo_verification_headers())
-        if show_path:
-            url_parts = show_path.split('/')
-            url_parts_len = len(url_parts)
-            if url_parts_len == 1:
-                entries = []
-                for season_url_path in re.findall(r'(?s)<li[^>]+data-href="(/shows/%s/season-\d+)"' % url_parts[0], webpage):
-                    entries.append(self.url_result(
-                        compat_urlparse.urljoin(url, season_url_path), 'AENetworks'))
-                if entries:
-                    return self.playlist_result(
-                        entries, self._html_search_meta('aetn:SeriesId', webpage),
-                        self._html_search_meta('aetn:SeriesTitle', webpage))
-                else:
-                    # single season
-                    url_parts_len = 2
-            if url_parts_len == 2:
-                entries = []
-                for episode_item in re.findall(r'(?s)<[^>]+class="[^"]*(?:episode|program)-item[^"]*"[^>]*>', webpage):
-                    episode_attributes = extract_attributes(episode_item)
-                    episode_url = compat_urlparse.urljoin(
-                        url, episode_attributes['data-canonical'])
-                    entries.append(self.url_result(
-                        episode_url, 'AENetworks',
-                        episode_attributes.get('data-videoid') or episode_attributes.get('data-video-id')))
-                return self.playlist_result(
-                    entries, self._html_search_meta('aetn:SeasonId', webpage))
-
-        video_id = self._html_search_meta('aetn:VideoID', webpage)
-        media_url = self._search_regex(
-            [r"media_url\s*=\s*'(?P<url>[^']+)'",
-             r'data-media-url=(?P<url>(?:https?:)?//[^\s>]+)',
-             r'data-media-url=(["\'])(?P<url>(?:(?!\1).)+?)\1'],
-            webpage, 'video url', group='url')
+        domain, canonical = re.match(self._VALID_URL, url).groups()
+        requestor_id, brand = self._DOMAIN_MAP[domain]
+        result = self._download_json(
+            'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
+            canonical, query={'filter[canonical]': '/' + canonical})['results'][0]
+        title = result['title']
+        video_id = result['id']
+        media_url = result['publicUrl']
         theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
             r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
         info = self._parse_theplatform_metadata(theplatform_metadata)
         auth = None
         if theplatform_metadata.get('AETN$isBehindWall'):
-            requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
             resource = self._get_mvpd_resource(
                 requestor_id, theplatform_metadata['title'],
                 theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
                 theplatform_metadata['ratings'][0]['rating'])
             auth = self._extract_mvpd_auth(
                 url, video_id, requestor_id, resource)
-        info.update(self._search_json_ld(webpage, video_id, fatal=False))
         info.update(self._extract_aen_smil(media_url, video_id, auth))
+        info.update({
+            'title': title,
+            'series': result.get('seriesName'),
+            'season_number': int_or_none(result.get('tvSeasonNumber')),
+            'episode_number': int_or_none(result.get('tvSeasonEpisodeNumber')),
+        })
         return info
 
 
+class AENetworksListBaseIE(AENetworksBaseIE):
+    def _call_api(self, resource, slug, brand, fields):
+        return self._download_json(
+            'https://yoga.appsvcs.aetnd.com/graphql',
+            slug, query={'brand': brand}, data=urlencode_postdata({
+                'query': '''{
+  %s(slug: "%s") {
+    %s
+  }
+}''' % (resource, slug, fields),
+            }))['data'][resource]
+
+    def _real_extract(self, url):
+        domain, slug = re.match(self._VALID_URL, url).groups()
+        _, brand = self._DOMAIN_MAP[domain]
+        playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
+        base_url = 'http://watch.%s' % domain
+
+        entries = []
+        for item in (playlist.get(self._ITEMS_KEY) or []):
+            doc = self._get_doc(item)
+            canonical = doc.get('canonical')
+            if not canonical:
+                continue
+            entries.append(self.url_result(
+                base_url + canonical, AENetworksIE.ie_key(), doc.get('id')))
+
+        description = None
+        if self._PLAYLIST_DESCRIPTION_KEY:
+            description = playlist.get(self._PLAYLIST_DESCRIPTION_KEY)
+
+        return self.playlist_result(
+            entries, playlist.get('id'),
+            playlist.get(self._PLAYLIST_TITLE_KEY), description)
+
+
+class AENetworksCollectionIE(AENetworksListBaseIE):
+    IE_NAME = 'aenetworks:collection'
+    _VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'(?:[^/]+/)*(?:list|collections)/(?P<id>[^/?#&]+)/?(?:[?#&]|$)'
+    _TESTS = [{
+        'url': 'https://watch.historyvault.com/list/america-the-story-of-us',
+        'info_dict': {
+            'id': '282',
+            'title': 'America The Story of Us',
+        },
+        'playlist_mincount': 12,
+    }, {
+        'url': 'https://watch.historyvault.com/shows/america-the-story-of-us-2/season-1/list/america-the-story-of-us',
+        'only_matching': True
+    }, {
+        'url': 'https://www.historyvault.com/collections/mysteryquest',
+        'only_matching': True
+    }]
+    _RESOURCE = 'list'
+    _ITEMS_KEY = 'items'
+    _PLAYLIST_TITLE_KEY = 'display_title'
+    _PLAYLIST_DESCRIPTION_KEY = None
+    _FIELDS = '''id
+    display_title
+    items {
+      ... on ListVideoItem {
+        doc {
+          canonical
+          id
+        }
+      }
+    }'''
+
+    def _get_doc(self, item):
+        return item.get('doc') or {}
+
+
+class AENetworksShowIE(AENetworksListBaseIE):
+    IE_NAME = 'aenetworks:show'
+    _VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'shows/(?P<id>[^/?#&]+)/?(?:[?#&]|$)'
+    _TESTS = [{
+        'url': 'http://www.history.com/shows/ancient-aliens',
+        'info_dict': {
+            'id': 'SH012427480000',
+            'title': 'Ancient Aliens',
+            'description': 'md5:3f6d74daf2672ff3ae29ed732e37ea7f',
+        },
+        'playlist_mincount': 168,
+    }]
+    _RESOURCE = 'series'
+    _ITEMS_KEY = 'episodes'
+    _PLAYLIST_TITLE_KEY = 'title'
+    _PLAYLIST_DESCRIPTION_KEY = 'description'
+    _FIELDS = '''description
+    id
+    title
+    episodes {
+      canonical
+      id
+    }'''
+
+    def _get_doc(self, item):
+        return item
+
+
 class HistoryTopicIE(AENetworksBaseIE):
     IE_NAME = 'history:topic'
     IE_DESC = 'History.com Topic'
@@ -204,6 +280,7 @@ class HistoryTopicIE(AENetworksBaseIE):
             'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
             'timestamp': 1375819729,
             'upload_date': '20130806',
+            'uploader': 'AENE-NEW',
         },
         'params': {
             # m3u8 download
@@ -212,36 +289,8 @@ class HistoryTopicIE(AENetworksBaseIE):
         'add_ie': ['ThePlatform'],
     }]
 
-    def theplatform_url_result(self, theplatform_url, video_id, query):
-        return {
-            '_type': 'url_transparent',
-            'id': video_id,
-            'url': smuggle_url(
-                update_url_query(theplatform_url, query),
-                {
-                    'sig': {
-                        'key': self._THEPLATFORM_KEY,
-                        'secret': self._THEPLATFORM_SECRET,
-                    },
-                    'force_smil_url': True
-                }),
-            'ie_key': 'ThePlatform',
-        }
-
     def _real_extract(self, url):
         display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
-        video_id = self._search_regex(
-            r'<phoenix-iframe[^>]+src="[^"]+\btpid=(\d+)', webpage, 'tpid')
-        result = self._download_json(
-            'https://feeds.video.aetnd.com/api/v2/history/videos',
-            video_id, query={'filter[id]': video_id})['results'][0]
-        title = result['title']
-        info = self._extract_aen_smil(result['publicUrl'], video_id)
-        info.update({
-            'title': title,
-            'description': result.get('description'),
-            'duration': int_or_none(result.get('duration')),
-            'timestamp': int_or_none(result.get('added'), 1000),
-        })
-        return info
+        return self.url_result(
+            'http://www.history.com/videos/' + display_id,
+            AENetworksIE.ie_key())
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 575b4b36f..833d5d6aa 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -30,6 +30,8 @@ from .adobetv import (
 from .adultswim import AdultSwimIE
 from .aenetworks import (
     AENetworksIE,
+    AENetworksCollectionIE,
+    AENetworksShowIE,
     HistoryTopicIE,
 )
 from .afreecatv import AfreecaTVIE

From 791b743765b1154cff099c543a6723f2c40b822b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 02:01:47 +0700
Subject: [PATCH 316/340] [extractor/generic] Remove unused import

---
 youtube_dl/extractor/generic.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 055ca838f..9f79e0ba4 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -31,7 +31,6 @@ from ..utils import (
     sanitized_Request,
     smuggle_url,
     unescapeHTML,
-    unified_strdate,
     unified_timestamp,
     unsmuggle_url,
     UnsupportedError,

From d9482c0083e1a6ad6602f063bab46edfa7514338 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 02:01:53 +0700
Subject: [PATCH 317/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index d9b8336ab..eeb7cbb88 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,29 @@
+version <unreleased>
+
+Core
+* [extractor/common] Extract timestamp from Last-Modified header
++ [extractor/common] Add support for dl8-* media tags (#27283)
+* [extractor/common] Fix media type extraction for HTML5 media tags
+  in start/end form
+
+Extractors
+* [aenetworks] Fix extraction (#23363, #23390, #26795, #26985)
+    * Fix Fastly format extraction
+    + Add support for play and watch subdomains
+    + Extract series metadata
+* [youtube] Improve youtu.be extraction in non-existing playlists (#27324)
++ [generic] Extract RSS video description, timestamp and itunes metadata
+  (#27177)
+* [nrk] Reduce the number of instalments and episodes requests
+* [nrk] Improve extraction
+    * Improve format extraction for old akamai formats
+    + Add is_live value to entry info dict
+    * Request instalments only when available
+    * Fix skole extraction
++ [peertube] Extract fps
++ [peertube] Recognize audio-only formats (#27295)
+
+
 version 2020.12.05
 
 Core

From 2717036489924fce092db593fb769d2f20179e05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 02:03:34 +0700
Subject: [PATCH 318/340] release 2020.12.07

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 3 +++
 youtube_dl/version.py                            | 2 +-
 8 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 4cbf763be..f72f57d02 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.12.05
+ [debug] youtube-dl version 2020.12.07
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 5e20c5505..8160d257a 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 23c309dc0..f7b6ba9ea 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 43363cfa0..efeae549f 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.12.05
+ [debug] youtube-dl version 2020.12.07
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 8b03ed273..0435a0ddf 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.05. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.05**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/ChangeLog b/ChangeLog
index eeb7cbb88..f3571e77b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.12.07
 
 Core
 * [extractor/common] Extract timestamp from Last-Modified header
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 651a754d9..59e73ba59 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -35,6 +35,8 @@
  - **adobetv:video**
  - **AdultSwim**
  - **aenetworks**: A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault
+ - **aenetworks:collection**
+ - **aenetworks:show**
  - **afreecatv**: afreecatv.com
  - **AirMozilla**
  - **AliExpressLive**
@@ -1164,6 +1166,7 @@
  - **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
  - **youtube:tab**: YouTube.com tab
  - **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
+ - **YoutubeYtBe**
  - **YoutubeYtUser**
  - **Zapiks**
  - **Zaq1**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 9e876731b..39a685197 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.12.05'
+__version__ = '2020.12.07'

From 4f1ecca58d19be96b17893edb44308004c2b47fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Mon, 7 Dec 2020 03:27:21 +0700
Subject: [PATCH 319/340] [slideslive] Add support for yoda service videos and
 extract subtitles (closes #27323)
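
For yoda-hosted talks the player JSON only exposes a service id; the HLS and DASH
manifests are then derived from a fixed URL template. A tiny illustration using
the service id from the new test case:

    _MANIFEST_PATTERN = 'https://01.cdn.yoda.slideslive.com/%s/master.%s'

    service_id = 'RMraDYN5ozA_'
    print(_MANIFEST_PATTERN % (service_id, 'm3u8'))
    # https://01.cdn.yoda.slideslive.com/RMraDYN5ozA_/master.m3u8
    print(_MANIFEST_PATTERN % (service_id, 'mpd'))
    # https://01.cdn.yoda.slideslive.com/RMraDYN5ozA_/master.mpd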

---
 youtube_dl/extractor/slideslive.py | 55 +++++++++++++++++++++++++++---
 1 file changed, 51 insertions(+), 4 deletions(-)

diff --git a/youtube_dl/extractor/slideslive.py b/youtube_dl/extractor/slideslive.py
index d9ea76831..cd70841a9 100644
--- a/youtube_dl/extractor/slideslive.py
+++ b/youtube_dl/extractor/slideslive.py
@@ -2,7 +2,12 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import smuggle_url
+from ..utils import (
+    bool_or_none,
+    smuggle_url,
+    try_get,
+    url_or_none,
+)
 
 
 class SlidesLiveIE(InfoExtractor):
@@ -18,8 +23,21 @@ class SlidesLiveIE(InfoExtractor):
             'description': 'Watch full version of this video at https://slideslive.com/38902413.',
             'uploader': 'SlidesLive Videos - A',
             'uploader_id': 'UC62SdArr41t_-_fX40QCLRw',
+            'timestamp': 1597615266,
             'upload_date': '20170925',
         }
+    }, {
+        # video_service_name = yoda
+        'url': 'https://slideslive.com/38935785',
+        'md5': '575cd7a6c0acc6e28422fe76dd4bcb1a',
+        'info_dict': {
+            'id': 'RMraDYN5ozA_',
+            'ext': 'mp4',
+            'title': 'Offline Reinforcement Learning: From Algorithms to Practical Challenges',
+        },
+        'params': {
+            'format': 'bestvideo',
+        },
     }, {
         # video_service_name = youtube
         'url': 'https://slideslive.com/38903721/magic-a-scientific-resurrection-of-an-esoteric-legend',
@@ -39,18 +57,47 @@ class SlidesLiveIE(InfoExtractor):
         video_data = self._download_json(
             'https://ben.slideslive.com/player/' + video_id, video_id)
         service_name = video_data['video_service_name'].lower()
-        assert service_name in ('url', 'vimeo', 'youtube')
+        assert service_name in ('url', 'yoda', 'vimeo', 'youtube')
         service_id = video_data['video_service_id']
+        subtitles = {}
+        for sub in try_get(video_data, lambda x: x['subtitles'], list) or []:
+            if not isinstance(sub, dict):
+                continue
+            webvtt_url = url_or_none(sub.get('webvtt_url'))
+            if not webvtt_url:
+                continue
+            lang = sub.get('language') or 'en'
+            subtitles.setdefault(lang, []).append({
+                'url': webvtt_url,
+            })
         info = {
             'id': video_id,
             'thumbnail': video_data.get('thumbnail'),
-            'url': service_id,
+            'is_live': bool_or_none(video_data.get('is_live')),
+            'subtitles': subtitles,
         }
-        if service_name == 'url':
+        if service_name in ('url', 'yoda'):
             info['title'] = video_data['title']
+            if service_name == 'url':
+                info['url'] = service_id
+            else:
+                formats = []
+                _MANIFEST_PATTERN = 'https://01.cdn.yoda.slideslive.com/%s/master.%s'
+                formats.extend(self._extract_m3u8_formats(
+                    _MANIFEST_PATTERN % (service_id, 'm3u8'), service_id, 'mp4',
+                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
+                formats.extend(self._extract_mpd_formats(
+                    _MANIFEST_PATTERN % (service_id, 'mpd'), service_id,
+                    mpd_id='dash', fatal=False))
+                self._sort_formats(formats)
+                info.update({
+                    'id': service_id,
+                    'formats': formats,
+                })
         else:
             info.update({
                 '_type': 'url_transparent',
+                'url': service_id,
                 'ie_key': service_name.capitalize(),
                 'title': video_data.get('title'),
             })

From 17b01228f84713df850912b554675bbfcbc62fb3 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Sun, 6 Dec 2020 23:37:16 +0100
Subject: [PATCH 320/340] [americastestkitchen] Fix extraction and add support
 for Cook's Country and Cook's Illustrated

closes #17234
closes #27322
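
Extraction now goes through the site's v6 JSON API; episode pages map to the
/episodes/ resource while standalone video pages keep /videos/. A small sketch of
the endpoint selection (IDs taken from the existing test URLs):

    def api_url(resource_type, video_id):
        # 'episode' pages use the plural resource name, 'videos' stays as-is.
        if resource_type == 'episode':
            resource_type = 'episodes'
        return ('https://www.americastestkitchen.com/api/v6/%s/%s'
                % (resource_type, video_id))

    print(api_url('episode', '582'))  # .../api/v6/episodes/582
    print(api_url('videos', '3420'))  # .../api/v6/videos/3420
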
---
 youtube_dl/extractor/americastestkitchen.py | 68 ++++++++-------------
 1 file changed, 26 insertions(+), 42 deletions(-)

diff --git a/youtube_dl/extractor/americastestkitchen.py b/youtube_dl/extractor/americastestkitchen.py
index 9c9d77ae1..e20f00fc3 100644
--- a/youtube_dl/extractor/americastestkitchen.py
+++ b/youtube_dl/extractor/americastestkitchen.py
@@ -1,33 +1,33 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..utils import (
     clean_html,
-    int_or_none,
-    js_to_json,
     try_get,
     unified_strdate,
 )
 
 
 class AmericasTestKitchenIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?americastestkitchen\.com/(?:episode|videos)/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?P<resource_type>episode|videos)/(?P<id>\d+)'
     _TESTS = [{
         'url': 'https://www.americastestkitchen.com/episode/582-weeknight-japanese-suppers',
         'md5': 'b861c3e365ac38ad319cfd509c30577f',
         'info_dict': {
             'id': '5b400b9ee338f922cb06450c',
-            'title': 'Weeknight Japanese Suppers',
+            'title': 'Japanese Suppers',
             'ext': 'mp4',
-            'description': 'md5:3d0c1a44bb3b27607ce82652db25b4a8',
+            'description': 'md5:64e606bfee910627efc4b5f050de92b3',
             'thumbnail': r're:^https?://',
             'timestamp': 1523664000,
             'upload_date': '20180414',
-            'release_date': '20180414',
+            'release_date': '20180410',
             'series': "America's Test Kitchen",
             'season_number': 18,
-            'episode': 'Weeknight Japanese Suppers',
+            'episode': 'Japanese Suppers',
             'episode_number': 15,
         },
         'params': {
@@ -36,47 +36,31 @@ class AmericasTestKitchenIE(InfoExtractor):
     }, {
         'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon',
         'only_matching': True,
+    }, {
+        'url': 'https://www.cookscountry.com/episode/564-when-only-chocolate-will-do',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cooksillustrated.com/videos/4478-beef-wellington',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        resource_type, video_id = re.match(self._VALID_URL, url).groups()
+        is_episode = resource_type == 'episode'
+        if is_episode:
+            resource_type = 'episodes'
 
-        webpage = self._download_webpage(url, video_id)
-
-        video_data = self._parse_json(
-            self._search_regex(
-                r'window\.__INITIAL_STATE__\s*=\s*({.+?})\s*;\s*</script>',
-                webpage, 'initial context'),
-            video_id, js_to_json)
-
-        ep_data = try_get(
-            video_data,
-            (lambda x: x['episodeDetail']['content']['data'],
-             lambda x: x['videoDetail']['content']['data']), dict)
-        ep_meta = ep_data.get('full_video', {})
-
-        zype_id = ep_data.get('zype_id') or ep_meta['zype_id']
-
-        title = ep_data.get('title') or ep_meta.get('title')
-        description = clean_html(ep_meta.get('episode_description') or ep_data.get(
-            'description') or ep_meta.get('description'))
-        thumbnail = try_get(ep_meta, lambda x: x['photo']['image_url'])
-        release_date = unified_strdate(ep_data.get('aired_at'))
-
-        season_number = int_or_none(ep_meta.get('season_number'))
-        episode = ep_meta.get('title')
-        episode_number = int_or_none(ep_meta.get('episode_number'))
+        resource = self._download_json(
+            'https://www.americastestkitchen.com/api/v6/%s/%s' % (resource_type, video_id), video_id)
+        video = resource['video'] if is_episode else resource
+        episode = resource if is_episode else resource.get('episode') or {}
 
         return {
             '_type': 'url_transparent',
-            'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % zype_id,
+            'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % video['zypeId'],
             'ie_key': 'Zype',
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
-            'release_date': release_date,
-            'series': "America's Test Kitchen",
-            'season_number': season_number,
-            'episode': episode,
-            'episode_number': episode_number,
+            'description': clean_html(video.get('description')),
+            'release_date': unified_strdate(video.get('publishDate')),
+            'series': try_get(episode, lambda x: x['show']['title']),
+            'episode': episode.get('title'),
         }

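For orientation, the v6 API call above returns either an episode object (with a nested video) or a plain video object (with an optional nested episode). A rough sketch of that branching with a made-up payload; only the field names are taken from the patch:

    def pick_video_and_episode(resource, is_episode):
        # Mirrors the selection logic above: episodes nest the video,
        # standalone videos may nest the episode.
        video = resource['video'] if is_episode else resource
        episode = resource if is_episode else resource.get('episode') or {}
        return video, episode

    episode_resource = {
        'title': 'Japanese Suppers',
        'video': {'zypeId': '5b400b9ee338f922cb06450c', 'publishDate': '2018-04-10'},
    }
    print(pick_video_and_episode(episode_resource, True))
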
From 6956db3606e7ffa638856f1816747f544c1b1a53 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 7 Dec 2020 15:12:54 +0100
Subject: [PATCH 321/340] [tvplay:home] Fix extraction(closes #21153)

---
 youtube_dl/extractor/tvplay.py | 90 +++++++++++++---------------------
 1 file changed, 35 insertions(+), 55 deletions(-)

diff --git a/youtube_dl/extractor/tvplay.py b/youtube_dl/extractor/tvplay.py
index 3c2450dd0..0d858c025 100644
--- a/youtube_dl/extractor/tvplay.py
+++ b/youtube_dl/extractor/tvplay.py
@@ -12,11 +12,13 @@ from ..utils import (
     determine_ext,
     ExtractorError,
     int_or_none,
+    parse_duration,
     parse_iso8601,
     qualities,
     try_get,
     update_url_query,
     url_or_none,
+    urljoin,
 )
 
 
@@ -414,7 +416,7 @@ class ViafreeIE(InfoExtractor):
 
 
 class TVPlayHomeIE(InfoExtractor):
-    _VALID_URL = r'https?://tvplay\.(?:tv3\.lt|skaties\.lv|tv3\.ee)/[^/]+/[^/?#&]+-(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:tv3?)?play\.(?:tv3\.lt|skaties\.lv|tv3\.ee)/(?:[^/]+/)*[^/?#&]+-(?P<id>\d+)'
     _TESTS = [{
         'url': 'https://tvplay.tv3.lt/aferistai-n-7/aferistai-10047125/',
         'info_dict': {
@@ -433,80 +435,58 @@ class TVPlayHomeIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
-        'add_ie': [TVPlayIE.ie_key()],
     }, {
         'url': 'https://tvplay.skaties.lv/vinas-melo-labak/vinas-melo-labak-10280317/',
         'only_matching': True,
     }, {
         'url': 'https://tvplay.tv3.ee/cool-d-ga-mehhikosse/cool-d-ga-mehhikosse-10044354/',
         'only_matching': True,
+    }, {
+        'url': 'https://play.tv3.lt/aferistai-10047125',
+        'only_matching': True,
+    }, {
+        'url': 'https://tv3play.skaties.lv/vinas-melo-labak-10280317',
+        'only_matching': True,
+    }, {
+        'url': 'https://play.tv3.ee/cool-d-ga-mehhikosse-10044354',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        webpage = self._download_webpage(url, video_id)
+        asset = self._download_json(
+            urljoin(url, '/sb/public/asset/' + video_id), video_id)
 
-        video_id = self._search_regex(
-            r'data-asset-id\s*=\s*["\'](\d{5,})\b', webpage, 'video id')
-
-        if len(video_id) < 8:
-            return self.url_result(
-                'mtg:%s' % video_id, ie=TVPlayIE.ie_key(), video_id=video_id)
-
-        m3u8_url = self._search_regex(
-            r'data-file\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
-            'm3u8 url', group='url')
+        m3u8_url = asset['movie']['contentUrl']
+        video_id = asset['assetId']
+        asset_title = asset['title']
+        title = asset_title['title']
 
         formats = self._extract_m3u8_formats(
-            m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
-            m3u8_id='hls')
+            m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
         self._sort_formats(formats)
 
-        title = self._search_regex(
-            r'data-title\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
-            'title', default=None, group='value') or self._html_search_meta(
-            'title', webpage, default=None) or self._og_search_title(
-            webpage)
+        thumbnails = None
+        image_url = asset.get('imageUrl')
+        if image_url:
+            thumbnails = [{
+                'url': urljoin(url, image_url),
+                'ext': 'jpg',
+            }]
 
-        description = self._html_search_meta(
-            'description', webpage,
-            default=None) or self._og_search_description(webpage)
-
-        thumbnail = self._search_regex(
-            r'data-image\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
-            'thumbnail', default=None, group='url') or self._html_search_meta(
-            'thumbnail', webpage, default=None) or self._og_search_thumbnail(
-            webpage)
-
-        duration = int_or_none(self._search_regex(
-            r'data-duration\s*=\s*["\'](\d+)', webpage, 'duration',
-            fatal=False))
-
-        season = self._search_regex(
-            (r'data-series-title\s*=\s*(["\'])[^/]+/(?P<value>(?:(?!\1).)+)\1',
-             r'\bseason\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage,
-            'season', default=None, group='value')
-        season_number = int_or_none(self._search_regex(
-            r'(\d+)(?:[.\s]+sezona|\s+HOOAEG)', season or '', 'season number',
-            default=None))
-        episode = self._search_regex(
-            (r'\bepisode\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
-             r'data-subtitle\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1'), webpage,
-            'episode', default=None, group='value')
-        episode_number = int_or_none(self._search_regex(
-            r'(?:S[eē]rija|Osa)\s+(\d+)', episode or '', 'episode number',
-            default=None))
+        metadata = asset.get('metadata') or {}
 
         return {
             'id': video_id,
             'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
-            'duration': duration,
-            'season': season,
-            'season_number': season_number,
-            'episode': episode,
-            'episode_number': episode_number,
+            'description': asset_title.get('summaryLong') or asset_title.get('summaryShort'),
+            'thumbnails': thumbnails,
+            'duration': parse_duration(asset_title.get('runTime')),
+            'series': asset.get('tvSeriesTitle'),
+            'season': asset.get('tvSeasonTitle'),
+            'season_number': int_or_none(metadata.get('seasonNumber')),
+            'episode': asset_title.get('titleBrief'),
+            'episode_number': int_or_none(metadata.get('episodeNumber')),
             'formats': formats,
         }

From 3ded7519857ef3fd1e3760e49e7ee26a92d44679 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Mon, 7 Dec 2020 15:27:13 +0100
Subject: [PATCH 322/340] [generic] comment out a test now covered by
 AmericasTestKitchenIE

---
 youtube_dl/extractor/generic.py | 34 ++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 9f79e0ba4..9eba9614f 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -2117,23 +2117,23 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             },
         },
-        {
-            # Zype embed
-            'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
-            'info_dict': {
-                'id': '5b400b834b32992a310622b9',
-                'ext': 'mp4',
-                'title': 'Smoky Barbecue Favorites',
-                'thumbnail': r're:^https?://.*\.jpe?g',
-                'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
-                'upload_date': '20170909',
-                'timestamp': 1504915200,
-            },
-            'add_ie': [ZypeIE.ie_key()],
-            'params': {
-                'skip_download': True,
-            },
-        },
+        # {
+        #     # Zype embed
+        #     'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
+        #     'info_dict': {
+        #         'id': '5b400b834b32992a310622b9',
+        #         'ext': 'mp4',
+        #         'title': 'Smoky Barbecue Favorites',
+        #         'thumbnail': r're:^https?://.*\.jpe?g',
+        #         'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
+        #         'upload_date': '20170909',
+        #         'timestamp': 1504915200,
+        #     },
+        #     'add_ie': [ZypeIE.ie_key()],
+        #     'params': {
+        #         'skip_download': True,
+        #     },
+        # },
         {
             # videojs embed
             'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',

From 5bd7ad2e811c9bda2e889156bc261a76e9f086ed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Tue, 8 Dec 2020 01:12:00 +0700
Subject: [PATCH 323/340] [youtube:tab] Capture and output alerts (closes
 #27340)

---
 youtube_dl/extractor/youtube.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index b8219a0ad..173f5a2d8 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3060,6 +3060,24 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                         try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
         return uploader
 
+    @staticmethod
+    def _extract_alert(data):
+        alerts = []
+        for alert in try_get(data, lambda x: x['alerts'], list) or []:
+            if not isinstance(alert, dict):
+                continue
+            alert_text = try_get(
+                alert, lambda x: x['alertRenderer']['text'], dict)
+            if not alert_text:
+                continue
+            text = try_get(
+                alert_text,
+                (lambda x: x['simpleText'], lambda x: x['runs'][0]['text']),
+                compat_str)
+            if text:
+                alerts.append(text)
+        return '\n'.join(alerts)
+
     def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
         selected_tab = self._extract_selected_tab(tabs)
         renderer = try_get(
@@ -3127,6 +3145,10 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             compat_str) or video_id
         if video_id:
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+        # Capture and output alerts
+        alert = self._extract_alert(data)
+        if alert:
+            raise ExtractorError(alert, expected=True)
         # Failed to recognize
         raise ExtractorError('Unable to recognize tab page')
 

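A made-up alerts payload showing the two text shapes _extract_alert handles ('simpleText' and 'runs'); the simplified sketch below drops the try_get guards used in the real code:

    data = {
        'alerts': [
            {'alertRenderer': {'text': {'simpleText': 'This channel does not exist.'}}},
            {'alertRenderer': {'text': {'runs': [{'text': 'Unavailable videos are hidden'}]}}},
        ],
    }

    def extract_alert(data):
        alerts = []
        for alert in data.get('alerts') or []:
            text = alert.get('alertRenderer', {}).get('text') or {}
            alerts.append(text.get('simpleText') or text['runs'][0]['text'])
        return '\n'.join(alerts)

    print(extract_alert(data))
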
From 07333d00626fe47c4491f762f4d77b51cd69a1b9 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 8 Dec 2020 00:31:47 +0100
Subject: [PATCH 324/340] [telequebec] Fix extraction and add support for
 video.telequebec.tv

closes #25733
closes #26883
closes #27339
---
 youtube_dl/extractor/extractors.py |   1 +
 youtube_dl/extractor/telequebec.py | 160 ++++++++++++++++-------------
 2 files changed, 88 insertions(+), 73 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 833d5d6aa..86589a059 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1152,6 +1152,7 @@ from .telequebec import (
     TeleQuebecSquatIE,
     TeleQuebecEmissionIE,
     TeleQuebecLiveIE,
+    TeleQuebecVideoIE,
 )
 from .teletask import TeleTaskIE
 from .telewebion import TelewebionIE
diff --git a/youtube_dl/extractor/telequebec.py b/youtube_dl/extractor/telequebec.py
index b4c485b9b..800d87b70 100644
--- a/youtube_dl/extractor/telequebec.py
+++ b/youtube_dl/extractor/telequebec.py
@@ -12,25 +12,16 @@ from ..utils import (
 
 
 class TeleQuebecBaseIE(InfoExtractor):
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
+
     @staticmethod
-    def _result(url, ie_key):
+    def _brightcove_result(brightcove_id, player_id, account_id='6150020952001'):
         return {
             '_type': 'url_transparent',
-            'url': smuggle_url(url, {'geo_countries': ['CA']}),
-            'ie_key': ie_key,
+            'url': smuggle_url(TeleQuebecBaseIE.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, brightcove_id), {'geo_countries': ['CA']}),
+            'ie_key': 'BrightcoveNew',
         }
 
-    @staticmethod
-    def _limelight_result(media_id):
-        return TeleQuebecBaseIE._result(
-            'limelight:media:' + media_id, 'LimelightMedia')
-
-    @staticmethod
-    def _brightcove_result(brightcove_id):
-        return TeleQuebecBaseIE._result(
-            'http://players.brightcove.net/6150020952001/default_default/index.html?videoId=%s'
-            % brightcove_id, 'BrightcoveNew')
-
 
 class TeleQuebecIE(TeleQuebecBaseIE):
     _VALID_URL = r'''(?x)
@@ -44,14 +35,18 @@ class TeleQuebecIE(TeleQuebecBaseIE):
         # available till 01.01.2023
         'url': 'http://zonevideo.telequebec.tv/media/37578/un-petit-choc-et-puis-repart/un-chef-a-la-cabane',
         'info_dict': {
-            'id': '577116881b4b439084e6b1cf4ef8b1b3',
+            'id': '6155972771001',
             'ext': 'mp4',
             'title': 'Un petit choc et puis repart!',
-            'description': 'md5:067bc84bd6afecad85e69d1000730907',
+            'description': 'md5:b04a7e6b3f74e32d7b294cffe8658374',
+            'timestamp': 1589262469,
+            'uploader_id': '6150020952001',
+            'upload_date': '20200512',
         },
         'params': {
-            'skip_download': True,
+            'format': 'bestvideo',
         },
+        'add_ie': ['BrightcoveNew'],
     }, {
         'url': 'https://zonevideo.telequebec.tv/media/55267/le-soleil/passe-partout',
         'info_dict': {
@@ -65,7 +60,6 @@ class TeleQuebecIE(TeleQuebecBaseIE):
         },
         'params': {
             'format': 'bestvideo',
-            'skip_download': True,
         },
         'add_ie': ['BrightcoveNew'],
     }, {
@@ -79,25 +73,20 @@ class TeleQuebecIE(TeleQuebecBaseIE):
 
     def _real_extract(self, url):
         media_id = self._match_id(url)
-
-        media_data = self._download_json(
-            'https://mnmedias.api.telequebec.tv/api/v2/media/' + media_id,
+        media = self._download_json(
+            'https://mnmedias.api.telequebec.tv/api/v3/media/' + media_id,
             media_id)['media']
-
-        source_id = media_data['streamInfo']['sourceId']
-        source = (try_get(
-            media_data, lambda x: x['streamInfo']['source'],
-            compat_str) or 'limelight').lower()
-        if source == 'brightcove':
-            info = self._brightcove_result(source_id)
-        else:
-            info = self._limelight_result(source_id)
+        source_id = next(source_info['sourceId'] for source_info in media['streamInfos'] if source_info.get('source') == 'Brightcove')
+        info = self._brightcove_result(source_id, '22gPKdt7f')
+        product = media.get('product') or {}
+        season = product.get('season') or {}
         info.update({
-            'title': media_data.get('title'),
-            'description': try_get(
-                media_data, lambda x: x['descriptions'][0]['text'], compat_str),
-            'duration': int_or_none(
-                media_data.get('durationInMilliseconds'), 1000),
+            'description': try_get(media, lambda x: x['descriptions'][-1]['text'], compat_str),
+            'series': try_get(season, lambda x: x['serie']['titre']),
+            'season': season.get('name'),
+            'season_number': int_or_none(season.get('seasonNo')),
+            'episode': product.get('titre'),
+            'episode_number': int_or_none(product.get('episodeNo')),
         })
         return info
 
@@ -148,7 +137,7 @@ class TeleQuebecSquatIE(InfoExtractor):
         }
 
 
-class TeleQuebecEmissionIE(TeleQuebecBaseIE):
+class TeleQuebecEmissionIE(InfoExtractor):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
@@ -160,15 +149,16 @@ class TeleQuebecEmissionIE(TeleQuebecBaseIE):
     _TESTS = [{
         'url': 'http://lindicemcsween.telequebec.tv/emissions/100430013/des-soins-esthetiques-a-377-d-interets-annuels-ca-vous-tente',
         'info_dict': {
-            'id': '66648a6aef914fe3badda25e81a4d50a',
+            'id': '6154476028001',
             'ext': 'mp4',
-            'title': "Des soins esthétiques à 377 % d'intérêts annuels, ça vous tente?",
-            'description': 'md5:369e0d55d0083f1fc9b71ffb640ea014',
-            'upload_date': '20171024',
-            'timestamp': 1508862118,
+            'title': 'Des soins esthétiques à 377 % d’intérêts annuels, ça vous tente?',
+            'description': 'md5:cb4d378e073fae6cce1f87c00f84ae9f',
+            'upload_date': '20200505',
+            'timestamp': 1588713424,
+            'uploader_id': '6150020952001',
         },
         'params': {
-            'skip_download': True,
+            'format': 'bestvideo',
         },
     }, {
         'url': 'http://bancpublic.telequebec.tv/emissions/emission-49/31986/jeunes-meres-sous-pression',
@@ -187,26 +177,26 @@ class TeleQuebecEmissionIE(TeleQuebecBaseIE):
         webpage = self._download_webpage(url, display_id)
 
         media_id = self._search_regex(
-            r'mediaUID\s*:\s*["\'][Ll]imelight_(?P<id>[a-z0-9]{32})', webpage,
-            'limelight id')
+            r'mediaId\s*:\s*(?P<id>\d+)', webpage, 'media id')
 
-        info = self._limelight_result(media_id)
-        info.update({
-            'title': self._og_search_title(webpage, default=None),
-            'description': self._og_search_description(webpage, default=None),
-        })
-        return info
+        return self.url_result(
+            'http://zonevideo.telequebec.tv/media/' + media_id,
+            TeleQuebecIE.ie_key())
 
 
-class TeleQuebecLiveIE(InfoExtractor):
+class TeleQuebecLiveIE(TeleQuebecBaseIE):
     _VALID_URL = r'https?://zonevideo\.telequebec\.tv/(?P<id>endirect)'
     _TEST = {
         'url': 'http://zonevideo.telequebec.tv/endirect/',
         'info_dict': {
-            'id': 'endirect',
+            'id': '6159095684001',
             'ext': 'mp4',
-            'title': 're:^Télé-Québec - En direct [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'title': 're:^Télé-Québec [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
             'is_live': True,
+            'description': 'Canal principal de Télé-Québec',
+            'uploader_id': '6150020952001',
+            'timestamp': 1590439901,
+            'upload_date': '20200525',
         },
         'params': {
             'skip_download': True,
@@ -214,25 +204,49 @@ class TeleQuebecLiveIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        return self._brightcove_result('6159095684001', 'skCsmi2Uw')
 
-        m3u8_url = None
-        webpage = self._download_webpage(
-            'https://player.telequebec.tv/Tq_VideoPlayer.js', video_id,
-            fatal=False)
-        if webpage:
-            m3u8_url = self._search_regex(
-                r'm3U8Url\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
-                'm3u8 url', default=None, group='url')
-        if not m3u8_url:
-            m3u8_url = 'https://teleqmmd.mmdlive.lldns.net/teleqmmd/f386e3b206814e1f8c8c1c71c0f8e748/manifest.m3u8'
-        formats = self._extract_m3u8_formats(
-            m3u8_url, video_id, 'mp4', m3u8_id='hls')
-        self._sort_formats(formats)
 
-        return {
-            'id': video_id,
-            'title': self._live_title('Télé-Québec - En direct'),
-            'is_live': True,
-            'formats': formats,
-        }
+class TeleQuebecVideoIE(TeleQuebecBaseIE):
+    _VALID_URL = r'https?://video\.telequebec\.tv/player(?:-live)?/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://video.telequebec.tv/player/31110/stream',
+        'info_dict': {
+            'id': '6202570652001',
+            'ext': 'mp4',
+            'title': 'Le coût du véhicule le plus vendu au Canada / Tous les frais liés à la procréation assistée',
+            'description': 'md5:685a7e4c450ba777c60adb6e71e41526',
+            'upload_date': '20201019',
+            'timestamp': 1603115930,
+            'uploader_id': '6101674910001',
+        },
+        'params': {
+            'format': 'bestvideo',
+        },
+    }, {
+        'url': 'https://video.telequebec.tv/player-live/28527',
+        'only_matching': True,
+    }]
+
+    def _call_api(self, path, video_id):
+        return self._download_json(
+            'http://beacon.playback.api.brightcove.com/telequebec/api/assets/' + path,
+            video_id, query={'device_layout': 'web', 'device_type': 'web'})['data']
+
+    def _real_extract(self, url):
+        asset_id = self._match_id(url)
+        asset = self._call_api(asset_id, asset_id)['asset']
+        stream = self._call_api(
+            asset_id + '/streams/' + asset['streams'][0]['id'], asset_id)['stream']
+        stream_url = stream['url']
+        account_id = try_get(
+            stream, lambda x: x['video_provider_details']['account_id']) or '6101674910001'
+        info = self._brightcove_result(stream_url, 'default', account_id)
+        info.update({
+            'description': asset.get('long_description') or asset.get('short_description'),
+            'series': asset.get('series_original_name'),
+            'season_number': int_or_none(asset.get('season_number')),
+            'episode': asset.get('original_name'),
+            'episode_number': int_or_none(asset.get('episode_number')),
+        })
+        return info

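The shared _brightcove_result helper above boils down to formatting a Brightcove player URL and smuggling the CA geo restriction onto it. A standalone sketch of just the URL part (template, player id and default account id are taken from the patch):

    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'

    def brightcove_player_url(brightcove_id, player_id, account_id='6150020952001'):
        # The real helper wraps this with smuggle_url(..., {'geo_countries': ['CA']})
        # and returns a url_transparent result keyed to BrightcoveNew.
        return BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, brightcove_id)

    print(brightcove_player_url('6155972771001', '22gPKdt7f'))
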
From e7eff914cd3ffd8bc2975100036fdac9829dd16d Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 8 Dec 2020 10:52:52 +0100
Subject: [PATCH 325/340] [lbry] add support for short and embed URLs and fix
 channel metadata extraction

---
 youtube_dl/extractor/lbry.py | 44 +++++++++++++++++++++++++++++-------
 1 file changed, 36 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/lbry.py b/youtube_dl/extractor/lbry.py
index 6177297ab..4fd50a60d 100644
--- a/youtube_dl/extractor/lbry.py
+++ b/youtube_dl/extractor/lbry.py
@@ -11,12 +11,14 @@ from ..utils import (
     int_or_none,
     mimetype2ext,
     try_get,
+    urljoin,
 )
 
 
 class LBRYIE(InfoExtractor):
     IE_NAME = 'lbry.tv'
-    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[^:]+:[0-9a-z]+/[^:]+:[0-9a-z])'
+    _CLAIM_ID_REGEX = r'[0-9a-f]{1,40}'
+    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[^:]+:{0}/[^:]+:{0}|[^:]+:{0}|\$/embed/[^/]+/{0})'.format(_CLAIM_ID_REGEX)
     _TESTS = [{
         # Video
         'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
@@ -40,6 +42,11 @@ class LBRYIE(InfoExtractor):
             'description': 'md5:661ac4f1db09f31728931d7b88807a61',
             'timestamp': 1591312601,
             'upload_date': '20200604',
+            'tags': list,
+            'duration': 2570,
+            'channel': 'The LBRY Foundation',
+            'channel_id': '0ed629d2b9c601300cacf7eabe9da0be79010212',
+            'channel_url': 'https://lbry.tv/@LBRYFoundation:0ed629d2b9c601300cacf7eabe9da0be79010212',
         }
     }, {
         'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
@@ -47,6 +54,15 @@ class LBRYIE(InfoExtractor):
     }, {
         'url': "https://odysee.com/@ScammerRevolts:b0/I-SYSKEY'D-THE-SAME-SCAMMERS-3-TIMES!:b",
         'only_matching': True,
+    }, {
+        'url': 'https://lbry.tv/Episode-1:e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
+        'only_matching': True,
+    }, {
+        'url': 'https://lbry.tv/$/embed/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
+        'only_matching': True,
+    }, {
+        'url': 'https://lbry.tv/Episode-1:e7',
+        'only_matching': True,
     }]
 
     def _call_api_proxy(self, method, display_id, params):
@@ -59,22 +75,33 @@ class LBRYIE(InfoExtractor):
             }).encode())['result']
 
     def _real_extract(self, url):
-        display_id = self._match_id(url).replace(':', '#')
+        display_id = self._match_id(url)
+        if display_id.startswith('$/embed/'):
+            display_id = display_id[8:].replace('/', ':')
+        else:
+            display_id = display_id.replace(':', '#')
         uri = 'lbry://' + display_id
         result = self._call_api_proxy(
             'resolve', display_id, {'urls': [uri]})[uri]
         result_value = result['value']
         if result_value.get('stream_type') not in ('video', 'audio'):
             raise ExtractorError('Unsupported URL', expected=True)
+        claim_id = result['claim_id']
+        title = result_value['title']
         streaming_url = self._call_api_proxy(
-            'get', display_id, {'uri': uri})['streaming_url']
+            'get', claim_id, {'uri': uri})['streaming_url']
         source = result_value.get('source') or {}
         media = result_value.get('video') or result_value.get('audio') or {}
-        signing_channel = result_value.get('signing_channel') or {}
+        signing_channel = result.get('signing_channel') or {}
+        channel_name = signing_channel.get('name')
+        channel_claim_id = signing_channel.get('claim_id')
+        channel_url = None
+        if channel_name and channel_claim_id:
+            channel_url = urljoin(url, '/%s:%s' % (channel_name, channel_claim_id))
 
         return {
-            'id': result['claim_id'],
-            'title': result_value['title'],
+            'id': claim_id,
+            'title': title,
             'thumbnail': try_get(result_value, lambda x: x['thumbnail']['url'], compat_str),
             'description': result_value.get('description'),
             'license': result_value.get('license'),
@@ -83,8 +110,9 @@ class LBRYIE(InfoExtractor):
             'width': int_or_none(media.get('width')),
             'height': int_or_none(media.get('height')),
             'duration': int_or_none(media.get('duration')),
-            'channel': signing_channel.get('name'),
-            'channel_id': signing_channel.get('claim_id'),
+            'channel': try_get(signing_channel, lambda x: x['value']['title']),
+            'channel_id': channel_claim_id,
+            'channel_url': channel_url,
             'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
             'filesize': int_or_none(source.get('size')),
             'url': streaming_url,

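The embed support above hinges on normalizing the matched id before building the lbry:// URI: embed paths turn their '/' into ':', while regular display ids turn ':' into '#'. A small sketch of that branch using the test URLs from the patch:

    def normalize_display_id(display_id):
        if display_id.startswith('$/embed/'):
            return display_id[8:].replace('/', ':')
        return display_id.replace(':', '#')

    # uri = 'lbry://' + normalize_display_id(...)
    print(normalize_display_id('$/embed/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396'))
    print(normalize_display_id('@Mantega:1/First-day-LBRY:1'))
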
From c368dc98e06961a84b0751541ee2869c352253a8 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 8 Dec 2020 14:53:22 +0100
Subject: [PATCH 326/340] [lbry] add support for channel extraction (closes
 #25584)

---
 youtube_dl/extractor/extractors.py |   5 +-
 youtube_dl/extractor/lbry.py       | 179 ++++++++++++++++++++++-------
 2 files changed, 141 insertions(+), 43 deletions(-)

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 86589a059..a94502eb3 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -534,7 +534,10 @@ from .laola1tv import (
     EHFTVIE,
     ITTFIE,
 )
-from .lbry import LBRYIE
+from .lbry import (
+    LBRYIE,
+    LBRYChannelIE,
+)
 from .lci import LCIIE
 from .lcp import (
     LcpPlayIE,
diff --git a/youtube_dl/extractor/lbry.py b/youtube_dl/extractor/lbry.py
index 4fd50a60d..41cc245eb 100644
--- a/youtube_dl/extractor/lbry.py
+++ b/youtube_dl/extractor/lbry.py
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import functools
 import json
 
 from .common import InfoExtractor
@@ -10,15 +11,73 @@ from ..utils import (
     ExtractorError,
     int_or_none,
     mimetype2ext,
+    OnDemandPagedList,
     try_get,
     urljoin,
 )
 
 
-class LBRYIE(InfoExtractor):
-    IE_NAME = 'lbry.tv'
+class LBRYBaseIE(InfoExtractor):
+    _BASE_URL_REGEX = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/'
     _CLAIM_ID_REGEX = r'[0-9a-f]{1,40}'
-    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[^:]+:{0}/[^:]+:{0}|[^:]+:{0}|\$/embed/[^/]+/{0})'.format(_CLAIM_ID_REGEX)
+    _OPT_CLAIM_ID = '[^:/?#&]+(?::%s)?' % _CLAIM_ID_REGEX
+    _SUPPORTED_STREAM_TYPES = ['video', 'audio']
+
+    def _call_api_proxy(self, method, display_id, params, resource):
+        return self._download_json(
+            'https://api.lbry.tv/api/v1/proxy',
+            display_id, 'Downloading %s JSON metadata' % resource,
+            headers={'Content-Type': 'application/json-rpc'},
+            data=json.dumps({
+                'method': method,
+                'params': params,
+            }).encode())['result']
+
+    def _resolve_url(self, url, display_id, resource):
+        return self._call_api_proxy(
+            'resolve', display_id, {'urls': url}, resource)[url]
+
+    def _permanent_url(self, url, claim_name, claim_id):
+        return urljoin(url, '/%s:%s' % (claim_name, claim_id))
+
+    def _parse_stream(self, stream, url):
+        stream_value = stream.get('value') or {}
+        stream_type = stream_value.get('stream_type')
+        source = stream_value.get('source') or {}
+        media = stream_value.get(stream_type) or {}
+        signing_channel = stream.get('signing_channel') or {}
+        channel_name = signing_channel.get('name')
+        channel_claim_id = signing_channel.get('claim_id')
+        channel_url = None
+        if channel_name and channel_claim_id:
+            channel_url = self._permanent_url(url, channel_name, channel_claim_id)
+
+        info = {
+            'thumbnail': try_get(stream_value, lambda x: x['thumbnail']['url'], compat_str),
+            'description': stream_value.get('description'),
+            'license': stream_value.get('license'),
+            'timestamp': int_or_none(stream.get('timestamp')),
+            'tags': stream_value.get('tags'),
+            'duration': int_or_none(media.get('duration')),
+            'channel': try_get(signing_channel, lambda x: x['value']['title']),
+            'channel_id': channel_claim_id,
+            'channel_url': channel_url,
+            'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
+            'filesize': int_or_none(source.get('size')),
+        }
+        if stream_type == 'audio':
+            info['vcodec'] = 'none'
+        else:
+            info.update({
+                'width': int_or_none(media.get('width')),
+                'height': int_or_none(media.get('height')),
+            })
+        return info
+
+
+class LBRYIE(LBRYBaseIE):
+    IE_NAME = 'lbry'
+    _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + r'(?P<id>\$/[^/]+/[^/]+/{1}|@{0}/{0}|(?!@){0})'.format(LBRYBaseIE._OPT_CLAIM_ID, LBRYBaseIE._CLAIM_ID_REGEX)
     _TESTS = [{
         # Video
         'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
@@ -30,6 +89,8 @@ class LBRYIE(InfoExtractor):
             'description': 'md5:f6cb5c704b332d37f5119313c2c98f51',
             'timestamp': 1595694354,
             'upload_date': '20200725',
+            'width': 1280,
+            'height': 720,
         }
     }, {
         # Audio
@@ -47,6 +108,7 @@ class LBRYIE(InfoExtractor):
             'channel': 'The LBRY Foundation',
             'channel_id': '0ed629d2b9c601300cacf7eabe9da0be79010212',
             'channel_url': 'https://lbry.tv/@LBRYFoundation:0ed629d2b9c601300cacf7eabe9da0be79010212',
+            'vcodec': 'none',
         }
     }, {
         'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
@@ -63,57 +125,90 @@ class LBRYIE(InfoExtractor):
     }, {
         'url': 'https://lbry.tv/Episode-1:e7',
         'only_matching': True,
+    }, {
+        'url': 'https://lbry.tv/@LBRYFoundation/Episode-1',
+        'only_matching': True,
+    }, {
+        'url': 'https://lbry.tv/$/download/Episode-1/e7d93d772bd87e2b62d5ab993c1c3ced86ebb396',
+        'only_matching': True,
     }]
 
-    def _call_api_proxy(self, method, display_id, params):
-        return self._download_json(
-            'https://api.lbry.tv/api/v1/proxy', display_id,
-            headers={'Content-Type': 'application/json-rpc'},
-            data=json.dumps({
-                'method': method,
-                'params': params,
-            }).encode())['result']
-
     def _real_extract(self, url):
         display_id = self._match_id(url)
-        if display_id.startswith('$/embed/'):
-            display_id = display_id[8:].replace('/', ':')
+        if display_id.startswith('$/'):
+            display_id = display_id.split('/', 2)[-1].replace('/', ':')
         else:
             display_id = display_id.replace(':', '#')
         uri = 'lbry://' + display_id
-        result = self._call_api_proxy(
-            'resolve', display_id, {'urls': [uri]})[uri]
+        result = self._resolve_url(uri, display_id, 'stream')
         result_value = result['value']
-        if result_value.get('stream_type') not in ('video', 'audio'):
+        if result_value.get('stream_type') not in self._SUPPORTED_STREAM_TYPES:
             raise ExtractorError('Unsupported URL', expected=True)
         claim_id = result['claim_id']
         title = result_value['title']
         streaming_url = self._call_api_proxy(
-            'get', claim_id, {'uri': uri})['streaming_url']
-        source = result_value.get('source') or {}
-        media = result_value.get('video') or result_value.get('audio') or {}
-        signing_channel = result.get('signing_channel') or {}
-        channel_name = signing_channel.get('name')
-        channel_claim_id = signing_channel.get('claim_id')
-        channel_url = None
-        if channel_name and channel_claim_id:
-            channel_url = urljoin(url, '/%s:%s' % (channel_name, channel_claim_id))
-
-        return {
+            'get', claim_id, {'uri': uri}, 'streaming url')['streaming_url']
+        info = self._parse_stream(result, url)
+        info.update({
             'id': claim_id,
             'title': title,
-            'thumbnail': try_get(result_value, lambda x: x['thumbnail']['url'], compat_str),
-            'description': result_value.get('description'),
-            'license': result_value.get('license'),
-            'timestamp': int_or_none(result.get('timestamp')),
-            'tags': result_value.get('tags'),
-            'width': int_or_none(media.get('width')),
-            'height': int_or_none(media.get('height')),
-            'duration': int_or_none(media.get('duration')),
-            'channel': try_get(signing_channel, lambda x: x['value']['title']),
-            'channel_id': channel_claim_id,
-            'channel_url': channel_url,
-            'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
-            'filesize': int_or_none(source.get('size')),
             'url': streaming_url,
-        }
+        })
+        return info
+
+
+class LBRYChannelIE(LBRYBaseIE):
+    IE_NAME = 'lbry:channel'
+    _VALID_URL = LBRYBaseIE._BASE_URL_REGEX + r'(?P<id>@%s)/?(?:[?#&]|$)' % LBRYBaseIE._OPT_CLAIM_ID
+    _TESTS = [{
+        'url': 'https://lbry.tv/@LBRYFoundation:0',
+        'info_dict': {
+            'id': '0ed629d2b9c601300cacf7eabe9da0be79010212',
+            'title': 'The LBRY Foundation',
+            'description': 'Channel for the LBRY Foundation. Follow for updates and news.',
+        },
+        'playlist_count': 29,
+    }, {
+        'url': 'https://lbry.tv/@LBRYFoundation',
+        'only_matching': True,
+    }]
+    _PAGE_SIZE = 50
+
+    def _fetch_page(self, claim_id, url, page):
+        page += 1
+        result = self._call_api_proxy(
+            'claim_search', claim_id, {
+                'channel_ids': [claim_id],
+                'claim_type': 'stream',
+                'no_totals': True,
+                'page': page,
+                'page_size': self._PAGE_SIZE,
+                'stream_types': self._SUPPORTED_STREAM_TYPES,
+            }, 'page %d' % page)
+        for item in (result.get('items') or []):
+            stream_claim_name = item.get('name')
+            stream_claim_id = item.get('claim_id')
+            if not (stream_claim_name and stream_claim_id):
+                continue
+
+            info = self._parse_stream(item, url)
+            info.update({
+                '_type': 'url',
+                'id': stream_claim_id,
+                'title': try_get(item, lambda x: x['value']['title']),
+                'url': self._permanent_url(url, stream_claim_name, stream_claim_id),
+            })
+            yield info
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url).replace(':', '#')
+        result = self._resolve_url(
+            'lbry://' + display_id, display_id, 'channel')
+        claim_id = result['claim_id']
+        entries = OnDemandPagedList(
+            functools.partial(self._fetch_page, claim_id, url),
+            self._PAGE_SIZE)
+        result_value = result.get('value') or {}
+        return self.playlist_result(
+            entries, claim_id, result_value.get('title'),
+            result_value.get('description'))

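The channel extractor above wires claim_search pagination through OnDemandPagedList, so a page is only fetched when the caller actually needs it. A self-contained sketch of the same lazy pattern, with a fake page fetcher standing in for the API proxy call:

    import functools

    PAGE_SIZE = 2
    FAKE_CLAIMS = ['claim-%d' % i for i in range(5)]  # stand-in for claim_search items

    def fetch_page(claim_id, page):
        # The real _fetch_page requests page=page + 1 from claim_search and
        # yields url results; here we just slice a list.
        start = page * PAGE_SIZE
        for name in FAKE_CLAIMS[start:start + PAGE_SIZE]:
            yield {'channel_id': claim_id, 'url': '/%s' % name}

    pagefunc = functools.partial(fetch_page, '0ed629d2b9c601300cacf7eabe9da0be79010212')
    # OnDemandPagedList(pagefunc, PAGE_SIZE) calls pagefunc(page) on demand;
    # simulate the first two pages by hand:
    print(list(pagefunc(0)) + list(pagefunc(1)))
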
From e2bdf8bf4f3de7698d1d2844687e3acc760b34e7 Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Tue, 8 Dec 2020 17:11:28 +0100
Subject: [PATCH 327/340] [amcnetworks] Fix free content extraction (closes
 #20354)

---
 youtube_dl/extractor/amcnetworks.py | 50 ++++++++++++++---------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/youtube_dl/extractor/amcnetworks.py b/youtube_dl/extractor/amcnetworks.py
index 6fb3d6c53..12b6de0bf 100644
--- a/youtube_dl/extractor/amcnetworks.py
+++ b/youtube_dl/extractor/amcnetworks.py
@@ -1,6 +1,8 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .theplatform import ThePlatformIE
 from ..utils import (
     int_or_none,
@@ -11,25 +13,22 @@ from ..utils import (
 
 
 class AMCNetworksIE(ThePlatformIE):
-    _VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|(?:we|sundance)tv)\.com/(?:movies|shows(?:/[^/]+)+)/(?P<id>[^/?#]+)'
+    _VALID_URL = r'https?://(?:www\.)?(?P<site>amc|bbcamerica|ifc|(?:we|sundance)tv)\.com/(?P<id>(?:movies|shows(?:/[^/]+)+)/[^/?#&]+)'
     _TESTS = [{
-        'url': 'http://www.ifc.com/shows/maron/season-04/episode-01/step-1',
-        'md5': '',
+        'url': 'https://www.bbcamerica.com/shows/the-graham-norton-show/videos/tina-feys-adorable-airline-themed-family-dinner--51631',
         'info_dict': {
-            'id': 's3MX01Nl4vPH',
+            'id': '4Lq1dzOnZGt0',
             'ext': 'mp4',
-            'title': 'Maron - Season 4 - Step 1',
-            'description': 'In denial about his current situation, Marc is reluctantly convinced by his friends to enter rehab. Starring Marc Maron and Constance Zimmer.',
-            'age_limit': 17,
-            'upload_date': '20160505',
-            'timestamp': 1462468831,
+            'title': "The Graham Norton Show - Season 28 - Tina Fey's Adorable Airline-Themed Family Dinner",
+            'description': "It turns out child stewardesses are very generous with the wine! All-new episodes of 'The Graham Norton Show' premiere Fridays at 11/10c on BBC America.",
+            'upload_date': '20201120',
+            'timestamp': 1605904350,
             'uploader': 'AMCN',
         },
         'params': {
             # m3u8 download
             'skip_download': True,
         },
-        'skip': 'Requires TV provider accounts',
     }, {
         'url': 'http://www.bbcamerica.com/shows/the-hunt/full-episodes/season-1/episode-01-the-hardest-challenge',
         'only_matching': True,
@@ -55,32 +54,33 @@ class AMCNetworksIE(ThePlatformIE):
         'url': 'https://www.sundancetv.com/shows/riviera/full-episodes/season-1/episode-01-episode-1',
         'only_matching': True,
     }]
+    _REQUESTOR_ID_MAP = {
+        'amc': 'AMC',
+        'bbcamerica': 'BBCA',
+        'ifc': 'IFC',
+        'sundancetv': 'SUNDANCE',
+        'wetv': 'WETV',
+    }
 
     def _real_extract(self, url):
-        display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
+        site, display_id = re.match(self._VALID_URL, url).groups()
+        requestor_id = self._REQUESTOR_ID_MAP[site]
+        properties = self._download_json(
+            'https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s' % (requestor_id.lower(), display_id),
+            display_id)['data']['properties']
         query = {
             'mbr': 'true',
             'manifest': 'm3u',
         }
-        media_url = self._search_regex(
-            r'window\.platformLinkURL\s*=\s*[\'"]([^\'"]+)',
-            webpage, 'media url')
-        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
-            r'link\.theplatform\.com/s/([^?]+)',
-            media_url, 'theplatform_path'), display_id)
+        tp_path = 'M_UwQC/media/' + properties['videoPid']
+        media_url = 'https://link.theplatform.com/s/' + tp_path
+        theplatform_metadata = self._download_theplatform_metadata(tp_path, display_id)
         info = self._parse_theplatform_metadata(theplatform_metadata)
         video_id = theplatform_metadata['pid']
         title = theplatform_metadata['title']
         rating = try_get(
             theplatform_metadata, lambda x: x['ratings'][0]['rating'])
-        auth_required = self._search_regex(
-            r'window\.authRequired\s*=\s*(true|false);',
-            webpage, 'auth required')
-        if auth_required == 'true':
-            requestor_id = self._search_regex(
-                r'window\.requestor_id\s*=\s*[\'"]([^\'"]+)',
-                webpage, 'requestor id')
+        if properties.get('videoCategory') == 'TVE-Auth':
             resource = self._get_mvpd_resource(
                 requestor_id, title, video_id, rating)
             query['auth'] = self._extract_mvpd_auth(

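The rewritten extractor above no longer scrapes the page; it maps the site name to a requestor id and asks the content delivery gateway for the video properties. A sketch of that URL construction (map and endpoint are copied from the patch, the display id is one of its test URLs):

    _REQUESTOR_ID_MAP = {
        'amc': 'AMC',
        'bbcamerica': 'BBCA',
        'ifc': 'IFC',
        'sundancetv': 'SUNDANCE',
        'wetv': 'WETV',
    }

    def properties_url(site, display_id):
        requestor_id = _REQUESTOR_ID_MAP[site]
        return ('https://content-delivery-gw.svc.ds.amcn.com/api/v2/content/amcn/%s/url/%s'
                % (requestor_id.lower(), display_id))

    print(properties_url('bbcamerica', 'shows/the-graham-norton-show/videos/tina-feys-adorable-airline-themed-family-dinner--51631'))
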
From 5a1fbbf8b7215aab0e6382e93eaa1561093352cf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 Dec 2020 00:05:21 +0700
Subject: [PATCH 328/340] [extractor/common] Fix inline HTML5 media tags
 processing and add test (closes #27345)

---
 test/test_InfoExtractor.py     | 12 ++++++++++++
 youtube_dl/extractor/common.py |  6 +++---
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py
index 71f6608fe..644b3759c 100644
--- a/test/test_InfoExtractor.py
+++ b/test/test_InfoExtractor.py
@@ -108,6 +108,18 @@ class TestInfoExtractor(unittest.TestCase):
         self.assertEqual(self.ie._download_json(uri, None, fatal=False), None)
 
     def test_parse_html5_media_entries(self):
+        # inline video tag
+        expect_dict(
+            self,
+            self.ie._parse_html5_media_entries(
+                'https://127.0.0.1/video.html',
+                r'<html><video src="/vid.mp4" /></html>', None)[0],
+            {
+                'formats': [{
+                    'url': 'https://127.0.0.1/vid.mp4',
+                }],
+            })
+
         # from https://www.r18.com/
         # with kpbs in label
         expect_dict(
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index dd07a1cae..74e40fabb 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2515,9 +2515,9 @@ class InfoExtractor(object):
         # https://www.ampproject.org/docs/reference/components/amp-video)
         # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
         _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
-        media_tags = [(media_tag, media_type, '')
-                      for media_tag, media_type
-                      in re.findall(r'(?s)(<%s[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
+        media_tags = [(media_tag, media_tag_name, media_type, '')
+                      for media_tag, media_tag_name, media_type
+                      in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
         media_tags.extend(re.findall(
             # We only allow video|audio followed by a whitespace or '>'.
             # Allowing more characters may end up in significant slow down (see

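The fixed pattern wraps the tag name in its own group, so re.findall on a self-closing tag yields (whole tag, tag name, media type) tuples, which the comprehension above pads with an empty content string to match the non-self-closing branch. A quick standalone check with a made-up page:

    import re

    _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
    webpage = '<html><video src="/vid.mp4" /></html>'  # illustrative input only

    # Prints [('<video src="/vid.mp4" />', 'video', 'video')]
    print(re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage))
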
From 9d8d0f8b4a65ea05c4bd9a9720c07d2c7f3eaf03 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sone=C3=A9=20John?= <sonee@alphasoftware.co>
Date: Tue, 8 Dec 2020 13:41:47 -0400
Subject: [PATCH 329/340] [youtube] Remove unused code (#27343)

---
 youtube_dl/extractor/youtube.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 173f5a2d8..438a93b17 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -67,11 +67,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
 
     _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'
 
-    _YOUTUBE_CLIENT_HEADERS = {
-        'x-youtube-client-name': '1',
-        'x-youtube-client-version': '1.20200609.04.02',
-    }
-
     def _set_language(self):
         self._set_cookie(
             '.youtube.com', 'PREF', 'f1=50000000&f6=8&hl=en',

From 644c3ef886c3e7c4974658202c2761cc6570f93a Mon Sep 17 00:00:00 2001
From: EntranceJew <EntranceJew@gmail.com>
Date: Tue, 8 Dec 2020 12:14:46 -0600
Subject: [PATCH 330/340] [tubitv] Extract release year (#27317)

---
 youtube_dl/extractor/tubitv.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py
index a51fa6515..ebfb05c63 100644
--- a/youtube_dl/extractor/tubitv.py
+++ b/youtube_dl/extractor/tubitv.py
@@ -33,6 +33,19 @@ class TubiTvIE(InfoExtractor):
     }, {
         'url': 'http://tubitv.com/movies/383676/tracker',
         'only_matching': True,
+    }, {
+        'url': 'https://tubitv.com/movies/560057/penitentiary?start=true',
+        'info_dict': {
+            'id': '560057',
+            'ext': 'mp4',
+            'title': 'Penitentiary',
+            'description': 'md5:8d2fc793a93cc1575ff426fdcb8dd3f9',
+            'uploader_id': 'd8fed30d4f24fcb22ec294421b9defc2',
+            'release_year': 1979,
+        },
+        'params': {
+            'skip_download': True,
+        },
     }]
 
     def _login(self):
@@ -93,4 +106,5 @@ class TubiTvIE(InfoExtractor):
             'description': video_data.get('description'),
             'duration': int_or_none(video_data.get('duration')),
             'uploader_id': video_data.get('publisher_id'),
+            'release_year': int_or_none(video_data.get('year')),
         }

From e00b8f60d4ad662d1737e80f154f7afeb738494c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 Dec 2020 03:40:02 +0700
Subject: [PATCH 331/340] [youtube:tab] Delegate inline playlists to tab-based
 playlists (closes #27298)

---
 youtube_dl/extractor/youtube.py | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 438a93b17..a3f75b626 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2688,6 +2688,10 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         # no longer available?
         'url': 'https://www.youtube.com/feed/recommended',
         'only_matching': True,
+    }, {
+        # inline playlist with not always working continuations
+        'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
+        'only_matching': True,
     }
         # TODO
         # {
@@ -3099,10 +3103,20 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         playlist.update(self._extract_uploader(data))
         return playlist
 
-    def _extract_from_playlist(self, item_id, data, playlist):
+    def _extract_from_playlist(self, item_id, url, data, playlist):
         title = playlist.get('title') or try_get(
             data, lambda x: x['titleText']['simpleText'], compat_str)
         playlist_id = playlist.get('playlistId') or item_id
+        # Inline playlist rendition continuation does not always work
+        # at Youtube side, so delegating regular tab-based playlist URL
+        # processing whenever possible.
+        playlist_url = urljoin(url, try_get(
+            playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
+            compat_str))
+        if playlist_url and playlist_url != url:
+            return self.url_result(
+                playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
+                video_title=title)
         return self.playlist_result(
             self._playlist_entries(playlist), playlist_id=playlist_id,
             playlist_title=title)
@@ -3132,7 +3146,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         playlist = try_get(
             data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
         if playlist:
-            return self._extract_from_playlist(item_id, data, playlist)
+            return self._extract_from_playlist(item_id, url, data, playlist)
         # Fallback to video extraction if no playlist alike page is recognized.
         # First check for the current video then try the v attribute of URL query.
         video_id = try_get(

From e029da9addef9e41ec9db4a85165ef8e1e352636 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 Dec 2020 04:02:50 +0700
Subject: [PATCH 332/340] [youtube:tab] Make click tracking params on
 continuation optional

---
 youtube_dl/extractor/youtube.py | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index a3f75b626..052926ee5 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2870,6 +2870,16 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             for entry in self._post_thread_entries(renderer):
                 yield entry
 
+    @staticmethod
+    def _build_continuation_query(continuation, ctp=None):
+        query = {
+            'ctoken': continuation,
+            'continuation': continuation,
+        }
+        if ctp:
+            query['itct'] = ctp
+        return query
+
     @staticmethod
     def _extract_next_continuation_data(renderer):
         next_continuation = try_get(
@@ -2880,11 +2890,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
         if not continuation:
             return
         ctp = next_continuation.get('clickTrackingParams')
-        return {
-            'ctoken': continuation,
-            'continuation': continuation,
-            'itct': ctp,
-        }
+        return YoutubeTabIE._build_continuation_query(continuation, ctp)
 
     @classmethod
     def _extract_continuation(cls, renderer):
@@ -2907,13 +2913,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             if not continuation:
                 continue
             ctp = continuation_ep.get('clickTrackingParams')
-            if not ctp:
-                continue
-            return {
-                'ctoken': continuation,
-                'continuation': continuation,
-                'itct': ctp,
-            }
+            return YoutubeTabIE._build_continuation_query(continuation, ctp)
 
     def _entries(self, tab, identity_token):
         tab_content = try_get(tab, lambda x: x['content'], dict)

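With the helper above, a continuation token alone is enough to build the query; itct is only added when clickTrackingParams are present. A trivial sketch of that behaviour (token values are placeholders):

    def build_continuation_query(continuation, ctp=None):
        query = {'ctoken': continuation, 'continuation': continuation}
        if ctp:
            query['itct'] = ctp
        return query

    print(build_continuation_query('CONTINUATION_TOKEN'))
    print(build_continuation_query('CONTINUATION_TOKEN', 'CLICK_TRACKING_PARAMS'))
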
From 470cf496f5e33a24527decc736f9cb204393ea70 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 Dec 2020 04:17:47 +0700
Subject: [PATCH 333/340] [youtube:tab] Improve identity token extraction
 (closes #27197)

---
 youtube_dl/extractor/youtube.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 052926ee5..1e7af3fdd 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -3121,6 +3121,19 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             self._playlist_entries(playlist), playlist_id=playlist_id,
             playlist_title=title)
 
+    def _extract_identity_token(self, webpage, item_id):
+        ytcfg = self._parse_json(
+            self._search_regex(
+                r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
+                default='{}'), item_id, fatal=False)
+        if ytcfg:
+            token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
+            if token:
+                return token
+        return self._search_regex(
+            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
+            'identity token', default=None)
+
     def _real_extract(self, url):
         item_id = self._match_id(url)
         url = compat_urlparse.urlunparse(
@@ -3135,9 +3148,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                 return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
             self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
         webpage = self._download_webpage(url, item_id)
-        identity_token = self._search_regex(
-            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
-            'identity token', default=None)
+        identity_token = self._extract_identity_token(webpage, item_id)
         data = self._extract_yt_initial_data(item_id, webpage)
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
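
As a rough illustration of the two-step lookup introduced above, here is a simplified sketch using only the standard library, assuming webpage is the raw HTML string; the real code goes through the extractor's _parse_json/_search_regex helpers and try_get:

import json
import re


def extract_identity_token(webpage):
    # Prefer the structured ytcfg.set({...}) payload when it parses cleanly.
    mobj = re.search(r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage)
    if mobj:
        try:
            token = json.loads(mobj.group(1)).get('ID_TOKEN')
            if token:
                return token
        except ValueError:
            pass
    # Fall back to the old raw regex over the page source.
    mobj = re.search(r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage)
    return mobj.group(1) if mobj else None


# Example (placeholder token value):
# extract_identity_token('ytcfg.set({"ID_TOKEN": "QUFFLUhq..."});') -> 'QUFFLUhq...'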

From 9da0504a09f28ca4ffc92217fa347288e5a4b7ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 Dec 2020 04:24:31 +0700
Subject: [PATCH 334/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index f3571e77b..339d8bd50 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,27 @@
+version <unreleased>
+
+Core
+* [extractor/common] Fix inline HTML5 media tags processing (#27345)
+
+Extractors
+* [youtube:tab] Improve identity token extraction (#27197)
+* [youtube:tab] Make click tracking params on continuation optional
+* [youtube:tab] Delegate inline playlists to tab-based playlists (#27298)
++ [tubitv] Extract release year (#27317)
+* [amcnetworks] Fix free content extraction (#20354)
++ [lbry:channel] Add support for channels (#25584)
++ [lbry] Add support for short and embed URLs
+* [lbry] Fix channel metadata extraction
++ [telequebec] Add support for video.telequebec.tv (#27339)
+* [telequebec] Fix extraction (#25733, #26883)
++ [youtube:tab] Capture and output alerts (#27340)
+* [tvplay:home] Fix extraction (#21153)
+* [americastestkitchen] Fix extraction and add support
+  for Cook's Country and Cook's Illustrated (#17234, #27322)
++ [slideslive] Add support for yoda service videos and extract subtitles
+  (#27323)
+
+
 version 2020.12.07
 
 Core

From a4a2fa8754de4d7f9515c09c04e907c8ce199916 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Wed, 9 Dec 2020 04:25:24 +0700
Subject: [PATCH 335/340] release 2020.12.09

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 docs/supportedsites.md                           | 4 +++-
 youtube_dl/version.py                            | 2 +-
 7 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index f72f57d02..5aff47e75 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.09. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.09**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.12.07
+ [debug] youtube-dl version 2020.12.09
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index 8160d257a..91c44d9b3 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.09. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
 - Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.09**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index f7b6ba9ea..fec9aa55c 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.09. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.09**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
 
 
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index efeae549f..da77bdd7e 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.09. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
 - Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
 - Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
 -->
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.09**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
  [debug] User config: []
  [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
  [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
- [debug] youtube-dl version 2020.12.07
+ [debug] youtube-dl version 2020.12.09
  [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
  [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
  [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index 0435a0ddf..34dad8079 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 <!--
 Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
-- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
+- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.09. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
 - Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
 - Finally, put x into all relevant boxes (like this [x])
 -->
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2020.12.07**
+- [ ] I've verified that I'm running youtube-dl version **2020.12.09**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
 
 
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 59e73ba59..b551aa1eb 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -424,7 +424,8 @@
  - **la7.it**
  - **laola1tv**
  - **laola1tv:embed**
- - **lbry.tv**
+ - **lbry**
+ - **lbry:channel**
  - **LCI**
  - **Lcp**
  - **LcpPlay**
@@ -900,6 +901,7 @@
  - **TeleQuebecEmission**
  - **TeleQuebecLive**
  - **TeleQuebecSquat**
+ - **TeleQuebecVideo**
  - **TeleTask**
  - **Telewebion**
  - **TennisTV**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 39a685197..7b968df59 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.12.07'
+__version__ = '2020.12.09'

From df5e50954b0286eb88e3b0bc5199c8271e3ab9ec Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 9 Dec 2020 10:39:14 +0100
Subject: [PATCH 336/340] [beampro] Remove Extractor

closes #17290
closes #22871
closes #23020
closes #23061
closes #26099
---
 youtube_dl/extractor/beampro.py    | 194 -----------------------------
 youtube_dl/extractor/extractors.py |   4 -
 2 files changed, 198 deletions(-)
 delete mode 100644 youtube_dl/extractor/beampro.py

diff --git a/youtube_dl/extractor/beampro.py b/youtube_dl/extractor/beampro.py
deleted file mode 100644
index 86abdae00..000000000
--- a/youtube_dl/extractor/beampro.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    clean_html,
-    compat_str,
-    float_or_none,
-    int_or_none,
-    parse_iso8601,
-    try_get,
-    urljoin,
-)
-
-
-class BeamProBaseIE(InfoExtractor):
-    _API_BASE = 'https://mixer.com/api/v1'
-    _RATINGS = {'family': 0, 'teen': 13, '18+': 18}
-
-    def _extract_channel_info(self, chan):
-        user_id = chan.get('userId') or try_get(chan, lambda x: x['user']['id'])
-        return {
-            'uploader': chan.get('token') or try_get(
-                chan, lambda x: x['user']['username'], compat_str),
-            'uploader_id': compat_str(user_id) if user_id else None,
-            'age_limit': self._RATINGS.get(chan.get('audience')),
-        }
-
-
-class BeamProLiveIE(BeamProBaseIE):
-    IE_NAME = 'Mixer:live'
-    _VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/(?P<id>[^/?#&]+)'
-    _TEST = {
-        'url': 'http://mixer.com/niterhayven',
-        'info_dict': {
-            'id': '261562',
-            'ext': 'mp4',
-            'title': 'Introducing The Witcher 3 //  The Grind Starts Now!',
-            'description': 'md5:0b161ac080f15fe05d18a07adb44a74d',
-            'thumbnail': r're:https://.*\.jpg$',
-            'timestamp': 1483477281,
-            'upload_date': '20170103',
-            'uploader': 'niterhayven',
-            'uploader_id': '373396',
-            'age_limit': 18,
-            'is_live': True,
-            'view_count': int,
-        },
-        'skip': 'niterhayven is offline',
-        'params': {
-            'skip_download': True,
-        },
-    }
-
-    _MANIFEST_URL_TEMPLATE = '%s/channels/%%s/manifest.%%s' % BeamProBaseIE._API_BASE
-
-    @classmethod
-    def suitable(cls, url):
-        return False if BeamProVodIE.suitable(url) else super(BeamProLiveIE, cls).suitable(url)
-
-    def _real_extract(self, url):
-        channel_name = self._match_id(url)
-
-        chan = self._download_json(
-            '%s/channels/%s' % (self._API_BASE, channel_name), channel_name)
-
-        if chan.get('online') is False:
-            raise ExtractorError(
-                '{0} is offline'.format(channel_name), expected=True)
-
-        channel_id = chan['id']
-
-        def manifest_url(kind):
-            return self._MANIFEST_URL_TEMPLATE % (channel_id, kind)
-
-        formats = self._extract_m3u8_formats(
-            manifest_url('m3u8'), channel_name, ext='mp4', m3u8_id='hls',
-            fatal=False)
-        formats.extend(self._extract_smil_formats(
-            manifest_url('smil'), channel_name, fatal=False))
-        self._sort_formats(formats)
-
-        info = {
-            'id': compat_str(chan.get('id') or channel_name),
-            'title': self._live_title(chan.get('name') or channel_name),
-            'description': clean_html(chan.get('description')),
-            'thumbnail': try_get(
-                chan, lambda x: x['thumbnail']['url'], compat_str),
-            'timestamp': parse_iso8601(chan.get('updatedAt')),
-            'is_live': True,
-            'view_count': int_or_none(chan.get('viewersTotal')),
-            'formats': formats,
-        }
-        info.update(self._extract_channel_info(chan))
-
-        return info
-
-
-class BeamProVodIE(BeamProBaseIE):
-    IE_NAME = 'Mixer:vod'
-    _VALID_URL = r'https?://(?:\w+\.)?(?:beam\.pro|mixer\.com)/[^/?#&]+\?.*?\bvod=(?P<id>[^?#&]+)'
-    _TESTS = [{
-        'url': 'https://mixer.com/willow8714?vod=2259830',
-        'md5': 'b2431e6e8347dc92ebafb565d368b76b',
-        'info_dict': {
-            'id': '2259830',
-            'ext': 'mp4',
-            'title': 'willow8714\'s Channel',
-            'duration': 6828.15,
-            'thumbnail': r're:https://.*source\.png$',
-            'timestamp': 1494046474,
-            'upload_date': '20170506',
-            'uploader': 'willow8714',
-            'uploader_id': '6085379',
-            'age_limit': 13,
-            'view_count': int,
-        },
-        'params': {
-            'skip_download': True,
-        },
-    }, {
-        'url': 'https://mixer.com/streamer?vod=IxFno1rqC0S_XJ1a2yGgNw',
-        'only_matching': True,
-    }, {
-        'url': 'https://mixer.com/streamer?vod=Rh3LY0VAqkGpEQUe2pN-ig',
-        'only_matching': True,
-    }]
-
-    @staticmethod
-    def _extract_format(vod, vod_type):
-        if not vod.get('baseUrl'):
-            return []
-
-        if vod_type == 'hls':
-            filename, protocol = 'manifest.m3u8', 'm3u8_native'
-        elif vod_type == 'raw':
-            filename, protocol = 'source.mp4', 'https'
-        else:
-            assert False
-
-        data = vod.get('data') if isinstance(vod.get('data'), dict) else {}
-
-        format_id = [vod_type]
-        if isinstance(data.get('Height'), compat_str):
-            format_id.append('%sp' % data['Height'])
-
-        return [{
-            'url': urljoin(vod['baseUrl'], filename),
-            'format_id': '-'.join(format_id),
-            'ext': 'mp4',
-            'protocol': protocol,
-            'width': int_or_none(data.get('Width')),
-            'height': int_or_none(data.get('Height')),
-            'fps': int_or_none(data.get('Fps')),
-            'tbr': int_or_none(data.get('Bitrate'), 1000),
-        }]
-
-    def _real_extract(self, url):
-        vod_id = self._match_id(url)
-
-        vod_info = self._download_json(
-            '%s/recordings/%s' % (self._API_BASE, vod_id), vod_id)
-
-        state = vod_info.get('state')
-        if state != 'AVAILABLE':
-            raise ExtractorError(
-                'VOD %s is not available (state: %s)' % (vod_id, state),
-                expected=True)
-
-        formats = []
-        thumbnail_url = None
-
-        for vod in vod_info['vods']:
-            vod_type = vod.get('format')
-            if vod_type in ('hls', 'raw'):
-                formats.extend(self._extract_format(vod, vod_type))
-            elif vod_type == 'thumbnail':
-                thumbnail_url = urljoin(vod.get('baseUrl'), 'source.png')
-
-        self._sort_formats(formats)
-
-        info = {
-            'id': vod_id,
-            'title': vod_info.get('name') or vod_id,
-            'duration': float_or_none(vod_info.get('duration')),
-            'thumbnail': thumbnail_url,
-            'timestamp': parse_iso8601(vod_info.get('createdAt')),
-            'view_count': int_or_none(vod_info.get('viewsTotal')),
-            'formats': formats,
-        }
-        info.update(self._extract_channel_info(vod_info.get('channel') or {}))
-
-        return info
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index a94502eb3..3e6581add 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -91,10 +91,6 @@ from .bbc import (
     BBCCoUkPlaylistIE,
     BBCIE,
 )
-from .beampro import (
-    BeamProLiveIE,
-    BeamProVodIE,
-)
 from .beeg import BeegIE
 from .behindkink import BehindKinkIE
 from .bellmedia import BellMediaIE

From 842654b6d03085661dfc5a6a57004123a1a9d35f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=B0=AD=E4=B9=9D=E9=BC=8E?= <109224573@qq.com>
Date: Wed, 9 Dec 2020 22:50:42 +0800
Subject: [PATCH 337/340] [README.md] Update travis CI tld (#27354)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index cd8856828..fb1aee7dc 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![Build Status](https://travis-ci.org/ytdl-org/youtube-dl.svg?branch=master)](https://travis-ci.org/ytdl-org/youtube-dl)
+[![Build Status](https://travis-ci.com/ytdl-org/youtube-dl.svg?branch=master)](https://travis-ci.com/ytdl-org/youtube-dl)
 
 youtube-dl - download videos from youtube.com or other video platforms
 

From 772cefef8c9bf467089e65a6890660ca9bfd83b8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com>
Date: Thu, 10 Dec 2020 00:12:59 +0700
Subject: [PATCH 338/340] [ChangeLog] Actualize [ci skip]

---
 ChangeLog | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ChangeLog b/ChangeLog
index 339d8bd50..8a57965e0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2020.12.09
 
 Core
 * [extractor/common] Fix inline HTML5 media tags processing (#27345)

From b69bb1ed119d07b83cf88ca95b87773209f14d3b Mon Sep 17 00:00:00 2001
From: Andrey Smirnoff <37037851+mashed-potatoes@users.noreply.github.com>
Date: Thu, 10 Dec 2020 00:01:23 +0500
Subject: [PATCH 339/340] [smotri] Remove extractor (#27358)

---
 youtube_dl/extractor/extractors.py |   6 -
 youtube_dl/extractor/generic.py    |   6 -
 youtube_dl/extractor/smotri.py     | 416 -----------------------------
 youtube_dl/options.py              |   2 +-
 4 files changed, 1 insertion(+), 429 deletions(-)
 delete mode 100644 youtube_dl/extractor/smotri.py

diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 3e6581add..533c074b3 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1041,12 +1041,6 @@ from .sky import (
 from .slideshare import SlideshareIE
 from .slideslive import SlidesLiveIE
 from .slutload import SlutloadIE
-from .smotri import (
-    SmotriIE,
-    SmotriCommunityIE,
-    SmotriUserIE,
-    SmotriBroadcastIE,
-)
 from .snotr import SnotrIE
 from .sohu import SohuIE
 from .sonyliv import SonyLIVIE
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 9eba9614f..80ecaf795 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -52,7 +52,6 @@ from .ooyala import OoyalaIE
 from .rutv import RUTVIE
 from .tvc import TVCIE
 from .sportbox import SportBoxIE
-from .smotri import SmotriIE
 from .myvi import MyviIE
 from .condenast import CondeNastIE
 from .udn import UDNEmbedIE
@@ -2815,11 +2814,6 @@ class GenericIE(InfoExtractor):
         if mobj is not None:
             return self.url_result(mobj.group('url'))
 
-        # Look for embedded smotri.com player
-        smotri_url = SmotriIE._extract_url(webpage)
-        if smotri_url:
-            return self.url_result(smotri_url, 'Smotri')
-
         # Look for embedded Myvi.ru player
         myvi_url = MyviIE._extract_url(webpage)
         if myvi_url:
diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py
deleted file mode 100644
index 45995f30f..000000000
--- a/youtube_dl/extractor/smotri.py
+++ /dev/null
@@ -1,416 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-import json
-import hashlib
-import uuid
-
-from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    int_or_none,
-    sanitized_Request,
-    unified_strdate,
-    urlencode_postdata,
-    xpath_text,
-)
-
-
-class SmotriIE(InfoExtractor):
-    IE_DESC = 'Smotri.com'
-    IE_NAME = 'smotri'
-    _VALID_URL = r'https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
-    _NETRC_MACHINE = 'smotri'
-
-    _TESTS = [
-        # real video id 2610366
-        {
-            'url': 'http://smotri.com/video/view/?id=v261036632ab',
-            'md5': '02c0dfab2102984e9c5bb585cc7cc321',
-            'info_dict': {
-                'id': 'v261036632ab',
-                'ext': 'mp4',
-                'title': 'катастрофа с камер видеонаблюдения',
-                'uploader': 'rbc2008',
-                'uploader_id': 'rbc08',
-                'upload_date': '20131118',
-                'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg',
-            },
-        },
-        # real video id 57591
-        {
-            'url': 'http://smotri.com/video/view/?id=v57591cb20',
-            'md5': '830266dfc21f077eac5afd1883091bcd',
-            'info_dict': {
-                'id': 'v57591cb20',
-                'ext': 'flv',
-                'title': 'test',
-                'uploader': 'Support Photofile@photofile',
-                'uploader_id': 'support-photofile',
-                'upload_date': '20070704',
-                'thumbnail': 'http://frame4.loadup.ru/03/ed/57591.2.3.jpg',
-            },
-        },
-        # video-password, not approved by moderator
-        {
-            'url': 'http://smotri.com/video/view/?id=v1390466a13c',
-            'md5': 'f6331cef33cad65a0815ee482a54440b',
-            'info_dict': {
-                'id': 'v1390466a13c',
-                'ext': 'mp4',
-                'title': 'TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1',
-                'uploader': 'timoxa40',
-                'uploader_id': 'timoxa40',
-                'upload_date': '20100404',
-                'thumbnail': 'http://frame7.loadup.ru/af/3f/1390466.3.3.jpg',
-            },
-            'params': {
-                'videopassword': 'qwerty',
-            },
-            'skip': 'Video is not approved by moderator',
-        },
-        # video-password
-        {
-            'url': 'http://smotri.com/video/view/?id=v6984858774#',
-            'md5': 'f11e01d13ac676370fc3b95b9bda11b0',
-            'info_dict': {
-                'id': 'v6984858774',
-                'ext': 'mp4',
-                'title': 'Дача Солженицина ПАРОЛЬ 223322',
-                'uploader': 'psavari1',
-                'uploader_id': 'psavari1',
-                'upload_date': '20081103',
-                'thumbnail': r're:^https?://.*\.jpg$',
-            },
-            'params': {
-                'videopassword': '223322',
-            },
-        },
-        # age limit + video-password, not approved by moderator
-        {
-            'url': 'http://smotri.com/video/view/?id=v15408898bcf',
-            'md5': '91e909c9f0521adf5ee86fbe073aad70',
-            'info_dict': {
-                'id': 'v15408898bcf',
-                'ext': 'flv',
-                'title': 'этот ролик не покажут по ТВ',
-                'uploader': 'zzxxx',
-                'uploader_id': 'ueggb',
-                'upload_date': '20101001',
-                'thumbnail': 'http://frame3.loadup.ru/75/75/1540889.1.3.jpg',
-                'age_limit': 18,
-            },
-            'params': {
-                'videopassword': '333'
-            },
-            'skip': 'Video is not approved by moderator',
-        },
-        # age limit + video-password
-        {
-            'url': 'http://smotri.com/video/view/?id=v7780025814',
-            'md5': 'b4599b068422559374a59300c5337d72',
-            'info_dict': {
-                'id': 'v7780025814',
-                'ext': 'mp4',
-                'title': 'Sexy Beach (пароль 123)',
-                'uploader': 'вАся',
-                'uploader_id': 'asya_prosto',
-                'upload_date': '20081218',
-                'thumbnail': r're:^https?://.*\.jpg$',
-                'age_limit': 18,
-            },
-            'params': {
-                'videopassword': '123'
-            },
-        },
-        # swf player
-        {
-            'url': 'http://pics.smotri.com/scrubber_custom8.swf?file=v9188090500',
-            'md5': '31099eeb4bc906712c5f40092045108d',
-            'info_dict': {
-                'id': 'v9188090500',
-                'ext': 'mp4',
-                'title': 'Shakira - Don\'t Bother',
-                'uploader': 'HannahL',
-                'uploader_id': 'lisaha95',
-                'upload_date': '20090331',
-                'thumbnail': 'http://frame8.loadup.ru/44/0b/918809.7.3.jpg',
-            },
-        },
-    ]
-
-    @classmethod
-    def _extract_url(cls, webpage):
-        mobj = re.search(
-            r'<embed[^>]src=(["\'])(?P<url>http://pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=v.+?\1)',
-            webpage)
-        if mobj is not None:
-            return mobj.group('url')
-
-        mobj = re.search(
-            r'''(?x)<div\s+class="video_file">http://smotri\.com/video/download/file/[^<]+</div>\s*
-                    <div\s+class="video_image">[^<]+</div>\s*
-                    <div\s+class="video_id">(?P<id>[^<]+)</div>''', webpage)
-        if mobj is not None:
-            return 'http://smotri.com/video/view/?id=%s' % mobj.group('id')
-
-    def _search_meta(self, name, html, display_name=None):
-        if display_name is None:
-            display_name = name
-        return self._html_search_meta(name, html, display_name)
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        video_form = {
-            'ticket': video_id,
-            'video_url': '1',
-            'frame_url': '1',
-            'devid': 'LoadupFlashPlayer',
-            'getvideoinfo': '1',
-        }
-
-        video_password = self._downloader.params.get('videopassword')
-        if video_password:
-            video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()
-
-        video = self._download_json(
-            'http://smotri.com/video/view/url/bot/',
-            video_id, 'Downloading video JSON',
-            data=urlencode_postdata(video_form),
-            headers={'Content-Type': 'application/x-www-form-urlencoded'})
-
-        video_url = video.get('_vidURL') or video.get('_vidURL_mp4')
-
-        if not video_url:
-            if video.get('_moderate_no'):
-                raise ExtractorError(
-                    'Video %s has not been approved by moderator' % video_id, expected=True)
-
-            if video.get('error'):
-                raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-
-            if video.get('_pass_protected') == 1:
-                msg = ('Invalid video password' if video_password
-                       else 'This video is protected by a password, use the --video-password option')
-                raise ExtractorError(msg, expected=True)
-
-        title = video['title']
-        thumbnail = video.get('_imgURL')
-        upload_date = unified_strdate(video.get('added'))
-        uploader = video.get('userNick')
-        uploader_id = video.get('userLogin')
-        duration = int_or_none(video.get('duration'))
-
-        # Video JSON does not provide enough meta data
-        # We will extract some from the video web page instead
-        webpage_url = 'http://smotri.com/video/view/?id=%s' % video_id
-        webpage = self._download_webpage(webpage_url, video_id, 'Downloading video page')
-
-        # Warning if video is unavailable
-        warning = self._html_search_regex(
-            r'<div[^>]+class="videoUnModer"[^>]*>(.+?)</div>', webpage,
-            'warning message', default=None)
-        if warning is not None:
-            self._downloader.report_warning(
-                'Video %s may not be available; smotri said: %s ' %
-                (video_id, warning))
-
-        # Adult content
-        if 'EroConfirmText">' in webpage:
-            self.report_age_confirmation()
-            confirm_string = self._html_search_regex(
-                r'<a[^>]+href="/video/view/\?id=%s&confirm=([^"]+)"' % video_id,
-                webpage, 'confirm string')
-            confirm_url = webpage_url + '&confirm=%s' % confirm_string
-            webpage = self._download_webpage(
-                confirm_url, video_id,
-                'Downloading video page (age confirmed)')
-            adult_content = True
-        else:
-            adult_content = False
-
-        view_count = self._html_search_regex(
-            r'(?s)Общее количество просмотров.*?<span class="Number">(\d+)</span>',
-            webpage, 'view count', fatal=False)
-
-        return {
-            'id': video_id,
-            'url': video_url,
-            'title': title,
-            'thumbnail': thumbnail,
-            'uploader': uploader,
-            'upload_date': upload_date,
-            'uploader_id': uploader_id,
-            'duration': duration,
-            'view_count': int_or_none(view_count),
-            'age_limit': 18 if adult_content else 0,
-        }
-
-
-class SmotriCommunityIE(InfoExtractor):
-    IE_DESC = 'Smotri.com community videos'
-    IE_NAME = 'smotri:community'
-    _VALID_URL = r'https?://(?:www\.)?smotri\.com/community/video/(?P<id>[0-9A-Za-z_\'-]+)'
-    _TEST = {
-        'url': 'http://smotri.com/community/video/kommuna',
-        'info_dict': {
-            'id': 'kommuna',
-        },
-        'playlist_mincount': 4,
-    }
-
-    def _real_extract(self, url):
-        community_id = self._match_id(url)
-
-        rss = self._download_xml(
-            'http://smotri.com/export/rss/video/by/community/-/%s/video.xml' % community_id,
-            community_id, 'Downloading community RSS')
-
-        entries = [
-            self.url_result(video_url.text, SmotriIE.ie_key())
-            for video_url in rss.findall('./channel/item/link')]
-
-        return self.playlist_result(entries, community_id)
-
-
-class SmotriUserIE(InfoExtractor):
-    IE_DESC = 'Smotri.com user videos'
-    IE_NAME = 'smotri:user'
-    _VALID_URL = r'https?://(?:www\.)?smotri\.com/user/(?P<id>[0-9A-Za-z_\'-]+)'
-    _TESTS = [{
-        'url': 'http://smotri.com/user/inspector',
-        'info_dict': {
-            'id': 'inspector',
-            'title': 'Inspector',
-        },
-        'playlist_mincount': 9,
-    }]
-
-    def _real_extract(self, url):
-        user_id = self._match_id(url)
-
-        rss = self._download_xml(
-            'http://smotri.com/export/rss/user/video/-/%s/video.xml' % user_id,
-            user_id, 'Downloading user RSS')
-
-        entries = [self.url_result(video_url.text, 'Smotri')
-                   for video_url in rss.findall('./channel/item/link')]
-
-        description_text = xpath_text(rss, './channel/description') or ''
-        user_nickname = self._search_regex(
-            '^Видео режиссера (.+)$', description_text,
-            'user nickname', fatal=False)
-
-        return self.playlist_result(entries, user_id, user_nickname)
-
-
-class SmotriBroadcastIE(InfoExtractor):
-    IE_DESC = 'Smotri.com broadcasts'
-    IE_NAME = 'smotri:broadcast'
-    _VALID_URL = r'https?://(?:www\.)?(?P<url>smotri\.com/live/(?P<id>[^/]+))/?.*'
-    _NETRC_MACHINE = 'smotri'
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        broadcast_id = mobj.group('id')
-
-        broadcast_url = 'http://' + mobj.group('url')
-        broadcast_page = self._download_webpage(broadcast_url, broadcast_id, 'Downloading broadcast page')
-
-        if re.search('>Режиссер с логином <br/>"%s"<br/> <span>не существует<' % broadcast_id, broadcast_page) is not None:
-            raise ExtractorError(
-                'Broadcast %s does not exist' % broadcast_id, expected=True)
-
-        # Adult content
-        if re.search('EroConfirmText">', broadcast_page) is not None:
-
-            (username, password) = self._get_login_info()
-            if username is None:
-                self.raise_login_required(
-                    'Erotic broadcasts allowed only for registered users')
-
-            login_form = {
-                'login-hint53': '1',
-                'confirm_erotic': '1',
-                'login': username,
-                'password': password,
-            }
-
-            request = sanitized_Request(
-                broadcast_url + '/?no_redirect=1', urlencode_postdata(login_form))
-            request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-            broadcast_page = self._download_webpage(
-                request, broadcast_id, 'Logging in and confirming age')
-
-            if '>Неверный логин или пароль<' in broadcast_page:
-                raise ExtractorError(
-                    'Unable to log in: bad username or password', expected=True)
-
-            adult_content = True
-        else:
-            adult_content = False
-
-        ticket = self._html_search_regex(
-            (r'data-user-file=(["\'])(?P<ticket>(?!\1).+)\1',
-             r"window\.broadcast_control\.addFlashVar\('file'\s*,\s*'(?P<ticket>[^']+)'\)"),
-            broadcast_page, 'broadcast ticket', group='ticket')
-
-        broadcast_url = 'http://smotri.com/broadcast/view/url/?ticket=%s' % ticket
-
-        broadcast_password = self._downloader.params.get('videopassword')
-        if broadcast_password:
-            broadcast_url += '&pass=%s' % hashlib.md5(broadcast_password.encode('utf-8')).hexdigest()
-
-        broadcast_json_page = self._download_webpage(
-            broadcast_url, broadcast_id, 'Downloading broadcast JSON')
-
-        try:
-            broadcast_json = json.loads(broadcast_json_page)
-
-            protected_broadcast = broadcast_json['_pass_protected'] == 1
-            if protected_broadcast and not broadcast_password:
-                raise ExtractorError(
-                    'This broadcast is protected by a password, use the --video-password option',
-                    expected=True)
-
-            broadcast_offline = broadcast_json['is_play'] == 0
-            if broadcast_offline:
-                raise ExtractorError('Broadcast %s is offline' % broadcast_id, expected=True)
-
-            rtmp_url = broadcast_json['_server']
-            mobj = re.search(r'^rtmp://[^/]+/(?P<app>.+)/?$', rtmp_url)
-            if not mobj:
-                raise ExtractorError('Unexpected broadcast rtmp URL')
-
-            broadcast_playpath = broadcast_json['_streamName']
-            broadcast_app = '%s/%s' % (mobj.group('app'), broadcast_json['_vidURL'])
-            broadcast_thumbnail = broadcast_json.get('_imgURL')
-            broadcast_title = self._live_title(broadcast_json['title'])
-            broadcast_description = broadcast_json.get('description')
-            broadcaster_nick = broadcast_json.get('nick')
-            broadcaster_login = broadcast_json.get('login')
-            rtmp_conn = 'S:%s' % uuid.uuid4().hex
-        except KeyError:
-            if protected_broadcast:
-                raise ExtractorError('Bad broadcast password', expected=True)
-            raise ExtractorError('Unexpected broadcast JSON')
-
-        return {
-            'id': broadcast_id,
-            'url': rtmp_url,
-            'title': broadcast_title,
-            'thumbnail': broadcast_thumbnail,
-            'description': broadcast_description,
-            'uploader': broadcaster_nick,
-            'uploader_id': broadcaster_login,
-            'age_limit': 18 if adult_content else 0,
-            'ext': 'flv',
-            'play_path': broadcast_playpath,
-            'player_url': 'http://pics.smotri.com/broadcast_play.swf',
-            'app': broadcast_app,
-            'rtmp_live': True,
-            'rtmp_conn': rtmp_conn,
-            'is_live': True,
-        }
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 6d5ac62b3..3000ba41e 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -369,7 +369,7 @@ def parseOpts(overrideArguments=None):
     authentication.add_option(
         '--video-password',
         dest='videopassword', metavar='PASSWORD',
-        help='Video password (vimeo, smotri, youku)')
+        help='Video password (vimeo, youku)')
 
     adobe_pass = optparse.OptionGroup(parser, 'Adobe Pass Options')
     adobe_pass.add_option(

From aee1f871681af7d6cbd26e33f9fe3e63f742167f Mon Sep 17 00:00:00 2001
From: Remita Amine <remitamine@gmail.com>
Date: Wed, 9 Dec 2020 23:52:25 +0100
Subject: [PATCH 340/340] [facebook] remove hardcoded chrome user-agent

closes #18974
closes #25411
closes #26958
closes #27329
---
 youtube_dl/extractor/facebook.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index 610d66745..7459fde34 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -54,8 +54,6 @@ class FacebookIE(InfoExtractor):
     _NETRC_MACHINE = 'facebook'
     IE_NAME = 'facebook'
 
-    _CHROME_USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36'
-
     _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
     _VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary'
 
@@ -306,9 +304,7 @@ class FacebookIE(InfoExtractor):
         self._login()
 
     def _extract_from_url(self, url, video_id, fatal_if_no_video=True):
-        req = sanitized_Request(url)
-        req.add_header('User-Agent', self._CHROME_USER_AGENT)
-        webpage = self._download_webpage(req, video_id)
+        webpage = self._download_webpage(url, video_id)
 
         video_data = None