
Comply with coding conventions a bit more

Bart Broere 2024-03-01 15:05:30 +01:00
parent 21eb4513e0
commit 0dc7d954cb


@@ -65,30 +65,29 @@ class NPOIE(InfoExtractor):
             url = url[:-9]
         elif url.endswith('/afspelen/'):
             url = url[:-10]
-        if url.endswith('/'):
-            url = url[:-1]
+        url = url.rstrip('/')
         slug = url.split('/')[-1]
         page = self._download_webpage(url, slug, 'Finding productId using slug: %s' % slug)
         # TODO find out what proper HTML parsing utilities are available in youtube-dl
         next_data = page.split('<script id="__NEXT_DATA__" type="application/json">')[1].split('</script>')[0]
         next_data = json.loads(next_data)
-        product_id, description, thumbnail, title = None, None, None, None
+        product_id, title, description, thumbnail = None, None, None, None
         for query in next_data['props']['pageProps']['dehydratedState']['queries']:
             if isinstance(query['state']['data'], list):
                 for entry in query['state']['data']:
-                    print(entry)
-                    try:
-                        if entry['slug'] == slug:
-                            product_id = entry['productId']
-                            title = entry['title']
-                            synopsis = entry['synopsis']
-                            description = synopsis.get('long', synopsis.get('short', synopsis.get('brief', '')))
-                            thumbnail = entry['images'][0]['url']
-                            break
-                    except KeyError:
-                        continue
-                    except IndexError:
-                        continue
+                    if entry['slug'] == slug:
+                        product_id = entry.get('productId')
+                        title = entry.get('title')
+                        synopsis = entry.get('synopsis', {})
+                        description = (
+                            synopsis.get('long')
+                            or synopsis.get('short')
+                            or synopsis.get('brief')
+                        )
+                        thumbnails = entry.get('images')
+                        for thumbnail_entry in thumbnails:
+                            if 'url' in thumbnail_entry:
+                                thumbnail = thumbnail_entry.get('url')
         if not product_id:
             raise ExtractorError('No productId found for slug: %s' % slug)
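
The TODO in the hunk above asks what HTML parsing utilities youtube-dl offers for this. InfoExtractor already provides _search_regex and _parse_json, which would make the __NEXT_DATA__ extraction less brittle than chained str.split() calls (yt-dlp additionally ships a dedicated _search_nextjs_data helper; whether this youtube-dl checkout has it would need checking). A minimal standalone sketch of the same extraction, using only the standard library; the regex pattern here is my own assumption, not something from the commit:

import json
import re


def extract_next_data(webpage):
    # Pull the __NEXT_DATA__ JSON blob out of the rendered page and decode it.
    # Inside the extractor this could instead be roughly:
    #   self._parse_json(self._search_regex(pattern, webpage, 'next data'), slug)
    match = re.search(
        r'<script id="__NEXT_DATA__" type="application/json">(.+?)</script>',
        webpage, re.DOTALL)
    if not match:
        return None
    return json.loads(match.group(1))
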
@@ -97,19 +96,18 @@ class NPOIE(InfoExtractor):
         stream_link = self._download_json(
             'https://prod.npoplayer.nl/stream-link', video_id=slug,
             data=json.dumps({
-                "profileName": "dash",
-                "drmType": "widevine",
-                "referrerUrl": url,
+                'profileName': 'dash',
+                'drmType': 'widevine',
+                'referrerUrl': url,
             }).encode('utf8'),
             headers={
-                "Authorization": token,
-                "Content-Type": "application/json",
+                'Authorization': token,
+                'Content-Type': 'application/json',
             }
         )
-        stream_url = stream_link['stream']['streamURL']
         # TODO other formats than dash / mpd
+        stream_url = stream_link.get('stream', {}).get('streamURL')
         mpd = self._extract_mpd_formats(stream_url, slug, mpd_id='dash', fatal=False)
         return {
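
The remaining TODO above ("other formats than dash / mpd") could be handled by branching on the stream URL that the prod.npoplayer.nl endpoint returns, since InfoExtractor also provides _extract_m3u8_formats for HLS. The method sketch below is hedged: the method name, the extension checks, and the 'hls' format id are my assumptions and are not part of this commit or of a verified NPO API response.

    def _collect_formats(self, stream_url, slug):
        # Pick a format extractor based on what the stream-link endpoint returned.
        formats = []
        if not stream_url:
            return formats
        if '.mpd' in stream_url:
            formats.extend(self._extract_mpd_formats(
                stream_url, slug, mpd_id='dash', fatal=False))
        elif '.m3u8' in stream_url:
            formats.extend(self._extract_m3u8_formats(
                stream_url, slug, ext='mp4', m3u8_id='hls', fatal=False))
        else:
            # Fall back to treating the URL as a direct download.
            formats.append({'url': stream_url, 'format_id': 'direct'})
        return formats
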