Switch codebase to use compat_b64decode

Repository: https://github.com/ytdl-org/youtube-dl.git
Commit: cf2820710d (parent: 5d7d805ca9)
21 changed files with 70 additions and 65 deletions
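
This commit touches call sites only: each explicit base64.b64decode(x.encode(...)) becomes a call to compat_b64decode, imported from youtube_dl/compat.py. The helper's definition is not part of this diff; the sketch below is a hypothetical reconstruction of what such a Python 2/3 shim plausibly looks like, not the verbatim compat.py code.

# Hypothetical sketch only -- the real helper lives in youtube_dl/compat.py
# and is not shown in this diff.
import base64
import sys

if sys.version_info[0] >= 3:
    # Python 3's base64.b64decode already accepts ASCII str as well as bytes.
    compat_b64decode = base64.b64decode
else:
    def compat_b64decode(s, *args, **kwargs):
        # On Python 2, encode unicode input to ASCII bytes first, so call
        # sites no longer need their own .encode('utf-8') / .encode('ascii').
        if isinstance(s, unicode):  # noqa: F821 -- Python 2 only name
            s = s.encode('ascii')
        return base64.b64decode(s, *args, **kwargs)

Centralising the str-to-bytes step in one place is what lets every call site in the hunks below drop its .encode('utf-8') / .encode('ascii') boilerplate.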
@@ -1,8 +1,8 @@
 from __future__ import unicode_literals

-import base64
 from math import ceil

+from .compat import compat_b64decode
 from .utils import bytes_to_intlist, intlist_to_bytes

 BLOCK_SIZE_BYTES = 16
@@ -180,7 +180,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
     """
     NONCE_LENGTH_BYTES = 8

-    data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
+    data = bytes_to_intlist(compat_b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))

     key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))

@@ -1,12 +1,12 @@
 from __future__ import division, unicode_literals

-import base64
 import io
 import itertools
 import time

 from .fragment import FragmentFD
 from ..compat import (
+    compat_b64decode,
     compat_etree_fromstring,
     compat_urlparse,
     compat_urllib_error,
@@ -312,7 +312,7 @@ class F4mFD(FragmentFD):
             boot_info = self._get_bootstrap_from_url(bootstrap_url)
         else:
             bootstrap_url = None
-            bootstrap = base64.b64decode(node.text.encode('ascii'))
+            bootstrap = compat_b64decode(node.text)
             boot_info = read_bootstrap_info(bootstrap)
         return boot_info, bootstrap_url

@@ -349,7 +349,7 @@ class F4mFD(FragmentFD):
         live = boot_info['live']
         metadata_node = media.find(_add_ns('metadata'))
         if metadata_node is not None:
-            metadata = base64.b64decode(metadata_node.text.encode('ascii'))
+            metadata = compat_b64decode(metadata_node.text)
         else:
             metadata = None

@@ -1,13 +1,15 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import json
 import os

 from .common import InfoExtractor
 from ..aes import aes_cbc_decrypt
-from ..compat import compat_ord
+from ..compat import (
+    compat_b64decode,
+    compat_ord,
+)
 from ..utils import (
     bytes_to_intlist,
     ExtractorError,
@@ -48,9 +50,9 @@ class ADNIE(InfoExtractor):

         # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
         dec_subtitles = intlist_to_bytes(aes_cbc_decrypt(
-            bytes_to_intlist(base64.b64decode(enc_subtitles[24:])),
+            bytes_to_intlist(compat_b64decode(enc_subtitles[24:])),
             bytes_to_intlist(b'\x1b\xe0\x29\x61\x38\x94\x24\x00\x12\xbd\xc5\x80\xac\xce\xbe\xb0'),
-            bytes_to_intlist(base64.b64decode(enc_subtitles[:24]))
+            bytes_to_intlist(compat_b64decode(enc_subtitles[:24]))
         ))
         subtitles_json = self._parse_json(
             dec_subtitles[:-compat_ord(dec_subtitles[-1])].decode(),

@@ -1,11 +1,13 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import re

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote
+from ..compat import (
+    compat_b64decode,
+    compat_urllib_parse_unquote,
+)


 class BigflixIE(InfoExtractor):
@@ -39,8 +41,8 @@ class BigflixIE(InfoExtractor):
             webpage, 'title')

         def decode_url(quoted_b64_url):
-            return base64.b64decode(compat_urllib_parse_unquote(
-                quoted_b64_url).encode('ascii')).decode('utf-8')
+            return compat_b64decode(compat_urllib_parse_unquote(
+                quoted_b64_url)).decode('utf-8')

         formats = []
         for height, encoded_url in re.findall(

@@ -1,11 +1,11 @@
 from __future__ import unicode_literals

 import re
-import base64
 import json

 from .common import InfoExtractor
 from .youtube import YoutubeIE
+from ..compat import compat_b64decode
 from ..utils import (
     clean_html,
     ExtractorError
@@ -58,7 +58,7 @@ class ChilloutzoneIE(InfoExtractor):

         base64_video_info = self._html_search_regex(
            r'var cozVidData = "(.+?)";', webpage, 'video data')
-        decoded_video_info = base64.b64decode(base64_video_info.encode('utf-8')).decode('utf-8')
+        decoded_video_info = compat_b64decode(base64_video_info).decode('utf-8')
         video_info_dict = json.loads(decoded_video_info)

         # get video information from dict

@@ -1,10 +1,10 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import re

 from .common import InfoExtractor
+from ..compat import compat_b64decode
 from ..utils import parse_duration


@@ -44,8 +44,7 @@ class ChirbitIE(InfoExtractor):

         # Reverse engineered from https://chirb.it/js/chirbit.player.js (look
         # for soundURL)
-        audio_url = base64.b64decode(
-            data_fd[::-1].encode('ascii')).decode('utf-8')
+        audio_url = compat_b64decode(data_fd[::-1]).decode('utf-8')

         title = self._search_regex(
             r'class=["\']chirbit-title["\'][^>]*>([^<]+)', webpage, 'title')

@@ -3,13 +3,13 @@ from __future__ import unicode_literals

 import re
 import json
-import base64
 import zlib

 from hashlib import sha1
 from math import pow, sqrt, floor
 from .common import InfoExtractor
 from ..compat import (
+    compat_b64decode,
     compat_etree_fromstring,
     compat_urllib_parse_urlencode,
     compat_urllib_request,
@@ -272,8 +272,8 @@ class CrunchyrollIE(CrunchyrollBaseIE):
     }

     def _decrypt_subtitles(self, data, iv, id):
-        data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
-        iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
+        data = bytes_to_intlist(compat_b64decode(data))
+        iv = bytes_to_intlist(compat_b64decode(iv))
         id = int(id)

         def obfuscate_key_aux(count, modulo, start):

@@ -10,6 +10,7 @@ from ..aes import (
     aes_cbc_decrypt,
     aes_cbc_encrypt,
 )
+from ..compat import compat_b64decode
 from ..utils import (
     bytes_to_intlist,
     bytes_to_long,
@@ -93,7 +94,7 @@ class DaisukiMottoIE(InfoExtractor):

         rtn = self._parse_json(
             intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist(
-                base64.b64decode(encrypted_rtn)),
+                compat_b64decode(encrypted_rtn)),
                 aes_key, iv)).decode('utf-8').rstrip('\0'),
             video_id)

@@ -1,10 +1,10 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import re

 from .common import InfoExtractor
+from ..compat import compat_b64decode
 from ..utils import (
     qualities,
     sanitized_Request,
@@ -42,7 +42,7 @@ class DumpertIE(InfoExtractor):
             r'data-files="([^"]+)"', webpage, 'data files')

         files = self._parse_json(
-            base64.b64decode(files_base64.encode('utf-8')).decode('utf-8'),
+            compat_b64decode(files_base64).decode('utf-8'),
             video_id)

         quality = qualities(['flv', 'mobile', 'tablet', '720p'])

@@ -1,13 +1,13 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import json

 from .common import InfoExtractor
 from ..compat import (
-    compat_urlparse,
+    compat_b64decode,
     compat_str,
+    compat_urlparse,
 )
 from ..utils import (
     extract_attributes,
@@ -36,9 +36,9 @@ class EinthusanIE(InfoExtractor):

     # reversed from jsoncrypto.prototype.decrypt() in einthusan-PGMovieWatcher.js
     def _decrypt(self, encrypted_data, video_id):
-        return self._parse_json(base64.b64decode((
+        return self._parse_json(compat_b64decode((
             encrypted_data[:10] + encrypted_data[-1] + encrypted_data[12:-1]
-        ).encode('ascii')).decode('utf-8'), video_id)
+        )).decode('utf-8'), video_id)

     def _real_extract(self, url):
         video_id = self._match_id(url)

@@ -1,8 +1,7 @@
 from __future__ import unicode_literals

-import base64
-
 from .common import InfoExtractor
+from ..compat import compat_b64decode
 from ..utils import (
     ExtractorError,
     HEADRequest,
@@ -48,7 +47,7 @@ class HotNewHipHopIE(InfoExtractor):
         if 'mediaKey' not in mkd:
             raise ExtractorError('Did not get a media key')

-        redirect_url = base64.b64decode(video_url_base64).decode('utf-8')
+        redirect_url = compat_b64decode(video_url_base64).decode('utf-8')
         redirect_req = HEADRequest(redirect_url)
         req = self._request_webpage(
             redirect_req, video_id,

@@ -2,9 +2,8 @@

 from __future__ import unicode_literals

-import base64
-
 from ..compat import (
+    compat_b64decode,
     compat_urllib_parse_unquote,
     compat_urlparse,
 )
@@ -61,7 +60,7 @@ class InfoQIE(BokeCCBaseIE):
         encoded_id = self._search_regex(
             r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None)

-        real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
+        real_id = compat_urllib_parse_unquote(compat_b64decode(encoded_id).decode('utf-8'))
         playpath = 'mp4:' + real_id

         return [{

@@ -1,7 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import datetime
 import hashlib
 import re
@@ -9,6 +8,7 @@ import time

 from .common import InfoExtractor
 from ..compat import (
+    compat_b64decode,
     compat_ord,
     compat_str,
     compat_urllib_parse_urlencode,
@@ -329,7 +329,7 @@ class LetvCloudIE(InfoExtractor):
             raise ExtractorError('Letv cloud returned an unknwon error')

         def b64decode(s):
-            return base64.b64decode(s.encode('utf-8')).decode('utf-8')
+            return compat_b64decode(s).decode('utf-8')

         formats = []
         for media in play_json['data']['video_info']['media'].values():

@@ -1,13 +1,12 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
-
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote
-from ..utils import (
-    int_or_none,
+from ..compat import (
+    compat_b64decode,
+    compat_urllib_parse_unquote,
 )
+from ..utils import int_or_none


 class MangomoloBaseIE(InfoExtractor):
@@ -51,4 +50,4 @@ class MangomoloLiveIE(MangomoloBaseIE):
     _IS_LIVE = True

     def _get_real_id(self, page_id):
-        return base64.b64decode(compat_urllib_parse_unquote(page_id).encode()).decode()
+        return compat_b64decode(compat_urllib_parse_unquote(page_id)).decode()

@@ -1,6 +1,5 @@
 from __future__ import unicode_literals

-import base64
 import functools
 import itertools
 import re

@@ -1,9 +1,13 @@
 from __future__ import unicode_literals

 import re
-import base64
+
 from .common import InfoExtractor
-from ..compat import compat_str
+from ..compat import (
+    compat_b64decode,
+    compat_str,
+    compat_urllib_parse_urlencode,
+)
 from ..utils import (
     determine_ext,
     ExtractorError,
@@ -12,7 +16,6 @@ from ..utils import (
     try_get,
     unsmuggle_url,
 )
-from ..compat import compat_urllib_parse_urlencode


 class OoyalaBaseIE(InfoExtractor):
@@ -44,7 +47,7 @@ class OoyalaBaseIE(InfoExtractor):
             url_data = try_get(stream, lambda x: x['url']['data'], compat_str)
             if not url_data:
                 continue
-            s_url = base64.b64decode(url_data.encode('ascii')).decode('utf-8')
+            s_url = compat_b64decode(url_data).decode('utf-8')
             if not s_url or s_url in urls:
                 continue
             urls.append(s_url)

@@ -1,12 +1,12 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import re

 from .common import InfoExtractor
 from ..aes import aes_cbc_decrypt
 from ..compat import (
+    compat_b64decode,
     compat_ord,
     compat_str,
 )
@@ -142,11 +142,11 @@ class RTL2YouIE(RTL2YouBaseIE):
         stream_data = self._download_json(
             self._BACKWERK_BASE_URL + 'stream/video/' + video_id, video_id)

-        data, iv = base64.b64decode(stream_data['streamUrl']).decode().split(':')
+        data, iv = compat_b64decode(stream_data['streamUrl']).decode().split(':')
         stream_url = intlist_to_bytes(aes_cbc_decrypt(
-            bytes_to_intlist(base64.b64decode(data)),
+            bytes_to_intlist(compat_b64decode(data)),
             bytes_to_intlist(self._AES_KEY),
-            bytes_to_intlist(base64.b64decode(iv))
+            bytes_to_intlist(compat_b64decode(iv))
         ))
         if b'rtl2_you_video_not_found' in stream_url:
             raise ExtractorError('video not found', expected=True)

@@ -7,6 +7,7 @@ import time

 from .common import InfoExtractor
 from ..compat import (
+    compat_b64decode,
     compat_struct_unpack,
 )
 from ..utils import (
@@ -21,7 +22,7 @@ from ..utils import (


 def _decrypt_url(png):
-    encrypted_data = base64.b64decode(png.encode('utf-8'))
+    encrypted_data = compat_b64decode(png)
     text_index = encrypted_data.find(b'tEXt')
     text_chunk = encrypted_data[text_index - 4:]
     length = compat_struct_unpack('!I', text_chunk[:4])[0]

@@ -1,8 +1,7 @@
 from __future__ import unicode_literals

-import base64
-
 from .common import InfoExtractor
+from ..compat import compat_b64decode
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -22,8 +21,8 @@ class SharedBaseIE(InfoExtractor):

         video_url = self._extract_video_url(webpage, video_id, url)

-        title = base64.b64decode(self._html_search_meta(
-            'full:title', webpage, 'title').encode('utf-8')).decode('utf-8')
+        title = compat_b64decode(self._html_search_meta(
+            'full:title', webpage, 'title')).decode('utf-8')
         filesize = int_or_none(self._html_search_meta(
             'full:size', webpage, 'file size', fatal=False))

@@ -92,5 +91,4 @@ class VivoIE(SharedBaseIE):
                 r'InitializeStream\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
                 webpage, 'stream', group='url'),
             video_id,
-            transform_source=lambda x: base64.b64decode(
-                x.encode('ascii')).decode('utf-8'))[0]
+            transform_source=lambda x: compat_b64decode(x).decode('utf-8'))[0]

@@ -1,18 +1,20 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import base64
 import binascii
 import re
 import json

 from .common import InfoExtractor
+from ..compat import (
+    compat_b64decode,
+    compat_ord,
+)
 from ..utils import (
     ExtractorError,
     qualities,
     determine_ext,
 )
-from ..compat import compat_ord


 class TeamcocoIE(InfoExtractor):
@@ -97,7 +99,7 @@ class TeamcocoIE(InfoExtractor):
            for i in range(len(cur_fragments)):
                cur_sequence = (''.join(cur_fragments[i:] + cur_fragments[:i])).encode('ascii')
                try:
-                    raw_data = base64.b64decode(cur_sequence)
+                    raw_data = compat_b64decode(cur_sequence)
                    if compat_ord(raw_data[0]) == compat_ord('{'):
                        return json.loads(raw_data.decode('utf-8'))
                except (TypeError, binascii.Error, UnicodeDecodeError, ValueError):

@@ -1,9 +1,10 @@
 from __future__ import unicode_literals

-import base64
-
 from .common import InfoExtractor
-from ..compat import compat_parse_qs
+from ..compat import (
+    compat_b64decode,
+    compat_parse_qs,
+)


 class TutvIE(InfoExtractor):
@@ -26,7 +27,7 @@ class TutvIE(InfoExtractor):

         data_content = self._download_webpage(
             'http://tu.tv/flvurl.php?codVideo=%s' % internal_id, video_id, 'Downloading video info')
-        video_url = base64.b64decode(compat_parse_qs(data_content)['kpt'][0].encode('utf-8')).decode('utf-8')
+        video_url = compat_b64decode(compat_parse_qs(data_content)['kpt'][0]).decode('utf-8')

         return {
            'id': internal_id,
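
All hunks above apply the same mechanical rewrite at the call site. A minimal standalone illustration of the before/after pattern (the variable names and sample string are invented for this example, and the import assumes a youtube-dl checkout at or after this commit):

import base64

from youtube_dl.compat import compat_b64decode

encoded = 'eW91dHViZS1kbA=='  # base64 of 'youtube-dl'

# Before this commit: encode the str argument by hand, then decode the result
old_style = base64.b64decode(encoded.encode('utf-8')).decode('utf-8')

# After this commit: the compat helper accepts str directly on Python 2 and 3
new_style = compat_b64decode(encoded).decode('utf-8')

assert old_style == new_style == 'youtube-dl'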