Mirror of https://github.com/ytdl-org/youtube-dl.git

PEP8: more applied

Jouke Waleson 2014-11-23 21:20:46 +01:00
parent 5f6a1245ff
commit 8bcc875676
34 changed files with 123 additions and 132 deletions
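
The diff below mostly applies PEP 8 whitespace conventions: spaces around binary operators (commonly reported as E225/E226), no trailing semicolons (E703), plus removal of unused imports and a few layout cleanups. As an illustrative sketch of the spacing rule only (not a line taken from this commit, identifiers borrowed from the aes.py hunks below):

    block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]      # before: no spaces around * and +
    block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]  # after: PEP 8 operator spacing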

View file

@@ -1,8 +1,5 @@
 #!/usr/bin/env python3
 import hashlib
-import shutil
-import subprocess
-import tempfile
 import urllib.request
 import json

View file

@@ -32,7 +32,7 @@ def rsa_verify(message, signature, key):
     signature = signature[2:]
     if not b('\x00') in signature:
         return False
-    signature = signature[signature.index(b('\x00'))+1:]
+    signature = signature[signature.index(b('\x00')) + 1:]
     if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
         return False
     signature = signature[19:]

View file

@@ -45,7 +45,6 @@ from youtube_dl.utils import (
     escape_rfc3986,
     escape_url,
     js_to_json,
-    get_filesystem_encoding,
     intlist_to_bytes,
     args_to_str,
 )

View file

@@ -12,10 +12,6 @@ from test.helper import FakeYDL
 from youtube_dl.extractor import (
     YoutubePlaylistIE,
     YoutubeIE,
-    YoutubeChannelIE,
-    YoutubeShowIE,
-    YoutubeTopListIE,
-    YoutubeSearchURLIE,
 )

View file

@@ -29,7 +29,6 @@ from .compat import (
     compat_str,
     compat_urllib_error,
     compat_urllib_request,
-    shlex_quote,
 )
 from .utils import (
     escape_url,

View file

@@ -76,10 +76,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error('wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string('[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value
     # Dump user agent

View file

@@ -24,8 +24,8 @@ def aes_ctr_decrypt(data, key, counter):
     decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
@@ -49,8 +49,8 @@ def aes_cbc_decrypt(data, key, iv):
     decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
@@ -76,20 +76,20 @@ def key_expansion(data):
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
     data = data[:expanded_key_size_bytes]
     return data
@@ -106,12 +106,12 @@ def aes_encrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
 
     return data
@@ -127,7 +127,7 @@ def aes_decrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
@@ -155,14 +155,14 @@ def aes_decrypt_text(data, password, key_size_bytes):
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
 
-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
 
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
 
     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
 
         def next_value(self):
             temp = self.__value
@@ -293,7 +293,7 @@ def mix_column(data, matrix):
 
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4: (i+1)*4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
@@ -320,7 +320,7 @@ def shift_rows_inv(data):
 
 def inc(data):
     data = data[:]  # copy
-    for i in range(len(data)-1, -1, -1):
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:

View file

@@ -55,7 +55,7 @@ class FlvReader(io.BytesIO):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size-header_end)
+        return real_size, box_type, self.read(real_size - header_end)
 
     def read_asrt(self):
         # version
@@ -180,7 +180,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res

View file

@@ -46,13 +46,13 @@ class RtmpFD(FileDownloader):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -72,7 +72,7 @@ class RtmpFD(FileDownloader):
                     # no percent for live streams
                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                     if mobj:
-                        downloaded_data_len = int(float(mobj.group(1))*1024)
+                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                         time_now = time.time()
                         speed = self.calc_speed(start, time_now, downloaded_data_len)
                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -88,7 +88,7 @@ class RtmpFD(FileDownloader):
                 if not cursor_in_new_line:
                     self.to_screen('')
                     cursor_in_new_line = True
-                self.to_screen('[rtmpdump] '+line)
+                self.to_screen('[rtmpdump] ' + line)
             proc.wait()
             if not cursor_in_new_line:
                 self.to_screen('')

View file

@@ -529,4 +529,4 @@ def gen_extractors():
 
 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+    return globals()[ie_name + 'IE']

View file

@@ -77,7 +77,7 @@ class CinemassacreIE(InfoExtractor):
         if videolist_url:
             videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
             formats = []
-            baseurl = vidurl[:vidurl.rfind('/')+1]
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
             for video in videolist.findall('.//video'):
                 src = video.get('src')
                 if not src:

View file

@@ -226,10 +226,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
             streamdata = self._download_xml(
@@ -248,8 +248,8 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,\
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)

View file

@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated']/1000)  # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in miliseconds
 
         return {
             'id': video_id,

View file

@@ -748,7 +748,7 @@ class GenericIE(InfoExtractor):
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
+            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')

View file

@@ -54,7 +54,7 @@ class IPrimaIE(InfoExtractor):
         player_url = (
             'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
-            (floor(random()*1073741824), floor(random()*1073741824))
+            (floor(random() * 1073741824), floor(random() * 1073741824))
         )
 
         req = compat_urllib_request.Request(player_url)

View file

@@ -71,4 +71,4 @@ class LifeNewsIE(InfoExtractor):
         if len(videos) == 1:
             return make_entry(video_id, videos[0])
         else:
-            return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
+            return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]

View file

@@ -19,8 +19,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ljfriel2',
             'title': 'Most unlucky car accident'
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=f93_1390833151',
         'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
         'info_dict': {
@@ -30,8 +29,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ARD_Stinkt',
             'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
         'md5': '42c6d97d54f1db107958760788c5f48f',
         'info_dict': {

View file

@@ -245,7 +245,7 @@ class MTVIE(MTVServicesInfoExtractor):
         m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
                            webpage, re.DOTALL)
         if m_vevo:
-            vevo_id = m_vevo.group(1);
+            vevo_id = m_vevo.group(1)
             self.to_screen('Vevo video detected: %s' % vevo_id)
             return self.url_result('vevo:%s' % vevo_id, ie='Vevo')

View file

@@ -69,7 +69,7 @@ class SohuIE(InfoExtractor):
                     (allot, prot, clipsURL[i], su[i]))
                 part_str = self._download_webpage(
                     part_url, video_id,
-                    note=u'Downloading part %d of %d' % (i+1, part_count))
+                    note=u'Downloading part %d of %d' % (i + 1, part_count))
 
                 part_info = part_str.split('|')
                 video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])

View file

@@ -80,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):
             if media_type == 'Video':
                 fmt.update({
-                    'format_note': ['144p', '288p', '544p', '720p'][quality-1],
+                    'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
                     'vcodec': codec,
                 })
             elif media_type == 'Audio':

View file

@@ -118,5 +118,5 @@ class ThePlatformIE(InfoExtractor):
             'formats': formats,
             'description': info['description'],
             'thumbnail': info['defaultThumbnailUrl'],
-            'duration': info['duration']//1000,
+            'duration': info['duration'] // 1000,
         }

View file

@@ -37,7 +37,7 @@ class TudouIE(InfoExtractor):
     }]
 
     def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id="+str(id)
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")

View file

@@ -97,7 +97,7 @@ class XTubeUserIE(InfoExtractor):
             url, username, note='Retrieving profile page')
 
         video_count = int(self._search_regex(
-            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>'%username, profile_page,
+            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page,
             'video count'))
 
         PAGE_SIZE = 25

View file

@@ -229,7 +229,7 @@ class YahooSearchIE(SearchInfoExtractor):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                note='Downloading results page '+str(pagenum+1))
+                note='Downloading results page ' + str(pagenum + 1))
 
             m = info['m']
             results = info['results']

View file

@@ -181,8 +181,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'next_url': '/',
             'action_confirm': 'Confirm',
         }
-        req = compat_urllib_request.Request(self._AGE_URL,
-            compat_urllib_parse.urlencode(age_form).encode('ascii'))
+        req = compat_urllib_request.Request(
+            self._AGE_URL,
+            compat_urllib_parse.urlencode(age_form).encode('ascii')
+        )
 
         self._download_webpage(
             req, None,
@@ -492,7 +494,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
                 starts = '' if start == 0 else str(start)
-                ends = (':%d' % (end+step)) if end + step >= 0 else ':'
+                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                 steps = '' if step == 1 else (':%d' % step)
                 return 's[%s%s%s]' % (starts, ends, steps)

View file

@@ -246,7 +246,7 @@ class FFmpegVideoConvertor(FFmpegPostProcessor):
         if information['ext'] == self._preferedformat:
             self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
             return True, information
-        self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
+        self._downloader.to_screen(u'[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
         self.run_ffmpeg(path, outpath, [])
         information['filepath'] = outpath
         information['format'] = self._preferedformat
@@ -466,7 +466,7 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
         opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
         for (i, lang) in enumerate(sub_langs):
-            opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
+            opts.extend(['-map', '%d:0' % (i + 1), '-c:s:%d' % i, 'mov_text'])
             lang_code = self._conver_lang_code(lang)
             if lang_code is not None:
                 opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])

View file

@@ -41,7 +41,7 @@ def rsa_verify(message, signature, key):
     signature = signature[2:]
     if not b('\x00') in signature:
         return False
-    signature = signature[signature.index(b('\x00'))+1:]
+    signature = signature[signature.index(b('\x00')) + 1:]
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
        return False
    signature = signature[19:]