
Fix all PEP8 issues except E501

Philipp Hagemeister 2014-11-23 22:21:46 +01:00
parent 3d36cea4ac
commit b74e86f48a
17 changed files with 49 additions and 42 deletions
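
For context, E501 is the "line too long" check, so after this change the only style warnings left in the codebase concern line length. A minimal sketch of how the remaining checks could be reproduced locally, assuming the flake8 tool is installed (the exact invocation below is illustrative and not part of this commit):

    # report every PEP 8 issue except E501 (line too long)
    flake8 --ignore=E501 youtube_dl devscripts test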

View file

@@ -22,7 +22,7 @@ def main():
             continue
         elif ie_desc is not None:
             ie_html += ': {}'.format(ie.IE_DESC)
-        if ie.working() == False:
+        if not ie.working():
             ie_html += ' (Currently broken)'
         ie_htmls.append('<li>{}</li>'.format(ie_html))
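
The '== False' comparison rewritten above (and the similar one in the next hunk) is what the PEP 8 checker reports as E712. A minimal sketch with a hypothetical 'working' flag, just to illustrate the preferred spellings:

    working = False

    if working == False:   # flagged as E712: comparison to False
        pass
    if not working:        # preferred when any falsy value should match
        pass
    if working is False:   # preferred when only the singleton False should match
        pass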

View file

@@ -189,7 +189,7 @@ def _real_main(argv=None):
     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
     # this was the old behaviour if only --all-sub was given.
-    if opts.allsubtitles and (opts.writeautomaticsub == False):
+    if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
     if sys.version_info < (3,):

View file

@@ -87,7 +87,7 @@ def key_expansion(data):
             temp = sub_bytes(temp)
             data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
             data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]

View file

@@ -73,10 +73,11 @@ class BambuserChannelIE(InfoExtractor):
         urls = []
         last_id = ''
         for i in itertools.count(1):
-            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
-                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
-                '&method=broadcast&format=json&vid_older_than={last}'
-                ).format(user=user, count=self._STEP, last=last_id)
+            req_url = (
+                'http://bambuser.com/xhr-api/index.php?username={user}'
+                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
+                '&method=broadcast&format=json&vid_older_than={last}'
+            ).format(user=user, count=self._STEP, last=last_id)
             req = compat_urllib_request.Request(req_url)
             # Without setting this header, we wouldn't get any result
             req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)

View file

@@ -188,16 +188,17 @@ class Channel9IE(InfoExtractor):
         view_count = self._extract_view_count(html)
         comment_count = self._extract_comment_count(html)
 
-        common = {'_type': 'video',
-                  'id': content_path,
-                  'description': description,
-                  'thumbnail': thumbnail,
-                  'duration': duration,
-                  'avg_rating': avg_rating,
-                  'rating_count': rating_count,
-                  'view_count': view_count,
-                  'comment_count': comment_count,
-                  }
+        common = {
+            '_type': 'video',
+            'id': content_path,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'avg_rating': avg_rating,
+            'rating_count': rating_count,
+            'view_count': view_count,
+            'comment_count': comment_count,
+        }
 
         result = []

View file

@@ -248,8 +248,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,\
-                video_id, note='Downloading subtitles for ' + sub_name)
+            sub_page = self._download_webpage(
+                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)

View file

@@ -77,7 +77,7 @@ class FacebookIE(InfoExtractor):
             'legacy_return': '1',
             'timezone': '-60',
             'trynum': '1',
         }
         request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:

View file

@@ -65,9 +65,9 @@ class FourTubeIE(InfoExtractor):
         token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
         headers = {
             b'Content-Type': b'application/x-www-form-urlencoded',
             b'Origin': b'http://www.4tube.com',
         }
         token_req = compat_urllib_request.Request(token_url, b'{}', headers)
         tokens = self._download_json(token_req, video_id)
@@ -76,7 +76,7 @@ class FourTubeIE(InfoExtractor):
             'format_id': format + 'p',
             'resolution': format + 'p',
             'quality': int(format),
         } for format in sources]
         self._sort_formats(formats)

View file

@@ -537,9 +537,9 @@ class GenericIE(InfoExtractor):
         if default_search in ('error', 'fixup_error'):
             raise ExtractorError(
-                ('%r is not a valid URL. '
-                 'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
-                ) % (url, url), expected=True)
+                '%r is not a valid URL. '
+                'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
+                % (url, url), expected=True)
         else:
             if ':' not in default_search:
                 default_search += ':'

View file

@@ -63,8 +63,10 @@ class IGNIE(InfoExtractor):
                 'id': '078fdd005f6d3c02f63d795faa1b984f',
                 'ext': 'mp4',
                 'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
-                'description': 'Giant skeletons, bloody hunts, and captivating'
-                    ' natural beauty take our breath away.',
+                'description': (
+                    'Giant skeletons, bloody hunts, and captivating'
+                    ' natural beauty take our breath away.'
+                ),
             },
         },
     ]

View file

@@ -43,7 +43,7 @@ class IviIE(InfoExtractor):
                 'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
             },
             'skip': 'Only works from Russia',
         }
     ]
 
     # Sorted by quality

View file

@@ -57,9 +57,9 @@ class MyVideoIE(InfoExtractor):
         video_id = mobj.group('id')
 
         GK = (
             b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
             b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
             b'TnpsbA0KTVRkbU1tSTRNdz09'
         )
 
         # Get video webpage

View file

@@ -371,7 +371,7 @@ class SoundcloudPlaylistIE(SoundcloudIE):
         entries = [
             self._extract_info_dict(t, quiet=True, secret_token=token)
             for t in data['tracks']]
 
         return {
             '_type': 'playlist',

View file

@@ -36,9 +36,10 @@ class TlcDeIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Breaking Amish: Die Welt da draußen',
             'uploader': 'Discovery Networks - Germany',
-            'description': 'Vier Amische und eine Mennonitin wagen in New York'
-                ' den Sprung in ein komplett anderes Leben. Begleitet sie auf'
-                ' ihrem spannenden Weg.',
+            'description': (
+                'Vier Amische und eine Mennonitin wagen in New York'
+                ' den Sprung in ein komplett anderes Leben. Begleitet sie auf'
+                ' ihrem spannenden Weg.'),
         },
     }

View file

@@ -36,7 +36,7 @@ class TudouIE(InfoExtractor):
         'skip': 'Only works from China'
     }]
 
-    def _url_for_id(self, id, quality = None):
+    def _url_for_id(self, id, quality=None):
         info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality

View file

@@ -262,7 +262,8 @@ def parseOpts(overrideArguments=None):
     video_format.add_option(
         '-f', '--format',
         action='store', dest='format', metavar='FORMAT', default=None,
-        help='video format code, specify the order of preference using'
+        help=(
+            'video format code, specify the order of preference using'
             ' slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also'
             ' supported. You can also use the special names "best",'
             ' "bestvideo", "bestaudio", "worst", "worstvideo" and'
@@ -271,7 +272,7 @@ def parseOpts(overrideArguments=None):
             ' -f 136/137/mp4/bestvideo,140/m4a/bestaudio.'
             ' You can merge the video and audio of two formats into a single'
             ' file using -f <video-format>+<audio-format> (requires ffmpeg or'
-            ' avconv), for example -f bestvideo+bestaudio.')
+            ' avconv), for example -f bestvideo+bestaudio.'))
     video_format.add_option(
         '--all-formats',
         action='store_const', dest='format', const='all',

View file

@@ -240,9 +240,9 @@ def sanitize_open(filename, open_mode):
         # In case of error, try to remove win32 forbidden chars
         alt_filename = os.path.join(
             re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part)
             for path_part in os.path.split(filename)
         )
 
         if alt_filename == filename:
             raise
         else: