Mirror of https://github.com/ytdl-org/youtube-dl.git
Remove no longer needed compat_str around geturl
commit 7947a1f7db
parent fca6dba8b8
12 changed files with 16 additions and 23 deletions
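Every hunk below makes the same change: the final URL returned by a response handle's geturl() is used directly instead of being wrapped in compat_str (which resolves to str on Python 3 and unicode on Python 2), and the compat_str imports that had no remaining users are dropped. The wrapper is presumably redundant because the handle is already guaranteed to return a unicode string on both Python versions (see the parent commit). A minimal, self-contained sketch of the idea — using plain urllib and a placeholder URL, not youtube-dl's own download helpers:

    # Sketch only (assumptions: Python 3, network access, https://example.com/
    # as a placeholder URL; urlh here is a plain urllib response, not the
    # handle returned by _download_webpage_handle).
    import urllib.request

    compat_str = str  # youtube-dl's compat_str is just str on Python 3

    urlh = urllib.request.urlopen('https://example.com/')

    # geturl() already returns the final (post-redirect) URL as a native str,
    # so wrapping it in compat_str is a no-op and can be dropped.
    final_url = urlh.geturl()
    assert isinstance(final_url, str)
    assert compat_str(final_url) == final_url
    print(final_url)

With the wrapper gone, several files no longer use compat_str at all, which is why the corresponding import lines are removed in the hunks below.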
@@ -2341,7 +2341,7 @@ class InfoExtractor(object):
             return []
         ism_doc, urlh = res
 
-        return self._parse_ism_formats(ism_doc, compat_str(urlh.geturl()), ism_id)
+        return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
 
     def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
         """
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     encode_base_n,
     ExtractorError,
@@ -55,7 +54,7 @@ class EpornerIE(InfoExtractor):
 
         webpage, urlh = self._download_webpage_handle(url, display_id)
 
-        video_id = self._match_id(compat_str(urlh.geturl()))
+        video_id = self._match_id(urlh.geturl())
 
         hash = self._search_regex(
             r'hash\s*:\s*["\']([\da-f]{32})', webpage, 'hash')
@@ -2287,7 +2287,7 @@ class GenericIE(InfoExtractor):
 
         if head_response is not False:
             # Check for redirect
-            new_url = compat_str(head_response.geturl())
+            new_url = head_response.geturl()
             if url != new_url:
                 self.report_following_redirect(new_url)
                 if force_videoid:
@@ -2387,12 +2387,12 @@ class GenericIE(InfoExtractor):
                 return self.playlist_result(
                     self._parse_xspf(
                         doc, video_id, xspf_url=url,
-                        xspf_base_url=compat_str(full_response.geturl())),
+                        xspf_base_url=full_response.geturl()),
                     video_id)
             elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
                 info_dict['formats'] = self._parse_mpd_formats(
                     doc,
-                    mpd_base_url=compat_str(full_response.geturl()).rpartition('/')[0],
+                    mpd_base_url=full_response.geturl().rpartition('/')[0],
                     mpd_url=url)
                 self._sort_formats(info_dict['formats'])
                 return info_dict
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     clean_html,
     determine_ext,
@@ -36,7 +35,7 @@ class LecturioBaseIE(InfoExtractor):
             self._LOGIN_URL, None, 'Downloading login popup')
 
         def is_logged(url_handle):
-            return self._LOGIN_URL not in compat_str(url_handle.geturl())
+            return self._LOGIN_URL not in url_handle.geturl()
 
         # Already logged in
         if is_logged(urlh):
@@ -8,7 +8,6 @@ from .common import InfoExtractor
 from ..compat import (
     compat_b64decode,
     compat_HTTPError,
-    compat_str,
 )
 from ..utils import (
     ExtractorError,
@@ -99,7 +98,7 @@ class LinuxAcademyIE(InfoExtractor):
                 'sso': 'true',
             })
 
-        login_state_url = compat_str(urlh.geturl())
+        login_state_url = urlh.geturl()
 
         try:
             login_page = self._download_webpage(
@@ -129,7 +128,7 @@ class LinuxAcademyIE(InfoExtractor):
             })
 
         access_token = self._search_regex(
-            r'access_token=([^=&]+)', compat_str(urlh.geturl()),
+            r'access_token=([^=&]+)', urlh.geturl(),
             'access token')
 
         self._download_webpage(
@@ -6,7 +6,6 @@ import re
 from .theplatform import ThePlatformBaseIE
 from ..compat import (
     compat_parse_qs,
-    compat_str,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
@@ -114,7 +113,7 @@ class MediasetIE(ThePlatformBaseIE):
                     continue
                 urlh = ie._request_webpage(
                     embed_url, video_id, note='Following embed URL redirect')
-                embed_url = compat_str(urlh.geturl())
+                embed_url = urlh.geturl()
                 program_guid = _program_guid(_qs(embed_url))
                 if program_guid:
                     entries.append(embed_url)
@@ -129,7 +129,7 @@ class MediasiteIE(InfoExtractor):
         query = mobj.group('query')
 
         webpage, urlh = self._download_webpage_handle(url, resource_id)  # XXX: add UrlReferrer?
-        redirect_url = compat_str(urlh.geturl())
+        redirect_url = urlh.geturl()
 
         # XXX: might have also extracted UrlReferrer and QueryString from the html
         service_path = compat_urlparse.urljoin(redirect_url, self._html_search_regex(
@@ -46,7 +46,7 @@ class PlatziBaseIE(InfoExtractor):
             headers={'Referer': self._LOGIN_URL})
 
         # login succeeded
-        if 'platzi.com/login' not in compat_str(urlh.geturl()):
+        if 'platzi.com/login' not in urlh.geturl():
             return
 
         login_error = self._webpage_read_content(
@@ -8,7 +8,6 @@ from .common import InfoExtractor
 
 from ..compat import (
     compat_parse_qs,
-    compat_str,
     compat_urlparse,
 )
 from ..utils import (
@@ -39,13 +38,13 @@ class SafariBaseIE(InfoExtractor):
             'Downloading login page')
 
         def is_logged(urlh):
-            return 'learning.oreilly.com/home/' in compat_str(urlh.geturl())
+            return 'learning.oreilly.com/home/' in urlh.geturl()
 
         if is_logged(urlh):
             self.LOGGED_IN = True
             return
 
-        redirect_url = compat_str(urlh.geturl())
+        redirect_url = urlh.geturl()
         parsed_url = compat_urlparse.urlparse(redirect_url)
         qs = compat_parse_qs(parsed_url.query)
         next_uri = compat_urlparse.urljoin(
@@ -4,7 +4,6 @@ import re
 
 from .common import InfoExtractor
 from .wistia import WistiaIE
-from ..compat import compat_str
 from ..utils import (
     clean_html,
     ExtractorError,
@@ -58,7 +57,7 @@ class TeachableBaseIE(InfoExtractor):
             self._logged_in = True
             return
 
-        login_url = compat_str(urlh.geturl())
+        login_url = urlh.geturl()
 
         login_form = self._hidden_inputs(login_page)
 
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -151,7 +150,7 @@ class TumblrIE(InfoExtractor):
         url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
         webpage, urlh = self._download_webpage_handle(url, video_id)
 
-        redirect_url = compat_str(urlh.geturl())
+        redirect_url = urlh.geturl()
         if 'tumblr.com/safe-mode' in redirect_url or redirect_url.startswith('/safe-mode'):
             raise ExtractorError(
                 'This Tumblr may contain sensitive media. '
@@ -591,7 +591,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
             # Retrieve video webpage to extract further information
             webpage, urlh = self._download_webpage_handle(
                 url, video_id, headers=headers)
-            redirect_url = compat_str(urlh.geturl())
+            redirect_url = urlh.geturl()
         except ExtractorError as ee:
             if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                 errmsg = ee.cause.read()