[YoutubeDL] Eliminate implicit global states

Note: implicit global state was previously used to support multiple
proxies. Now an opener_name should be specified for each download request.
This commit is contained in:
Yen Chi Hsuan 2015-02-28 11:09:46 +08:00
parent 1b92dc06cf
commit 63367c442b
3 changed files with 19 additions and 25 deletions

View File

@ -278,6 +278,7 @@ class YoutubeDL(object):
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr self._err_file = sys.stderr
self._openers_pool = {} self._openers_pool = {}
self.DEFAULT_OPENER_NAME = 'default'
self.params = params self.params = params
self.cache = Cache(self) self.cache = Cache(self)
@ -1642,7 +1643,7 @@ class YoutubeDL(object):
[[lang, ', '.join(f['ext'] for f in reversed(formats))] [[lang, ', '.join(f['ext'] for f in reversed(formats))]
for lang, formats in subtitles.items()])) for lang, formats in subtitles.items()]))
def urlopen(self, req): def urlopen(self, req, opener_name=None):
""" Start an HTTP download """ """ Start an HTTP download """
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
@ -1664,7 +1665,12 @@ class YoutubeDL(object):
url_escaped, data=req.data, headers=req.headers, url_escaped, data=req.data, headers=req.headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
return self._opener.open(req, timeout=self._socket_timeout) if not opener_name:
opener_name = self.DEFAULT_OPENER_NAME
if opener_name not in self._openers_pool:
raise Exception('Invalid opener name "%s"' % compat_str(opener_name))
return self._openers_pool[opener_name].open(req, timeout=self._socket_timeout)
def print_debug_header(self): def print_debug_header(self):
if not self.params.get('verbose'): if not self.params.get('verbose'):
@ -1762,8 +1768,6 @@ class YoutubeDL(object):
self._setup_single_opener('default', default_proxy, https_handler, ydlh) self._setup_single_opener('default', default_proxy, https_handler, ydlh)
self._setup_single_opener('alternative', alternative_proxy, https_handler, ydlh) self._setup_single_opener('alternative', alternative_proxy, https_handler, ydlh)
self.use_opener('default')
def _setup_single_opener(self, opener_name, opts_proxy, https_handler, ydlh): def _setup_single_opener(self, opener_name, opts_proxy, https_handler, ydlh):
cookie_processor = compat_urllib_request.HTTPCookieProcessor( cookie_processor = compat_urllib_request.HTTPCookieProcessor(
self.cookiejar) self.cookiejar)
@ -1787,12 +1791,6 @@ class YoutubeDL(object):
opener.addheaders = [] opener.addheaders = []
self._openers_pool[opener_name] = opener self._openers_pool[opener_name] = opener
def use_opener(self, opener_name):
if opener_name in self._openers_pool:
self._opener = self._openers_pool[opener_name]
else:
raise Exception('Invalid opener name ' + opener_name)
def encode(self, s): def encode(self, s):
if isinstance(s, bytes): if isinstance(s, bytes):
return s # Already encoded return s # Already encoded

View File

@ -301,7 +301,7 @@ class InfoExtractor(object):
def IE_NAME(self): def IE_NAME(self):
return type(self).__name__[:-2] return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True): def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, opener_name=None):
""" Returns the response handle """ """ Returns the response handle """
if note is None: if note is None:
self.report_download_webpage(video_id) self.report_download_webpage(video_id)
@ -311,7 +311,7 @@ class InfoExtractor(object):
else: else:
self.to_screen('%s: %s' % (video_id, note)) self.to_screen('%s: %s' % (video_id, note))
try: try:
return self._downloader.urlopen(url_or_request) return self._downloader.urlopen(url_or_request, opener_name=opener_name)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False: if errnote is False:
return False return False
@ -324,13 +324,13 @@ class InfoExtractor(object):
self._downloader.report_warning(errmsg) self._downloader.report_warning(errmsg)
return False return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True): def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, opener_name=None):
""" Returns a tuple (page content as string, URL handle) """ """ Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038) # Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)): if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0] url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal) urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, opener_name=opener_name)
if urlh is False: if urlh is False:
assert not fatal assert not fatal
return False return False
@ -410,13 +410,13 @@ class InfoExtractor(object):
return content return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5): def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, opener_name=None):
""" Returns the data of the page as a string """ """ Returns the data of the page as a string """
success = False success = False
try_count = 0 try_count = 0
while success is False: while success is False:
try: try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal) res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, opener_name=opener_name)
success = True success = True
except compat_http_client.IncompleteRead as e: except compat_http_client.IncompleteRead as e:
try_count += 1 try_count += 1
@ -431,10 +431,10 @@ class InfoExtractor(object):
def _download_xml(self, url_or_request, video_id, def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML', note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True): transform_source=None, fatal=True, opener_name=None):
"""Return the xml as an xml.etree.ElementTree.Element""" """Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage( xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal) url_or_request, video_id, note, errnote, fatal=fatal, opener_name=opener_name)
if xml_string is False: if xml_string is False:
return xml_string return xml_string
if transform_source: if transform_source:
@ -445,9 +445,9 @@ class InfoExtractor(object):
note='Downloading JSON metadata', note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', errnote='Unable to download JSON metadata',
transform_source=None, transform_source=None,
fatal=True): fatal=True, opener_name=None):
json_string = self._download_webpage( json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal) url_or_request, video_id, note, errnote, fatal=fatal, opener_name=opener_name)
if (not fatal) and json_string is False: if (not fatal) and json_string is False:
return None return None
return self._parse_json( return self._parse_json(

View File

@ -92,13 +92,9 @@ class LetvIE(InfoExtractor):
'domain': 'www.letv.com' 'domain': 'www.letv.com'
} }
self._use_opener('alternative')
play_json = self._download_json( play_json = self._download_json(
'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params), 'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params),
media_id, 'playJson data') media_id, 'playJson data', opener_name='alternative')
self._use_opener('default')
# Check for errors # Check for errors
playstatus = play_json['playstatus'] playstatus = play_json['playstatus']