#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__  = (
	'Ricardo Garcia Gonzalez',
	'Danny Colligan',
	'Benjamin Johnson',
	'Vasyl\' Vavrychuk',
	'Witold Baryluk',
	'Paweł Paprota',
	'Gergely Imreh',
	'Rogério Brito',
	)

__license__ = 'Public Domain'
__version__ = '2011.08.28-phihag'

import cookielib
import datetime
import gzip
import htmlentitydefs
import httplib
import locale
import math
import netrc
import os
import os.path
import re
import socket
import string
import subprocess
import sys
import time
import urllib
import urllib2
import warnings
import zlib

if os.name == 'nt':
	import ctypes

try:
	import email.utils
except ImportError: # Python 2.4
	import email.Utils
try:
	import cStringIO as StringIO
except ImportError:
	import StringIO

# parse_qs was moved from the cgi module to the urlparse module recently.
try:
	from urlparse import parse_qs
except ImportError:
	from cgi import parse_qs

try:
	import lxml.etree
except ImportError:
	pass # Handled below

std_headers = {
	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
	'Accept-Encoding': 'gzip, deflate',
	'Accept-Language': 'en-us,en;q=0.5',
}

simple_title_chars = string.ascii_letters.decode('ascii') + string.digits.decode('ascii')

try:
	import json
except ImportError: # Python <2.6, use trivialjson (https://github.com/phihag/trivialjson):
	import re
	class json(object):
		@staticmethod
		def loads(s):
			s = s.decode('UTF-8')
			def raiseError(msg, i):
				raise ValueError(msg + ' at position ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]))
			def skipSpace(i, expectMore=True):
				while i < len(s) and s[i] in ' \t\r\n':
					i += 1
				if expectMore:
					if i >= len(s):
						raiseError('Premature end', i)
				return i
			def decodeEscape(match):
				esc = match.group(1)
				_STATIC = {
					'"': '"',
					'\\': '\\',
					'/': '/',
					'b': unichr(0x8),
					'f': unichr(0xc),
					'n': '\n',
					'r': '\r',
					't': '\t',
				}
				if esc in _STATIC:
					return _STATIC[esc]
				if esc[0] == 'u':
					if len(esc) == 1+4:
						return unichr(int(esc[1:5], 16))
					if len(esc) == 5+6 and esc[5:7] == '\\u':
						hi = int(esc[1:5], 16)
						low = int(esc[7:11], 16)
						return unichr((hi - 0xd800) * 0x400 + low - 0xdc00 + 0x10000)
				raise ValueError('Unknown escape ' + str(esc))
			def parseString(i):
				i += 1
				e = i
				while True:
					e = s.index('"', e)
					bslashes = 0
					while s[e-bslashes-1] == '\\':
						bslashes += 1
					if bslashes % 2 == 1:
						e += 1
						continue
					break
				rexp = re.compile(r'\\(u[dD][89aAbB][0-9a-fA-F]{2}\\u[0-9a-fA-F]{4}|u[0-9a-fA-F]{4}|.|$)')
				stri = rexp.sub(decodeEscape, s[i:e])
				return (e+1,stri)
			def parseObj(i):
				i += 1
				res = {}
				i = skipSpace(i)
				if s[i] == '}': # Empty dictionary
					return (i+1,res)
				while True:
					if s[i] != '"':
						raiseError('Expected a string object key', i)
					i,key = parseString(i)
					i = skipSpace(i)
					if i >= len(s) or s[i] != ':':
						raiseError('Expected a colon', i)
					i,val = parse(i+1)
					res[key] = val
					i = skipSpace(i)
					if s[i] == '}':
						return (i+1, res)
					if s[i] != ',':
						raiseError('Expected comma or closing curly brace', i)
					i = skipSpace(i+1)
			def parseArray(i):
				res = []
				i = skipSpace(i+1)
				if s[i] == ']': # Empty array
					return (i+1,res)
				while True:
					i,val = parse(i)
					res.append(val)
					i = skipSpace(i) # Raise exception if premature end
					if s[i] == ']':
						return (i+1, res)
					if s[i] != ',':
						raiseError('Expected a comma or closing bracket', i)
					i = skipSpace(i+1)
			def parseDiscrete(i):
				for k,v in {'true': True, 'false': False, 'null': None}.items():
					if s.startswith(k, i):
						return (i+len(k), v)
				raiseError('Not a boolean (or null)', i)
			def parseNumber(i):
				mobj = re.match('^(-?(0|[1-9][0-9]*)(\.[0-9]*)?([eE][+-]?[0-9]+)?)', s[i:])
				if mobj is None:
					raiseError('Not a number', i)
				nums = mobj.group(1)
				if '.' in nums or 'e' in nums or 'E' in nums:
					return (i+len(nums), float(nums))
				return (i+len(nums), int(nums))
			CHARMAP = {'{': parseObj, '[': parseArray, '"': parseString, 't': parseDiscrete, 'f': parseDiscrete, 'n': parseDiscrete}
			def parse(i):
				i = skipSpace(i)
				i,res = CHARMAP.get(s[i], parseNumber)(i)
				i = skipSpace(i, False)
				return (i,res)
			i,res = parse(0)
			if i < len(s):
				raise ValueError('Extra data at end of input (index ' + str(i) + ' of ' + repr(s) + ': ' + repr(s[i:]) + ')')
			return res

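# Illustrative sketch (not part of the original script): whether the standard
# library json module or the trivialjson fallback above is in use, the call
# looks the same, e.g.
#   json.loads('{"ok": true, "ids": [1, 2], "pi": 3.14}')
# returns {u'ok': True, u'ids': [1, 2], u'pi': 3.14}.
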
def preferredencoding():
	"""Get preferred encoding.

	Returns the best encoding scheme for the system, based on
	locale.getpreferredencoding() and some further tweaks.
	"""
	def yield_preferredencoding():
		try:
			pref = locale.getpreferredencoding()
			u'TEST'.encode(pref)
		except:
			pref = 'UTF-8'
		while True:
			yield pref
	return yield_preferredencoding().next()

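# Illustrative sketch (assumption, not in the original script): the typical
# use is encoding unicode text before writing it to a byte stream, e.g.
#   u'title'.encode(preferredencoding())
# which falls back to UTF-8 when the locale encoding is missing or unusable.
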
def htmlentity_transform(matchobj):
	"""Transforms an HTML entity to a Unicode character.

	This function receives a match object and is intended to be used with
	the re.sub() function.
	"""
	entity = matchobj.group(1)

	# Known non-numeric HTML entity
	if entity in htmlentitydefs.name2codepoint:
		return unichr(htmlentitydefs.name2codepoint[entity])

	# Unicode character
	mobj = re.match(ur'(?u)#(x?\d+)', entity)
	if mobj is not None:
		numstr = mobj.group(1)
		if numstr.startswith(u'x'):
			base = 16
			numstr = u'0%s' % numstr
		else:
			base = 10
		return unichr(long(numstr, base))

	# Unknown entity in name, return its literal representation
	return (u'&%s;' % entity)

def sanitize_title(utitle):
	"""Sanitizes a video title so it could be used as part of a filename."""
	utitle = re.sub(ur'(?u)&(.+?);', htmlentity_transform, utitle)
	return utitle.replace(unicode(os.sep), u'%')

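# Illustrative sketch (example title is an assumption): entities are decoded
# first, then path separators are replaced, so on a POSIX system (os.sep ==
# '/'):
#   sanitize_title(u'AC&#47;DC &amp; friends')
# returns u'AC%DC & friends'.
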
def sanitize_open(filename, open_mode):
	"""Try to open the given filename, and slightly tweak it if this fails.

	Attempts to open the given filename. If this fails, it tries to change
	the filename slightly, step by step, until it's either able to open it
	or it fails and raises a final exception, like the standard open()
	function.

	It returns the tuple (stream, definitive_file_name).
	"""
	try:
		if filename == u'-':
			if sys.platform == 'win32':
				import msvcrt
				msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
			return (sys.stdout, filename)
		stream = open(filename, open_mode)
		return (stream, filename)
	except (IOError, OSError), err:
		# In case of error, try to remove win32 forbidden chars
		filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)

		# An exception here should be caught in the caller
		stream = open(filename, open_mode)
		return (stream, filename)

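# Illustrative sketch (filename is an assumption): a name containing
# characters that are forbidden on win32 may be rewritten on the retry, e.g.
#   stream, fn = sanitize_open(u'a:b?.mp4', 'wb')
# can come back with fn == u'a#b#.mp4'; passing u'-' yields sys.stdout.
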
def timeconvert(timestr):
	"""Convert an RFC 2822 time string into a system timestamp."""
	timestamp = None
	timetuple = email.utils.parsedate_tz(timestr)
	if timetuple is not None:
		timestamp = email.utils.mktime_tz(timetuple)
	return timestamp

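# Illustrative sketch (date value is an assumption):
#   timeconvert('Sun, 28 Aug 2011 20:00:00 +0000')
# yields 1314561600, and strings parsedate_tz cannot handle yield None.
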
class DownloadError(Exception):
	"""Download Error exception.

	This exception may be thrown by FileDownloader objects if they are not
	configured to continue on errors. They will contain the appropriate
	error message.
	"""
	pass

class SameFileError(Exception):
	"""Same File exception.

	This exception will be thrown by FileDownloader objects if they detect
	multiple files would have to be downloaded to the same file on disk.
	"""
	pass

class PostProcessingError(Exception):
	"""Post Processing exception.

	This exception may be raised by PostProcessor's .run() method to
	indicate an error in the postprocessing task.
	"""
	pass

class UnavailableVideoError(Exception):
	"""Unavailable Format exception.

	This exception will be thrown when a video is requested
	in a format that is not available for that video.
	"""
	pass

class ContentTooShortError(Exception):
	"""Content Too Short exception.

	This exception may be raised by FileDownloader objects when a file they
	download is too small for what the server announced first, indicating
	the connection was probably interrupted.
	"""
	# Both in bytes
	downloaded = None
	expected = None

	def __init__(self, downloaded, expected):
		self.downloaded = downloaded
		self.expected = expected

class YoutubeDLHandler(urllib2.HTTPHandler):
	"""Handler for HTTP requests and responses.

	This class, when installed with an OpenerDirector, automatically adds
	the standard headers to every HTTP request and handles gzipped and
	deflated responses from web servers. If compression is to be avoided in
	a particular request, the original request in the program code only has
	to include the HTTP header "Youtubedl-No-Compression", which will be
	removed before making the real request.

	Part of this code was copied from:

	  http://techknack.net/python-urllib2-handlers/

	Andrew Rowls, the author of that code, agreed to release it to the
	public domain.
	"""

	@staticmethod
	def deflate(data):
		try:
			return zlib.decompress(data, -zlib.MAX_WBITS)
		except zlib.error:
			return zlib.decompress(data)

	@staticmethod
	def addinfourl_wrapper(stream, headers, url, code):
		if hasattr(urllib2.addinfourl, 'getcode'):
			return urllib2.addinfourl(stream, headers, url, code)
		ret = urllib2.addinfourl(stream, headers, url)
		ret.code = code
		return ret

	def http_request(self, req):
		for h in std_headers:
			if h in req.headers:
				del req.headers[h]
			req.add_header(h, std_headers[h])
		if 'Youtubedl-no-compression' in req.headers:
			if 'Accept-encoding' in req.headers:
				del req.headers['Accept-encoding']
			del req.headers['Youtubedl-no-compression']
		return req

	def http_response(self, req, resp):
		old_resp = resp
		# gzip
		if resp.headers.get('Content-encoding', '') == 'gzip':
			gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r')
			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
			resp.msg = old_resp.msg
		# deflate
		if resp.headers.get('Content-encoding', '') == 'deflate':
			gz = StringIO.StringIO(self.deflate(resp.read()))
			resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
			resp.msg = old_resp.msg
		return resp

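# Illustrative sketch (mirrors how such a handler is wired into urllib2; the
# actual opener construction happens later in the original script):
#   opener = urllib2.build_opener(urllib2.ProxyHandler(), YoutubeDLHandler())
#   urllib2.install_opener(opener)
# From then on urllib2.urlopen() sends std_headers and transparently
# decompresses gzip/deflate bodies; a 'Youtubedl-No-Compression' header opts
# a single request out.
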
class FileDownloader(object):
	"""File Downloader class.

	File downloader objects are responsible for downloading the
	actual video file and writing it to disk if the user has requested
	it, among some other tasks. In most cases there should be one per
	program. Given a video URL, the downloader doesn't know how to
	extract all the needed information (that is the InfoExtractors'
	task), so it has to pass the URL to one of them.

	For this, file downloader objects have a method that allows
	InfoExtractors to be registered in a given order. When it is passed
	a URL, the file downloader hands it to the first InfoExtractor it
	finds that reports being able to handle it. The InfoExtractor extracts
	all the information about the video or videos the URL refers to, and
	asks the FileDownloader to process the video information, possibly
	downloading the video.

	File downloaders accept a lot of parameters. In order not to saturate
	the object constructor with arguments, it receives a dictionary of
	options instead. These options are available through the params
	attribute for the InfoExtractors to use. The FileDownloader also
	registers itself as the downloader in charge of the InfoExtractors
	that are added to it, so this is a "mutual registration".

	Available options:

	username:         Username for authentication purposes.
	password:         Password for authentication purposes.
	usenetrc:         Use netrc for authentication instead.
	quiet:            Do not print messages to stdout.
	forceurl:         Force printing final URL.
	forcetitle:       Force printing title.
	forcethumbnail:   Force printing thumbnail URL.
	forcedescription: Force printing description.
	forcefilename:    Force printing final filename.
	simulate:         Do not download the video files.
	format:           Video format code.
	format_limit:     Highest quality format to try.
	outtmpl:          Template for output names.
	ignoreerrors:     Do not stop on download errors.
	ratelimit:        Download speed limit, in bytes/sec.
	nooverwrites:     Prevent overwriting files.
	retries:          Number of times to retry for HTTP error 5xx.
	continuedl:       Try to continue downloads if possible.
	noprogress:       Do not print the progress bar.
	playliststart:    Playlist item to start at.
	playlistend:      Playlist item to end at.
	logtostderr:      Log messages to stderr instead of stdout.
	consoletitle:     Display progress in console window's titlebar.
	nopart:           Do not use temporary .part files.
	updatetime:       Use the Last-modified header to set output file timestamps.
	writedescription: Write the video description to a .description file.
	writeinfojson:    Write the video metadata to a .info.json file.
	"""

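	# Illustrative sketch (extractor name and URL are placeholders; concrete
	# InfoExtractors and the download() method appear later in the original
	# script):
	#   fd = FileDownloader({'outtmpl': u'%(title)s.%(ext)s', 'ratelimit': 51200})
	#   fd.add_info_extractor(SomeSiteIE())
	#   fd.download([u'http://example.com/some-video'])
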
	params = None
	_ies = []
	_pps = []
	_download_retcode = None
	_num_downloads = None
	_screen_file = None

	def __init__(self, params):
		"""Create a FileDownloader object with the given options."""
		self._ies = []
		self._pps = []
		self._download_retcode = 0
		self._num_downloads = 0
		self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
		self.params = params

	@staticmethod
	def pmkdir(filename):
		"""Create directory components in filename. Similar to Unix "mkdir -p"."""
		components = filename.split(os.sep)
		aggregate = [os.sep.join(components[0:x]) for x in xrange(1, len(components))]
		aggregate = ['%s%s' % (x, os.sep) for x in aggregate] # Finish names with separator
		for dir in aggregate:
			if not os.path.exists(dir):
				os.mkdir(dir)

	@staticmethod
	def format_bytes(bytes):
		if bytes is None:
			return 'N/A'
		if type(bytes) is str:
			bytes = float(bytes)
		if bytes == 0.0:
			exponent = 0
		else:
			exponent = long(math.log(bytes, 1024.0))
		suffix = 'bkMGTPEZY'[exponent]
		converted = float(bytes) / float(1024**exponent)
		return '%.2f%s' % (converted, suffix)

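	# Illustrative values (verifiable from the code above, not in the original
	# script): format_bytes(1023) == '1023.00b', format_bytes(1048576) ==
	# '1.00M', and format_bytes(None) == 'N/A'.
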
	@staticmethod
	def calc_percent(byte_counter, data_len):
		if data_len is None:
			return '---.-%'
		return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))

	@staticmethod
	def calc_eta(start, now, total, current):
		if total is None:
			return '--:--'
		dif = now - start
		if current == 0 or dif < 0.001: # One millisecond
			return '--:--'
		rate = float(current) / dif
		eta = long((float(total) - float(current)) / rate)
		(eta_mins, eta_secs) = divmod(eta, 60)
		if eta_mins > 99:
			return '--:--'
		return '%02d:%02d' % (eta_mins, eta_secs)

	@staticmethod
	def calc_speed(start, now, bytes):
		dif = now - start
		if bytes == 0 or dif < 0.001: # One millisecond
			return '%10s' % '---b/s'
		return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))

	@staticmethod
	def best_block_size(elapsed_time, bytes):
		new_min = max(bytes / 2.0, 1.0)
		new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
		if elapsed_time < 0.001:
			return long(new_max)
		rate = bytes / elapsed_time
		if rate > new_max:
			return long(new_max)
		if rate < new_min:
			return long(new_min)
		return long(rate)

	@staticmethod
	def parse_bytes(bytestr):
		"""Parse a string indicating a byte quantity into a long integer."""
		matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
		if matchobj is None:
			return None
		number = float(matchobj.group(1))
		multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
		return long(round(number * multiplier))

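	# Illustrative values (not in the original script): parse_bytes('10.5m')
	# == 11010048 and parse_bytes('500k') == 512000, the inverse direction of
	# format_bytes() above; malformed strings return None.
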
	def add_info_extractor(self, ie):
		"""Add an InfoExtractor object to the end of the list."""
		self._ies.append(ie)
		ie.set_downloader(self)

	def add_post_processor(self, pp):
		"""Add a PostProcessor object to the end of the chain."""
		self._pps.append(pp)
		pp.set_downloader(self)

	def to_screen(self, message, skip_eol=False, ignore_encoding_errors=False):
		"""Print message to stdout if not in quiet mode."""
		try:
			if not self.params.get('quiet', False):
				terminator = [u'\n', u''][skip_eol]
				print >>self._screen_file, (u'%s%s' % (message, terminator)).encode(preferredencoding()),
			self._screen_file.flush()
		except (UnicodeEncodeError), err:
			if not ignore_encoding_errors:
				raise

	def to_stderr(self, message):
		"""Print message to stderr."""
		print >>sys.stderr, message.encode(preferredencoding())

	def to_cons_title(self, message):
		"""Set console/terminal window title to message."""
		if not self.params.get('consoletitle', False):
			return
		if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
			# c_wchar_p() might not be necessary if `message` is
			# already of type unicode()
			ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
		elif 'TERM' in os.environ:
			sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))

	def fixed_template(self):
		"""Checks if the output template is fixed."""
		return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None)

	def trouble(self, message=None):
		"""Determine action to take when a download problem appears.

		Depending on whether the downloader has been configured to ignore
		download errors, this method may raise an exception when errors
		are found, after printing the message.
		"""
		if message is not None:
			self.to_stderr(message)
		if not self.params.get('ignoreerrors', False):
			raise DownloadError(message)
		self._download_retcode = 1

	def slow_down(self, start_time, byte_counter):
		"""Sleep if the download speed is over the rate limit."""
		rate_limit = self.params.get('ratelimit', None)
		if rate_limit is None or byte_counter == 0:
			return
		now = time.time()
		elapsed = now - start_time
		if elapsed <= 0.0:
			return
		speed = float(byte_counter) / elapsed
		if speed > rate_limit:
			time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)

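	# Worked example (numbers are assumptions): with ratelimit 1048576 (1 MB/s),
	# if 2097152 bytes arrived after 1.5 s, the speed is ~1.33 MB/s, so the
	# method sleeps (2097152 - 1048576 * 1.5) / 1048576 = 0.5 s, bringing the
	# average back down to the limit.
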
	def temp_name(self, filename):
		"""Returns a temporary filename for the given filename."""
		if self.params.get('nopart', False) or filename == u'-' or \
				(os.path.exists(filename) and not os.path.isfile(filename)):
			return filename
		return filename + u'.part'

	def undo_temp_name(self, filename):
		if filename.endswith(u'.part'):
			return filename[:-len(u'.part')]
		return filename

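	# Illustrative round trip (filename is a placeholder): temp_name(u'v.mp4')
	# gives u'v.mp4.part' unless 'nopart' is set, the target is u'-' (stdout),
	# or the path exists and is not a regular file; undo_temp_name() reverses
	# the suffix.
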
	def try_rename(self, old_filename, new_filename):
		try:
			if old_filename == new_filename:
				return
			os.rename(old_filename, new_filename)
		except (IOError, OSError), err:
			self.trouble(u'ERROR: unable to rename file')

	def try_utime(self, filename, last_modified_hdr):
		"""Try to set the last-modified time of the given file."""
		if last_modified_hdr is None:
			return
		if not os.path.isfile(filename):
			return
		filetime = timeconvert(last_modified_hdr)
		if filetime is None:
			return
		try:
			os.utime(filename, (time.time(), filetime))
		except:
			pass

										 |  |  | 	def report_writedescription(self, descfn): | 
					
						
							| 
									
										
										
										
											2011-07-10 21:39:36 +02:00
										 |  |  | 		""" Report that the description file is being written """ | 
					
						
							|  |  |  | 		self.to_screen(u'[info] Writing video description to: %s' % descfn, ignore_encoding_errors=True) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	def report_writeinfojson(self, infofn): | 
					
						
							|  |  |  | 		""" Report that the metadata file has been written """ | 
					
						
							|  |  |  | 		self.to_screen(u'[info] Video description metadata as JSON to: %s' % infofn, ignore_encoding_errors=True) | 
					
						
							| 
									
										
										
										
											2011-07-07 12:47:36 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-07-22 22:40:50 +02:00
										 |  |  | 	def report_destination(self, filename): | 
					
						
							|  |  |  | 		"""Report destination filename.""" | 
					
						
							| 
									
										
										
										
											2010-10-23 13:19:26 +02:00
										 |  |  | 		self.to_screen(u'[download] Destination: %s' % filename, ignore_encoding_errors=True) | 
					
						
							| 
									
										
										
										
											2011-01-07 10:22:01 +01:00
										 |  |  | 
 | 
					
						

	def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
		"""Report download progress."""
		if self.params.get('noprogress', False):
			return
		self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
				(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
		self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
				(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))

	def report_resuming_byte(self, resume_len):
		"""Report attempt to resume at given byte."""
		self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

	def report_retry(self, count, retries):
		"""Report retry in case of HTTP error 5xx."""
		self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

	def report_file_already_downloaded(self, file_name):
		"""Report file has already been fully downloaded."""
		try:
			self.to_screen(u'[download] %s has already been downloaded' % file_name)
		except UnicodeEncodeError:
			self.to_screen(u'[download] The file has already been downloaded')

	def report_unable_to_resume(self):
		"""Report it was impossible to resume download."""
		self.to_screen(u'[download] Unable to resume')

	def report_finish(self):
		"""Report download finished."""
		if self.params.get('noprogress', False):
			self.to_screen(u'[download] Download completed')
		else:
			self.to_screen(u'')

	def increment_downloads(self):
		"""Increment the ordinal that assigns a number to each file."""
		self._num_downloads += 1

	def prepare_filename(self, info_dict):
		"""Generate the output filename."""
		try:
			template_dict = dict(info_dict)
			template_dict['epoch'] = unicode(long(time.time()))
			template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
			filename = self.params['outtmpl'] % template_dict
			return filename
		except (ValueError, KeyError):
			self.trouble(u'ERROR: invalid system charset or erroneous output template')
			return None
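
	# For illustration: 'outtmpl' is expanded with ordinary %-style string
	# formatting against the info dictionary, so a template such as
	#	u'%(stitle)s-%(id)s.%(ext)s'
	# applied to {'stitle': u'some_video', 'id': u'abc123', 'ext': u'mp4', ...}
	# yields u'some_video-abc123.mp4' (hypothetical values). The 'epoch' and
	# 'autonumber' keys added above are available as well; a key missing from
	# the dictionary raises KeyError, which is reported as an erroneous
	# output template.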

	def process_info(self, info_dict):
		"""Process a single dictionary returned by an InfoExtractor."""
		filename = self.prepare_filename(info_dict)
		# Do nothing else if in simulate mode
		if self.params.get('simulate', False):
			# Forced printings
			if self.params.get('forcetitle', False):
				print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forceurl', False):
				print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
				print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcedescription', False) and 'description' in info_dict:
				print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace')
			if self.params.get('forcefilename', False) and filename is not None:
				print filename.encode(preferredencoding(), 'xmlcharrefreplace')

			return

		if filename is None:
			return
		if self.params.get('nooverwrites', False) and os.path.exists(filename):
			self.to_stderr(u'WARNING: file exists and will be skipped')
			return

		try:
			self.pmkdir(filename)
		except (OSError, IOError), err:
			self.trouble(u'ERROR: unable to create directories: %s' % str(err))
			return

		if self.params.get('writedescription', False):
			try:
				descfn = filename + '.description'
				self.report_writedescription(descfn)
				descfile = open(descfn, 'wb')
				try:
					descfile.write(info_dict['description'].encode('utf-8'))
				finally:
					descfile.close()
			except (OSError, IOError):
				self.trouble(u'ERROR: Cannot write description file: %s' % descfn)
				return

		if self.params.get('writeinfojson', False):
			infofn = filename + '.info.json'
			self.report_writeinfojson(infofn)
			try:
				# Probe that a usable JSON encoder was imported; fail
				# gracefully otherwise.
				json.dump
			except (NameError, AttributeError):
				self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.')
				return
			try:
				infof = open(infofn, 'wb')
				try:
					json.dump(info_dict, infof)
				finally:
					infof.close()
			except (OSError, IOError):
				self.trouble(u'ERROR: Cannot write metadata to JSON file: %s' % infofn)
				return
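
		# At this point the sidecar files sit next to the media file: for a
		# filename of u'video.mp4' (illustrative) the description goes to
		# u'video.mp4.description' and the metadata to u'video.mp4.info.json'.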

		try:
			success = self._do_download(filename, info_dict['url'].encode('utf-8'), info_dict.get('player_url', None))
		except (OSError, IOError):
			raise UnavailableVideoError
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self.trouble(u'ERROR: unable to download video data: %s' % str(err))
			return
		except ContentTooShortError, err:
			self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
			return

		if success:
			try:
				self.post_process(filename, info_dict)
			except PostProcessingError, err:
				self.trouble(u'ERROR: postprocessing: %s' % str(err))
				return

	def download(self, url_list):
		"""Download a given list of URLs."""
		if len(url_list) > 1 and self.fixed_template():
			raise SameFileError(self.params['outtmpl'])

		for url in url_list:
			suitable_found = False
			for ie in self._ies:
				# Go to next InfoExtractor if not suitable
				if not ie.suitable(url):
					continue

				# Suitable InfoExtractor found
				suitable_found = True

				# Extract information from URL and process it
				ie.extract(url)

				# Suitable InfoExtractor had been found; go to next URL
				break

			if not suitable_found:
				self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)

		return self._download_retcode

	def post_process(self, filename, ie_info):
		"""Run the postprocessing chain on the given file."""
		info = dict(ie_info)
		info['filepath'] = filename
		for pp in self._pps:
			info = pp.run(info)
			if info is None:
				break
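
	# A sketch of the postprocessor contract assumed above (hypothetical
	# subclass, for illustration only): each postprocessor exposes run(info)
	# and either returns the (possibly updated) info dictionary to keep the
	# chain going, or None to stop it:
	#
	#	class NoopPP(PostProcessor):
	#		def run(self, information):
	#			# information['filepath'] points at the downloaded file
	#			return information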

	def _download_with_rtmpdump(self, filename, url, player_url):
		self.report_destination(filename)
		tmpfilename = self.temp_name(filename)

		# Check for rtmpdump first
		try:
			subprocess.call(['rtmpdump', '-h'], stdout=open(os.path.devnull, 'w'), stderr=subprocess.STDOUT)
		except (OSError, IOError):
			self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
			return False

		# Download using rtmpdump. rtmpdump returns exit code 2 when
		# the connection was interrupted and resuming appears to be
		# possible. This is part of rtmpdump's normal usage, AFAIK.
		basic_args = ['rtmpdump', '-q'] + (['-W', player_url] if player_url is not None else []) + ['-r', url, '-o', tmpfilename]
		retval = subprocess.call(basic_args + (['-e', '-k', '1'] if self.params.get('continuedl', False) else []))
		while retval == 2 or retval == 1:
			prevsize = os.path.getsize(tmpfilename)
			self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
			time.sleep(5.0) # This seems to be needed
			retval = subprocess.call(basic_args + ['-e'] + (['-k', '1'] if retval == 1 else []))
			cursize = os.path.getsize(tmpfilename)
			if prevsize == cursize and retval == 1:
				break
		if retval == 0:
			self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
			self.try_rename(tmpfilename, filename)
			return True
		else:
			self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
			return False
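
	# For illustration, with a player URL present and --continue given, the
	# first invocation assembled above is equivalent to:
	#
	#	rtmpdump -q -W <player_url> -r <url> -o <tmpfilename> -e -k 1
	#
	# and the loop keeps re-running it with -e (resume) for as long as
	# rtmpdump exits with code 2, or with code 1 while the file is still
	# growing.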

	def _do_download(self, filename, url, player_url):
		# Check file already present
		if self.params.get('continuedl', False) and os.path.isfile(filename) and not self.params.get('nopart', False):
			self.report_file_already_downloaded(filename)
			return True

		# Attempt to download using rtmpdump
		if url.startswith('rtmp'):
			return self._download_with_rtmpdump(filename, url, player_url)

		tmpfilename = self.temp_name(filename)
		stream = None
		open_mode = 'wb'

		# Do not include the Accept-Encoding header: resuming relies on raw
		# byte offsets, so the custom header below tells the HTTP handler to
		# skip compression for this request.
		headers = {'Youtubedl-no-compression': 'True'}
		basic_request = urllib2.Request(url, None, headers)
		request = urllib2.Request(url, None, headers)

		# Establish possible resume length
		if os.path.isfile(tmpfilename):
			resume_len = os.path.getsize(tmpfilename)
		else:
			resume_len = 0

		# Request parameters in case of being able to resume
		if self.params.get('continuedl', False) and resume_len != 0:
			self.report_resuming_byte(resume_len)
			request.add_header('Range', 'bytes=%d-' % resume_len)
			open_mode = 'ab'
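
		# For illustration: with resume_len == 1024 the request carries
		# 'Range: bytes=1024-', and the server either honours it (HTTP 206,
		# data appended via open_mode 'ab') or answers 416 'Requested range
		# not satisfiable', which the loop below inspects to decide whether
		# the file was in fact already complete.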

		count = 0
		retries = self.params.get('retries', 0)
		while count <= retries:
			# Establish connection
			try:
				data = urllib2.urlopen(request)
				break
			except urllib2.HTTPError, err:
				if (err.code < 500 or err.code >= 600) and err.code != 416:
					# Unexpected HTTP error
					raise
				elif err.code == 416:
					# Unable to resume (requested range not satisfiable)
					try:
						# Open the connection again without the range header
						data = urllib2.urlopen(basic_request)
						content_length = data.info().get('Content-Length', None)
					except urllib2.HTTPError, err:
						if err.code < 500 or err.code >= 600:
							raise
					else:
						# Examine the reported length
						if (content_length is not None and
							(resume_len - 100 < long(content_length) < resume_len + 100)):
							# The file had already been fully downloaded.
							# Explanation of the above condition: in issue #175 it was revealed that
							# YouTube sometimes adds or removes a few bytes from the end of the file,
							# changing the file size slightly and causing problems for some users. So
							# I decided to implement a suggested change and consider the file
							# completely downloaded if the reported size differs by less than 100
							# bytes from the one on the hard drive.
							self.report_file_already_downloaded(filename)
							self.try_rename(tmpfilename, filename)
							return True
						else:
							# The length does not match; start the download over
							self.report_unable_to_resume()
							open_mode = 'wb'
							break
			# Retry
			count += 1
			if count <= retries:
				self.report_retry(count, retries)

		if count > retries:
			self.trouble(u'ERROR: giving up after %s retries' % retries)
			return False

		data_len = data.info().get('Content-length', None)
		if data_len is not None:
			data_len = long(data_len) + resume_len
		data_len_str = self.format_bytes(data_len)
		byte_counter = resume_len
		block_size = 1024
		start = time.time()
		while True:
			# Download and write
			before = time.time()
			data_block = data.read(block_size)
			after = time.time()
			if len(data_block) == 0:
				break
			byte_counter += len(data_block)

			# Open file just in time
			if stream is None:
				try:
					(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
					filename = self.undo_temp_name(tmpfilename)
					self.report_destination(filename)
				except (OSError, IOError), err:
					self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
					return False
			try:
				stream.write(data_block)
			except (IOError, OSError), err:
				self.trouble(u'\nERROR: unable to write data: %s' % str(err))
				return False
			block_size = self.best_block_size(after - before, len(data_block))

			# Progress message
			percent_str = self.calc_percent(byte_counter, data_len)
			if data_len is None:
				# No Content-Length (e.g. chunked transfer): pass the total
				# through as unknown instead of doing arithmetic on None.
				eta_str = self.calc_eta(start, time.time(), None, byte_counter - resume_len)
			else:
				eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
			speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
			self.report_progress(percent_str, data_len_str, speed_str, eta_str)

			# Apply rate limit
			self.slow_down(start, byte_counter - resume_len)

		if stream is not None:
			# stream stays None if the response body was empty
			stream.close()
		self.report_finish()
		if data_len is not None and byte_counter != data_len:
			raise ContentTooShortError(byte_counter, long(data_len))
		self.try_rename(tmpfilename, filename)

		# Update file modification time
		if self.params.get('updatetime', True):
			self.try_utime(filename, data.info().get('last-modified', None))

		return True

class InfoExtractor(object):
	"""Information Extractor class.

	Information extractors are the classes that, given a URL, extract
	information from the video (or videos) the URL refers to. This
	information includes the real video URL, the video title and simplified
	title, author and others. The information is stored in a dictionary
	which is then passed to the FileDownloader. The FileDownloader
	processes this information, possibly downloading the video to the file
	system, among other possible outcomes. The dictionaries must include
	the following fields:

	id:		Video identifier.
	url:		Final video URL.
	uploader:	Nickname of the video uploader.
	title:		Literal title.
	stitle:		Simplified title.
	ext:		Video filename extension.
	format:		Video format.
	player_url:	SWF Player URL (may be None).

	The following fields are optional. Their primary purpose is to allow
	youtube-dl to serve as the backend for a video search function, such
	as the one in youtube2mp3. They are only used when their respective
	forced printing functions are called:

	thumbnail:	Full URL to a video thumbnail image.
	description:	One-line video description.

	Subclasses of this one should re-define the _real_initialize() and
	_real_extract() methods, as well as the suitable() static method.
	Probably, they should also be instantiated and added to the main
	downloader.
	"""

	_ready = False
	_downloader = None

	def __init__(self, downloader=None):
		"""Constructor. Receives an optional downloader."""
		self._ready = False
		self.set_downloader(downloader)

	@staticmethod
	def suitable(url):
		"""Receives a URL and returns True if suitable for this IE."""
		return False

	def initialize(self):
		"""Initializes an instance (authentication, etc)."""
		if not self._ready:
			self._real_initialize()
			self._ready = True

	def extract(self, url):
		"""Extracts URL information and returns it in a list of dicts."""
		self.initialize()
		return self._real_extract(url)

	def set_downloader(self, downloader):
		"""Sets the downloader for this IE."""
		self._downloader = downloader

	def _real_initialize(self):
		"""Real initialization process. Redefine in subclasses."""
		pass

	def _real_extract(self, url):
		"""Real extraction process. Redefine in subclasses."""
		pass
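
# A minimal sketch of an InfoExtractor subclass following the contract
# described above (hypothetical site and values, for illustration only):
#
#	class ExampleIE(InfoExtractor):
#		_VALID_URL = r'^https?://(?:www\.)?example\.com/watch/([0-9a-z]+)$'
#
#		@staticmethod
#		def suitable(url):
#			return re.match(ExampleIE._VALID_URL, url) is not None
#
#		def _real_extract(self, url):
#			video_id = re.match(self._VALID_URL, url).group(1)
#			self._downloader.increment_downloads()
#			self._downloader.process_info({
#				'id':		video_id,
#				'url':		u'http://media.example.com/%s.mp4' % video_id,
#				'uploader':	u'uploader_nick',
#				'title':	u'Example video',
#				'stitle':	u'Example_video',
#				'ext':		u'mp4',
#				'format':	u'mp4',
#				'player_url':	None,
#			})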

class YoutubeIE(InfoExtractor):
	"""Information extractor for youtube.com."""

	_VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed|e)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?)?([0-9A-Za-z_-]+)(?(1).+)?$'
	_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
	_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
	_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
	_NETRC_MACHINE = 'youtube'
	# Listed in order of quality
	_available_formats = ['38', '37', '22', '45', '35', '34', '43', '18', '6', '5', '17', '13']
	_video_extensions = {
		'13': '3gp',
		'17': 'mp4',
		'18': 'mp4',
		'22': 'mp4',
		'37': 'mp4',
		'38': 'video', # You actually don't know if this will be MOV, AVI or whatever
		'43': 'webm',
		'45': 'webm',
	}
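
	# A few URL shapes the pattern above accepts (illustrative):
	#
	#	http://www.youtube.com/watch?v=BaW_jenozKc
	#	http://youtube.com/v/BaW_jenozKc
	#	http://youtu.be/BaW_jenozKc
	#	BaW_jenozKc		(a bare video ID matches as well)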

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeIE._VALID_URL, url) is not None)

	def report_lang(self):
		"""Report attempt to set language."""
		self._downloader.to_screen(u'[youtube] Setting language')

	def report_login(self):
		"""Report attempt to log in."""
		self._downloader.to_screen(u'[youtube] Logging in')

	def report_age_confirmation(self):
		"""Report attempt to confirm age."""
		self._downloader.to_screen(u'[youtube] Confirming age')

	def report_video_webpage_download(self, video_id):
		"""Report attempt to download video webpage."""
		self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)

	def report_video_info_webpage_download(self, video_id):
		"""Report attempt to download video info webpage."""
		self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)

	def report_information_extraction(self, video_id):
		"""Report attempt to extract video information."""
		self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)

	def report_unavailable_format(self, video_id, format):
		"""Report that the requested video format is unavailable."""
		self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))

	def report_rtmp_download(self):
		"""Indicate the download will use the RTMP protocol."""
		self._downloader.to_screen(u'[youtube] RTMP download detected')

	def _real_initialize(self):
		if self._downloader is None:
			return

		username = None
		password = None
		downloader_params = self._downloader.params

		# Attempt to use provided username and password or .netrc data
		if downloader_params.get('username', None) is not None:
			username = downloader_params['username']
			password = downloader_params['password']
		elif downloader_params.get('usenetrc', False):
			try:
				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
				if info is not None:
					username = info[0]
					password = info[2]
				else:
					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
			except (IOError, netrc.NetrcParseError), err:
				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
				return

		# Set language
		request = urllib2.Request(self._LANG_URL)
		try:
			self.report_lang()
			urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
			return
							| 
									
										
										
										
											2009-03-02 00:02:56 +01:00
										 |  |  | 		# No authentication to be performed | 
					
						
							|  |  |  | 		if username is None: | 
					
						
							|  |  |  | 			return | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2008-07-21 23:12:31 +02:00
										 |  |  | 		# Log in | 
					
						
							| 
									
										
										
										
											2008-07-21 23:53:06 +02:00
										 |  |  | 		login_form = { | 
					
						
							|  |  |  | 				'current_form': 'loginForm', | 
					
						
							| 
									
										
										
										
											2008-07-21 23:12:31 +02:00
										 |  |  | 				'next':		'/', | 
					
						
							|  |  |  | 				'action_login':	'Log In', | 
					
						
							|  |  |  | 				'username':	username, | 
					
						
							| 
									
										
										
										
											2008-07-21 23:53:06 +02:00
										 |  |  | 				'password':	password, | 
					
						
							|  |  |  | 				} | 
					
						
							| 
									
										
										
										
											2011-01-12 20:20:37 +01:00
										 |  |  | 		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form)) | 
					
						
							| 
									
										
										
										
											2008-07-21 23:12:31 +02:00
										 |  |  | 		try: | 
					
						
							| 
									
										
										
										
											2008-07-22 22:40:50 +02:00
										 |  |  | 			self.report_login() | 
					
						
							| 
									
										
										
										
											2008-07-21 23:12:31 +02:00
										 |  |  | 			login_results = urllib2.urlopen(request).read() | 
					
						
							|  |  |  | 			if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None: | 
					
						
							| 
									
										
										
										
											2009-04-23 22:20:06 +02:00
										 |  |  | 				self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password') | 
					
						
							| 
									
										
										
										
											2008-07-21 23:12:31 +02:00
										 |  |  | 				return | 
					
						
							|  |  |  | 		except (urllib2.URLError, httplib.HTTPException, socket.error), err: | 
					
						
							| 
									
										
										
										
											2009-04-23 22:20:06 +02:00
										 |  |  | 			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err)) | 
					
						
							| 
									
										
										
										
											2008-07-21 23:12:31 +02:00
										 |  |  | 			return | 
					
						
							| 
									
										
										
										
											2011-01-07 10:22:01 +01:00
										 |  |  | 
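
		# urllib.urlencode(login_form) produces form-encoded data such as
		# 'current_form=loginForm&next=%2F&action_login=Log+In&...' (dict
		# order is arbitrary); passing data to urllib2.Request makes this a
		# POST. Failure is detected heuristically: if the response still
		# contains the login form, the credentials were rejected.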

		# Confirm age
		age_form = {
				'next_url':		'/',
				'action_confirm':	'Confirm',
				}
		request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form))
		try:
			self.report_age_confirmation()
			age_results = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
			return

	def _real_extract(self, url):
		# Extract video id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return
		video_id = mobj.group(2)
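
		# mobj.group(2) is the video id captured by _VALID_URL, e.g.
		# 'BaW_jenozKc' for http://www.youtube.com/watch?v=BaW_jenozKc
		# (illustrative id).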

		# Get video webpage
		self.report_video_webpage_download(video_id)
		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
		try:
			video_webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
			return

		# Attempt to extract SWF player URL
		mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
		if mobj is not None:
			player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
		else:
			player_url = None
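		# The SWF URL appears JSON-escaped in the page ('http:\/\/...');
		# re.sub(r'\\(.)', r'\1', ...) drops each backslash, e.g. turning
		# 'http:\/\/s.ytimg.com\/yt\/swf\/watch.swf' into
		# 'http://s.ytimg.com/yt/swf/watch.swf' (illustrative URL).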

		# Get video info
		self.report_video_info_webpage_download(video_id)
		for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
			video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
					   % (video_id, el_type))
			request = urllib2.Request(video_info_url)
			try:
				video_info_webpage = urllib2.urlopen(request).read()
				video_info = parse_qs(video_info_webpage)
				if 'token' in video_info:
					break
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
				return
		if 'token' not in video_info:
			if 'reason' in video_info:
				self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
			else:
				self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
			return
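
		# parse_qs maps each query key to a list of values, e.g.
		# parse_qs('token=abc&fmt_list=34%2F0') -> {'token': ['abc'], 'fmt_list': ['34/0']},
		# which is why values are read as video_info['key'][0]. The '&el='
		# variants above are tried in order until a response carries a token.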

		# Start extracting information
		self.report_information_extraction(video_id)

		# uploader
		if 'author' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = urllib.unquote_plus(video_info['author'][0])

		# title
		if 'title' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract video title')
			return
		video_title = urllib.unquote_plus(video_info['title'][0])
		video_title = video_title.decode('utf-8')
		video_title = sanitize_title(video_title)

		# simplified title
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
		simple_title = simple_title.strip(ur'_')
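
		# This collapses each run of characters outside simple_title_chars
		# into one underscore and trims the ends, e.g. u'My Video: Part 2!'
		# becomes u'My_Video_Part_2' (assuming simple_title_chars is ASCII
		# letters and digits).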

		# thumbnail image
		if 'thumbnail_url' not in video_info:
			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
			video_thumbnail = ''	# don't panic if we can't find it
		else:
			video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])

		# upload date
		upload_date = u'NA'
		mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
		if mobj is not None:
			upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
			format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
			for expression in format_expressions:
				try:
					upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
				except ValueError:
					pass
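
		# e.g. 'Nov 17, 2010' is normalized to 'Nov 17 2010' above and then
		# matched by '%b %d %Y', yielding upload_date = '20101117'.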

		# description
		try:
			lxml.etree
		except NameError:
			video_description = u'No description available.'
			if self._downloader.params.get('forcedescription', False) or self._downloader.params.get('writedescription', False):
				mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', video_webpage)
				if mobj is not None:
					video_description = mobj.group(1).decode('utf-8')
		else:
			html_parser = lxml.etree.HTMLParser(encoding='utf-8')
			vwebpage_doc = lxml.etree.parse(StringIO.StringIO(video_webpage), html_parser)
			video_description = u''.join(vwebpage_doc.xpath('id("eow-description")//text()'))
			# TODO use another parser
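
		# The bare 'lxml.etree' reference raises NameError when the module
		# failed to import (presumably guarded by try/except at module
		# level), which selects the regex fallback; with lxml available the
		# page is parsed properly instead.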

		# token
		video_token = urllib.unquote_plus(video_info['token'][0])

		# Decide which formats to download
		req_format = self._downloader.params.get('format', None)

		if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
			self.report_rtmp_download()
			video_url_list = [(None, video_info['conn'][0])]
		elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
			url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
			url_data = [parse_qs(uds) for uds in url_data_strs]
			url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
			url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
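
			# 'url_encoded_fmt_stream_map' is a comma-separated list of
			# querystring-encoded stream records, e.g. (illustrative)
			#   itag=34&url=http%3A%2F%2F...,itag=18&url=http%3A%2F%2F...
			# so url_map ends up mapping itag -> stream URL, like
			#   {'34': 'http://...', '18': 'http://...'}.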

			format_limit = self._downloader.params.get('format_limit', None)
			if format_limit is not None and format_limit in self._available_formats:
				format_list = self._available_formats[self._available_formats.index(format_limit):]
			else:
				format_list = self._available_formats
			existing_formats = [x for x in format_list if x in url_map]
			if len(existing_formats) == 0:
				self._downloader.trouble(u'ERROR: no known formats available for video')
				return
			if req_format is None:
				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
			elif req_format == '-1':
				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
			else:
				# Specific format
				if req_format not in url_map:
					self._downloader.trouble(u'ERROR: requested format not available')
					return
				video_url_list = [(req_format, url_map[req_format])]
		else:
			self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
			return

		for format_param, video_real_url in video_url_list:
			# At this point we have a new video
			self._downloader.increment_downloads()

			# Extension
			video_extension = self._video_extensions.get(format_param, 'flv')

			try:
				# Process video information
				self._downloader.process_info({
					'id':		video_id.decode('utf-8'),
					'url':		video_real_url.decode('utf-8'),
					'uploader':	video_uploader.decode('utf-8'),
					'upload_date':	upload_date,
					'title':	video_title,
					'stitle':	simple_title,
					'ext':		video_extension.decode('utf-8'),
					'format':	(format_param is None and u'NA' or format_param.decode('utf-8')),
					'thumbnail':	video_thumbnail.decode('utf-8'),
					'description':	video_description,
					'player_url':	player_url,
				})
			except UnavailableVideoError, err:
				self._downloader.trouble(u'\nERROR: unable to download video')
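
		# process_info() hands this metadata dict to the FileDownloader,
		# which presumably performs the actual download; because the
		# except clause sits inside the loop, a failed format does not
		# abort the remaining ones.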


class MetacafeIE(InfoExtractor):
	"""Information Extractor for metacafe.com."""

	_VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
	_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
	_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
	_youtube_ie = None

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(MetacafeIE._VALID_URL, url) is not None)
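
	# suitable() is presumably how URL dispatch works: the FileDownloader
	# asks each registered InfoExtractor in turn and uses the first one
	# that returns True for the given URL.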

	def report_disclaimer(self):
		"""Report disclaimer retrieval."""
		self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')

	def report_age_confirmation(self):
		"""Report attempt to confirm age."""
		self._downloader.to_screen(u'[metacafe] Confirming age')

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)

	def _real_initialize(self):
		# Retrieve disclaimer
		request = urllib2.Request(self._DISCLAIMER)
		try:
			self.report_disclaimer()
			disclaimer = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
			return

		# Confirm age
		disclaimer_form = {
			'filters': '0',
			'submit': "Continue - I'm over 18",
			}
		request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form))
		try:
			self.report_age_confirmation()
			disclaimer = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
			return

	def _real_extract(self, url):
		# Extract id and simplified title from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return

		video_id = mobj.group(1)

		# Check if video comes from YouTube
		mobj2 = re.match(r'^yt-(.*)$', video_id)
		if mobj2 is not None:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % mobj2.group(1))
			return
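
		# e.g. a Metacafe id like 'yt-BaW_jenozKc' (illustrative) is handed
		# straight to the YouTube extractor as
		# http://www.youtube.com/watch?v=BaW_jenozKc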

		# At this point we have a new video
		self._downloader.increment_downloads()

		simple_title = mobj.group(2).decode('utf-8')

		# Retrieve video webpage to extract further information
		request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
		if mobj is not None:
			mediaURL = urllib.unquote(mobj.group(1))
			video_extension = mediaURL[-3:]

			# Extract gdaKey if available
			mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
			if mobj is None:
				video_url = mediaURL
			else:
				gdaKey = mobj.group(1)
				video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
		else:
			mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: unable to extract media URL')
				return
			vardict = parse_qs(mobj.group(1))
			if 'mediaData' not in vardict:
				self._downloader.trouble(u'ERROR: unable to extract media URL')
				return
			mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
			if mobj is None:
				self._downloader.trouble(u'ERROR: unable to extract media URL')
				return
			mediaURL = mobj.group(1).replace('\\/', '/')
			video_extension = mediaURL[-3:]
			video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
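
			# This fallback digs the stream out of the player's flashvars:
			# 'mediaData' holds a JSON-ish blob whose escaped slashes ('\/')
			# are restored, and the access key is appended as the '__gda__'
			# query parameter, mirroring the gdaKey branch above.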

		mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)

		mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = mobj.group(1)

		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url.decode('utf-8'),
				'uploader':	video_uploader.decode('utf-8'),
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		video_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'\nERROR: unable to download video')


class DailymotionIE(InfoExtractor):
	"""Information Extractor for Dailymotion"""

	_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^_/]+)_([^/]+)'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(DailymotionIE._VALID_URL, url) is not None)
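
	# _VALID_URL captures the id and the slug around the first underscore,
	# e.g. http://www.dailymotion.com/video/x7u5kn_some-title (illustrative)
	# gives group(1) = 'x7u5kn' and group(2) = 'some-title'.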

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# Extract id and simplified title from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		simple_title = mobj.group(2).decode('utf-8')
		video_extension = 'flv'

		# Retrieve video webpage to extract further information
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract media URL')
			return
		mediaURL = urllib.unquote(mobj.group(1))

		# TODO: prepend 'http://www.dailymotion.com' when mediaURL is a relative URL

		video_url = mediaURL

		# Alternative title pattern: '<meta\s+name="title"\s+content="Dailymotion\s*[:\-]\s*(.*?)"\s*\/\s*>'
		mobj = re.search(r'(?im)<title>Dailymotion\s*[\-:]\s*(.+?)</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)

		mobj = re.search(r'(?im)<Attribute name="owner">(.+?)</Attribute>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = mobj.group(1)

		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url.decode('utf-8'),
				'uploader':	video_uploader.decode('utf-8'),
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		video_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'\nERROR: unable to download video')


class GoogleIE(InfoExtractor):
	"""Information extractor for video.google.com."""

	_VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(GoogleIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# Extract id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		video_extension = 'mp4'

		# Retrieve video webpage to extract further information
		request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader, and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r"download_url:'([^']+)'", webpage)
		if mobj is None:
			video_extension = 'flv'
			mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract media URL')
			return
		mediaURL = urllib.unquote(mobj.group(1))
		mediaURL = mediaURL.replace('\\x3d', '\x3d')
		mediaURL = mediaURL.replace('\\x26', '\x26')
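
		# The page's JavaScript hex-escapes these characters: '\x3d' is '='
		# and '\x26' is '&', so the replacements restore a usable URL.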
 | 
					
						
							|  |  |  | 		video_url = mediaURL | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		mobj = re.search(r'<title>(.*)</title>', webpage) | 
					
						
							|  |  |  | 		if mobj is None: | 
					
						
							|  |  |  | 			self._downloader.trouble(u'ERROR: unable to extract title') | 
					
						
							|  |  |  | 			return | 
					
						
							|  |  |  | 		video_title = mobj.group(1).decode('utf-8') | 
					
						
							| 
									
										
										
										
											2010-02-12 21:01:55 +01:00
										 |  |  | 		video_title = sanitize_title(video_title) | 
					
						
							| 
									
										
										
										
											2010-02-21 00:13:34 +01:00
										 |  |  | 		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title) | 
					
						
							| 
									
										
										
										
											2010-01-15 16:26:41 -05:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2010-04-04 17:57:59 +02:00
										 |  |  | 		# Extract video description | 
					
						
							|  |  |  | 		mobj = re.search(r'<span id=short-desc-content>([^<]*)</span>', webpage) | 
					
						
							|  |  |  | 		if mobj is None: | 
					
						
							|  |  |  | 			self._downloader.trouble(u'ERROR: unable to extract video description') | 
					
						
							|  |  |  | 			return | 
					
						
							|  |  |  | 		video_description = mobj.group(1).decode('utf-8') | 
					
						
							|  |  |  | 		if not video_description: | 
					
						
							|  |  |  | 			video_description = 'No description available.' | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		# Extract video thumbnail | 
					
						
							|  |  |  | 		if self._downloader.params.get('forcethumbnail', False): | 
					
						
							|  |  |  | 			request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id))) | 
					
						
							|  |  |  | 			try: | 
					
						
							|  |  |  | 				webpage = urllib2.urlopen(request).read() | 
					
						
							|  |  |  | 			except (urllib2.URLError, httplib.HTTPException, socket.error), err: | 
					
						
							|  |  |  | 				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) | 
					
						
							|  |  |  | 				return | 
					
						
							|  |  |  | 			mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage) | 
					
						
							|  |  |  | 			if mobj is None: | 
					
						
							|  |  |  | 				self._downloader.trouble(u'ERROR: unable to extract video thumbnail') | 
					
						
							|  |  |  | 				return | 
					
						
							|  |  |  | 			video_thumbnail = mobj.group(1) | 
					
						
							|  |  |  | 		else:	# we need something to pass to process_info | 
					
						
							|  |  |  | 			video_thumbnail = '' | 
					
						

		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url.decode('utf-8'),
				'uploader':	u'NA',
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		video_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'\nERROR: unable to download video')


class PhotobucketIE(InfoExtractor):
	"""Information extractor for photobucket.com."""

	_VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
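	# The pattern above targets URLs of roughly this (hypothetical) shape:
	#   http://media.photobucket.com/albums/v123/user/?action=view&current=clip.flv
	# The single capture group (the .flv name) becomes the video id below.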
					
						

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(PhotobucketIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# Extract id from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		video_extension = 'flv'

		# Retrieve video webpage to extract further information
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract URL, uploader, and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract media URL')
			return
		mediaURL = urllib.unquote(mobj.group(1))

		video_url = mediaURL

		mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		video_uploader = mobj.group(2).decode('utf-8')

		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url.decode('utf-8'),
				'uploader':	video_uploader,
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		video_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'\nERROR: unable to download video')


class YahooIE(InfoExtractor):
	"""Information extractor for video.yahoo.com."""

	# _VALID_URL matches all Yahoo! Video URLs
	# _VPAGE_URL matches only the extractable '/watch/' URLs
	_VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
	_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
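	# Illustrative examples (ids made up) of the two accepted shapes:
	#   http://video.yahoo.com/watch/1234567/7654321          (extractable)
	#   http://xy.video.yahoo.com/network/1234567?v=7654321   (rewritten first)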
					
						
	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(YahooIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url, new_video=True):
		# Extract ID from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video (only count it once, even if we
		# re-enter below after rewriting the URL)
		if new_video:
			self._downloader.increment_downloads()
		video_id = mobj.group(2)
		video_extension = 'flv'

		# Rewrite valid but non-extractable URLs as
		# extractable English language /watch/ URLs
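		# (a hypothetical http://xy.video.yahoo.com/network/1234567?v=7654321
		# gets its real id/vid scraped from the page and is re-fed to this
		# extractor as http://video.yahoo.com/watch/<vid>/<id>)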
					
						
		if re.match(self._VPAGE_URL, url) is None:
			request = urllib2.Request(url)
			try:
				webpage = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
				return

			mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: Unable to extract id field')
				return
			yahoo_id = mobj.group(1)

			mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
			if mobj is None:
				self._downloader.trouble(u'ERROR: Unable to extract vid field')
				return
			yahoo_vid = mobj.group(1)

			url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
			return self._real_extract(url, new_video=False)

		# Retrieve video webpage to extract further information
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract uploader and title from webpage
		self.report_extraction(video_id)
		mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video title')
			return
		video_title = mobj.group(1).decode('utf-8')
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video uploader')
			return
		# group(1) is the people/profile discriminator; the name is group(2)
		video_uploader = mobj.group(2).decode('utf-8')

		# Extract video thumbnail
		mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
			return
		video_thumbnail = mobj.group(1).decode('utf-8')

		# Extract video description
		mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video description')
			return
		video_description = mobj.group(1).decode('utf-8')
		if not video_description: video_description = 'No description available.'

		# Extract video height and width
		mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video height')
			return
		yv_video_height = mobj.group(1)

		mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video width')
			return
		yv_video_width = mobj.group(1)

		# Retrieve video playlist to extract media URL
		# I'm not completely sure what all these options are, but we
		# seem to need most of them, otherwise the server sends a 401.
		yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
		yv_bitrate = '700'  # according to Wikipedia this is hard-coded
		request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
								  '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
								  '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Extract media URL from playlist XML
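		# The playlist entry is assumed to look roughly like (illustrative):
		#   <STREAM APP="http://..." FULLPATH="/path/clip.flv?params" ...>
		# APP and FULLPATH are concatenated into the final media URL.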
					
						
		mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Unable to extract media URL')
			return
		video_url = urllib.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
		video_url = re.sub(r'(?u)&(.+?);', htmlentity_transform, video_url)

		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url,
				'uploader':	video_uploader,
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		video_extension.decode('utf-8'),
				'thumbnail':	video_thumbnail,
				'description':	video_description,
				'player_url':	None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'\nERROR: unable to download video')


class VimeoIE(InfoExtractor):
	"""Information extractor for vimeo.com."""

	# _VALID_URL matches Vimeo URLs
	_VALID_URL = r'(?:https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
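	# URL shapes accepted by the pattern above (video id made up):
	#   http://vimeo.com/12345678
	#   http://player.vimeo.com/video/12345678
	#   http://vimeo.com/groups/somegroup/videos/12345678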
					
						
	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(VimeoIE._VALID_URL, url) is not None)

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url, new_video=True):
		# Extract ID from URL
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# At this point we have a new video
		self._downloader.increment_downloads()
		video_id = mobj.group(1)

		# Retrieve video webpage to extract further information
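		# (what we fetch here is the moogaloop player config, which appears
		# to be XML-like; hence the tag-style patterns parsed below)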
					
						
		request = urllib2.Request("http://vimeo.com/moogaloop/load/clip:%s" % video_id, None, std_headers)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return

		# Now we begin extracting as much information as we can from what we
		# retrieved. First we extract the information common to all extractors,
		# and later we extract those that are Vimeo specific.
		self.report_extraction(video_id)

		# Extract title
		mobj = re.search(r'<caption>(.*?)</caption>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video title')
			return
		video_title = mobj.group(1).decode('utf-8')
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		# Extract uploader
		mobj = re.search(r'<uploader_url>http://vimeo.com/(.*?)</uploader_url>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video uploader')
			return
		video_uploader = mobj.group(1).decode('utf-8')

		# Extract video thumbnail
		mobj = re.search(r'<thumbnail>(.*?)</thumbnail>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
			return
		video_thumbnail = mobj.group(1).decode('utf-8')

		# # Extract video description
		# mobj = re.search(r'<meta property="og:description" content="(.*)" />', webpage)
		# if mobj is None:
		# 	self._downloader.trouble(u'ERROR: unable to extract video description')
		# 	return
		# video_description = mobj.group(1).decode('utf-8')
		# if not video_description: video_description = 'No description available.'
		video_description = 'No description available.'
					
						

		# Vimeo specific: extract request signature
		mobj = re.search(r'<request_signature>(.*?)</request_signature>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract request signature')
			return
		sig = mobj.group(1).decode('utf-8')

		# Vimeo specific: extract request signature expiration
		mobj = re.search(r'<request_signature_expires>(.*?)</request_signature_expires>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract request signature expiration')
			return
		sig_exp = mobj.group(1).decode('utf-8')

		video_url = "http://vimeo.com/moogaloop/play/clip:%s/%s/%s" % (video_id, sig, sig_exp)

		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url,
				'uploader':	video_uploader,
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		u'mp4',
				'thumbnail':	video_thumbnail,
				'description':	video_description,
				'player_url':	None,
			})
		except UnavailableVideoError:
			self._downloader.trouble(u'ERROR: unable to download video')


class GenericIE(InfoExtractor):
	"""Generic last-resort information extractor."""

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return True

	def report_download_webpage(self, video_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
		self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)

	def report_extraction(self, video_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# At this point we have a new video
		self._downloader.increment_downloads()

		video_id = url.split('/')[-1]
		request = urllib2.Request(url)
		try:
			self.report_download_webpage(video_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
			return
		except ValueError, err:
			# since this is the last-resort InfoExtractor, if
			# this error is thrown, it'll be thrown here
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		self.report_extraction(video_id)
		# Start with something easy: JW Player in SWFObject
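		# An illustrative page snippet the first regex would catch:
		#   flashvars: "file=http://example.com/media/clip.flv&autostart=true"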
					
						
		mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
		if mobj is None:
			# Broaden the search a little bit
			mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		# It's possible that one of the regexes
		# matched, but returned an empty group:
		if mobj.group(1) is None:
			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
			return

		video_url = urllib.unquote(mobj.group(1))
		video_id  = os.path.basename(video_url)

		# here's a fun little line of code for you:
		video_extension = os.path.splitext(video_id)[1][1:]
		video_id        = os.path.splitext(video_id)[0]

		# it's tempting to parse this further, but you would
		# have to take into account all the variations like
		#   Video Title - Site Name
		#   Site Name | Video Title
		#   Video Title - Tagline | Site Name
		# and so on and so forth; it's just not practical
		mobj = re.search(r'<title>(.*)</title>', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		video_title = mobj.group(1).decode('utf-8')
		video_title = sanitize_title(video_title)
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)

		# video uploader is domain name
		mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract uploader')
			return
		video_uploader = mobj.group(1).decode('utf-8')

		try:
			# Process video information
			self._downloader.process_info({
				'id':		video_id.decode('utf-8'),
				'url':		video_url.decode('utf-8'),
				'uploader':	video_uploader,
				'upload_date':	u'NA',
				'title':	video_title,
				'stitle':	simple_title,
				'ext':		video_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError, err:
			self._downloader.trouble(u'\nERROR: unable to download video')


class YoutubeSearchIE(InfoExtractor):
	"""Information Extractor for YouTube search queries."""
	_VALID_QUERY = r'ytsearch(\d+|all)?:[\s\S]+'
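	# Illustrative queries accepted by the pattern above:
	#   ytsearch:cute cats      (download the first result)
	#   ytsearch5:cute cats     (download the first 5 results)
	#   ytsearchall:cute cats   (capped at _max_youtube_results)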
					
						
	_TEMPLATE_URL = 'http://www.youtube.com/results?search_query=%s&page=%s&gl=US&hl=en'
	_VIDEO_INDICATOR = r'href="/watch\?v=.+?"'
	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
	_youtube_ie = None
	_max_youtube_results = 1000

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return

		# split only on the first colon; the query itself may contain colons
		prefix, query = query.split(':', 1)
		prefix = prefix[8:]
		query  = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_youtube_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_youtube_results:
					self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)'  % (self._max_youtube_results, n))
					n = self._max_youtube_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""

		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
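				# An illustrative match is href="/watch?v=dQw4w9WgXcQ", where
				# split('=')[2] gives 'dQw4w9WgXcQ"' and [:-1] drops the quote.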
					
						
				video_id = page[mobj.span()[0]:mobj.span()[1]].split('=')[2][:-1]
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
				return

			pagenum = pagenum + 1


class GoogleSearchIE(InfoExtractor):
	"""Information Extractor for Google Video search queries."""
	_VALID_QUERY = r'gvsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
	_VIDEO_INDICATOR = r'videoplay\?docid=([^\&>]+)\&'
	_MORE_PAGES_INDICATOR = r'<span>Next</span>'
	_google_ie = None
	_max_google_results = 1000

	def __init__(self, google_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._google_ie = google_ie

	@staticmethod
	def suitable(url):
		return (re.match(GoogleSearchIE._VALID_QUERY, url) is not None)

	def report_download_page(self, query, pagenum):
		"""Report attempt to download playlist page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._google_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return

		# split only on the first colon; the query itself may contain colons
		prefix, query = query.split(':', 1)
		prefix = prefix[8:]
		query  = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_google_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_google_results:
					self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)'  % (self._max_google_results, n))
					n = self._max_google_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""

		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = mobj.group(1)
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._google_ie.extract('http://video.google.com/videoplay?docid=%s' % id)
				return

			pagenum = pagenum + 1


class YahooSearchIE(InfoExtractor):
	"""Information Extractor for Yahoo! Video search queries."""
	_VALID_QUERY = r'yvsearch(\d+|all)?:[\s\S]+'
	_TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
	_VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
	_MORE_PAGES_INDICATOR = r'\s*Next'
	_yahoo_ie = None
	_max_yahoo_results = 1000

	def __init__(self, yahoo_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._yahoo_ie = yahoo_ie

	@staticmethod
	def suitable(url):
		return (re.match(YahooSearchIE._VALID_QUERY, url) is not None)
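
	# Illustrative examples (hypothetical queries, not executed anywhere):
	# suitable() accepts anything matching _VALID_QUERY above, e.g.
	# 'yvsearch:funny videos', 'yvsearch3:funny videos' and
	# 'yvsearchall:funny videos'; a plain URL does not match and is left
	# to the other extractors.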

	def report_download_page(self, query, pagenum):
		"""Report attempt to download search results page with given number."""
		query = query.decode(preferredencoding())
		self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))

	def _real_initialize(self):
		self._yahoo_ie.initialize()

	def _real_extract(self, query):
		mobj = re.match(self._VALID_QUERY, query)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
			return

		# Split on the first colon only, so that search terms which
		# themselves contain colons do not break the unpacking
		prefix, query = query.split(':', 1)
		prefix = prefix[8:]
		query = query.encode('utf-8')
		if prefix == '':
			self._download_n_results(query, 1)
			return
		elif prefix == 'all':
			self._download_n_results(query, self._max_yahoo_results)
			return
		else:
			try:
				n = long(prefix)
				if n <= 0:
					self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
					return
				elif n > self._max_yahoo_results:
					self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
					n = self._max_yahoo_results
				self._download_n_results(query, n)
				return
			except ValueError: # parsing prefix as integer fails
				self._download_n_results(query, 1)
				return

	def _download_n_results(self, query, n):
		"""Downloads a specified number of results for a query"""

		video_ids = []
		already_seen = set()
		pagenum = 1

		while True:
			self.report_download_page(query, pagenum)
			result_url = self._TEMPLATE_URL % (urllib.quote_plus(query), pagenum)
			request = urllib2.Request(result_url)
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				video_id = mobj.group(1)
				if video_id not in already_seen:
					video_ids.append(video_id)
					already_seen.add(video_id)
					if len(video_ids) == n:
						# Specified n videos reached
						for id in video_ids:
							self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
						return

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				for id in video_ids:
					self._yahoo_ie.extract('http://video.yahoo.com/watch/%s' % id)
				return

			pagenum = pagenum + 1

class YoutubePlaylistIE(InfoExtractor):
	"""Information Extractor for YouTube playlists."""

	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/|user/.*?#[pg]/c/)([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
	_youtube_ie = None
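
	# Illustrative examples (hypothetical URLs, not executed anywhere):
	# _VALID_URL matches, among others,
	#   http://www.youtube.com/view_play_list?p=PLAYLISTID
	#   http://www.youtube.com/p/PLAYLISTID
	#   http://www.youtube.com/artist?a=ARTISTID
	#   http://www.youtube.com/user/NAME#g/c/CHANNELID
	# group(1), when present, is the access type ('p' or 'a'), group(2) is
	# the list id, and group(3), when present, a single video id inside it.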

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubePlaylistIE._VALID_URL, url) is not None)

	def report_download_page(self, playlist_id, pagenum):
		"""Report attempt to download playlist page with given number."""
		self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))

	def _real_initialize(self):
		self._youtube_ie.initialize()

	def _real_extract(self, url):
		# Extract playlist id
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
			return

		# Single video case
		if mobj.group(3) is not None:
			self._youtube_ie.extract(mobj.group(3))
			return

		# Download playlist pages
		# The prefix is 'p' by default for playlists, but other list types
		# (e.g. artist pages) need extra care
		playlist_prefix = mobj.group(1)
		if playlist_prefix == 'a':
			playlist_access = 'artist'
		else:
			playlist_prefix = 'p'
			playlist_access = 'view_play_list'
		playlist_id = mobj.group(2)
		video_ids = []
		pagenum = 1

		while True:
			self.report_download_page(playlist_id, pagenum)
			request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			ids_in_page = []
			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				if mobj.group(1) not in ids_in_page:
					ids_in_page.append(mobj.group(1))
			video_ids.extend(ids_in_page)

			if re.search(self._MORE_PAGES_INDICATOR, page) is None:
				break
			pagenum = pagenum + 1

		playliststart = self._downloader.params.get('playliststart', 1) - 1
		playlistend = self._downloader.params.get('playlistend', -1)
		# A playlistend of -1 means "no upper limit"; slicing with -1 would
		# silently drop the last video, so treat it separately (this mirrors
		# YoutubeUserIE below)
		if playlistend == -1:
			video_ids = video_ids[playliststart:]
		else:
			video_ids = video_ids[playliststart:playlistend]

		for id in video_ids:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
		return
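
	# Illustrative slicing example (hypothetical values, not executed
	# anywhere): with params {'playliststart': 3, 'playlistend': 5} the
	# code above computes video_ids[2:5], i.e. the third through fifth
	# collected ids; with the defaults (1 and -1) it keeps the whole list.
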
class YoutubeUserIE(InfoExtractor):
	"""Information Extractor for YouTube users."""

	_VALID_URL = r'(?:(?:(?:http://)?(?:\w+\.)?youtube.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
	_GDATA_PAGE_SIZE = 50
	_GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
	_youtube_ie = None

	def __init__(self, youtube_ie, downloader=None):
		InfoExtractor.__init__(self, downloader)
		self._youtube_ie = youtube_ie

	@staticmethod
	def suitable(url):
		return (re.match(YoutubeUserIE._VALID_URL, url) is not None)

	def report_download_page(self, username, start_index):
		"""Report attempt to download user page."""
		self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
				(username, start_index, start_index + self._GDATA_PAGE_SIZE))

	def _real_initialize(self):
		self._youtube_ie.initialize()
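
	# Illustrative paging example (hypothetical user name, not executed
	# anywhere): for 'someuser' the loop below requests
	#   _GDATA_URL % ('someuser', 50, 1)    # pagenum 0
	#   _GDATA_URL % ('someuser', 50, 51)   # pagenum 1
	# and so on, since the GData API is 1-indexed and returns at most
	# _GDATA_PAGE_SIZE results per request.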
	def _real_extract(self, url):
		# Extract username
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
			return

		username = mobj.group(1)

		# Download video ids using the YouTube Data API. Result size per
		# query is limited (currently to 50 videos) so we need to query
		# page by page until there are no more video ids - then we have
		# all of them.

		video_ids = []
		pagenum = 0

		while True:
			start_index = pagenum * self._GDATA_PAGE_SIZE + 1
			self.report_download_page(username, start_index)

			request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))

			try:
				page = urllib2.urlopen(request).read()
			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
				return

			# Extract video identifiers
			ids_in_page = []

			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
				if mobj.group(1) not in ids_in_page:
					ids_in_page.append(mobj.group(1))

			video_ids.extend(ids_in_page)

			# A little optimization - if the current page is not "full",
			# i.e. does not contain _GDATA_PAGE_SIZE video ids, we can
			# assume that this page is the last one - there are no more
			# ids on further pages, so there is no need to query again.

			if len(ids_in_page) < self._GDATA_PAGE_SIZE:
				break

			pagenum += 1

		all_ids_count = len(video_ids)
		playliststart = self._downloader.params.get('playliststart', 1) - 1
		playlistend = self._downloader.params.get('playlistend', -1)

		if playlistend == -1:
			video_ids = video_ids[playliststart:]
		else:
			video_ids = video_ids[playliststart:playlistend]

		self._downloader.to_screen(u'[youtube] user %s: Collected %d video ids (downloading %d of them)' %
				(username, all_ids_count, len(video_ids)))

		for video_id in video_ids:
			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)

class DepositFilesIE(InfoExtractor):
	"""Information extractor for depositfiles.com"""

	_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles.com/(?:../(?#locale))?files/(.+)'

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(DepositFilesIE._VALID_URL, url) is not None)

	def report_download_webpage(self, file_id):
		"""Report webpage download."""
		self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)

	def report_extraction(self, file_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)

	def _real_initialize(self):
		return

	def _real_extract(self, url):
		# At this point we have a new file
		self._downloader.increment_downloads()

		file_id = url.split('/')[-1]
		# Rebuild url in English locale
		url = 'http://depositfiles.com/en/files/' + file_id

		# Retrieve file webpage with 'Free download' button pressed
		free_download_indication = {'gateway_result': '1'}
		request = urllib2.Request(url, urllib.urlencode(free_download_indication))
		try:
			self.report_download_webpage(file_id)
			webpage = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
			return

		# Search for the real file URL
		mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
		if (mobj is None) or (mobj.group(1) is None):
			# Try to figure out the reason for the error.
			mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
			if (mobj is not None) and (mobj.group(1) is not None):
				restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()
				self._downloader.trouble(u'ERROR: %s' % restriction_message)
			else:
				self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
			return

		file_url = mobj.group(1)
		file_extension = os.path.splitext(file_url)[1][1:]

		# Search for file title
		mobj = re.search(r'<b title="(.*?)">', webpage)
		if mobj is None:
			self._downloader.trouble(u'ERROR: unable to extract title')
			return
		file_title = mobj.group(1).decode('utf-8')

		try:
			# Process file information
			self._downloader.process_info({
				'id':		file_id.decode('utf-8'),
				'url':		file_url.decode('utf-8'),
				'uploader':	u'NA',
				'upload_date':	u'NA',
				'title':	file_title,
				'stitle':	file_title,
				'ext':		file_extension.decode('utf-8'),
				'format':	u'NA',
				'player_url':	None,
			})
		except UnavailableVideoError, err:
			self._downloader.trouble(u'ERROR: unable to download file')

class FacebookIE(InfoExtractor):
	"""Information Extractor for Facebook"""

	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook.com/video/video.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
	_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
	_NETRC_MACHINE = 'facebook'
	_available_formats = ['highqual', 'lowqual']
	_video_extensions = {
		'highqual': 'mp4',
		'lowqual': 'mp4',
	}

	def __init__(self, downloader=None):
		InfoExtractor.__init__(self, downloader)

	@staticmethod
	def suitable(url):
		return (re.match(FacebookIE._VALID_URL, url) is not None)

	def _reporter(self, message):
		"""Add header and report message."""
		self._downloader.to_screen(u'[facebook] %s' % message)

	def report_login(self):
		"""Report attempt to log in."""
		self._reporter(u'Logging in')

	def report_video_webpage_download(self, video_id):
		"""Report attempt to download video webpage."""
		self._reporter(u'%s: Downloading video webpage' % video_id)

	def report_information_extraction(self, video_id):
		"""Report attempt to extract video information."""
		self._reporter(u'%s: Extracting video information' % video_id)

	def _parse_page(self, video_webpage):
		"""Extract video information from page"""
		# General data
		data = {'title': r'class="video_title datawrap">(.*?)</',
			'description': r'<div class="datawrap">(.*?)</div>',
			'owner': r'\("video_owner_name", "(.*?)"\)',
			'upload_date': r'data-date="(.*?)"',
			'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
			}
		video_info = {}
		for piece in data.keys():
			mobj = re.search(data[piece], video_webpage)
			if mobj is not None:
				video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))

		# Video urls
		video_urls = {}
		for fmt in self._available_formats:
			mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
			if mobj is not None:
				# The URL is in a JavaScript segment inside an escaped
				# Unicode format within the generally utf-8 page
				video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
		video_info['video_urls'] = video_urls

		return video_info
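
	# Illustrative example (hypothetical page snippet, not executed
	# anywhere): a page containing ("video_owner_name", "Some Uploader")
	# and ("highqual_src", "http%3A%2F%2F...") would yield
	# video_info['owner'] == u'Some Uploader' and a 'highqual' entry in
	# video_info['video_urls'] with the URL percent-decoded.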

	def _real_initialize(self):
		if self._downloader is None:
			return

		useremail = None
		password = None
		downloader_params = self._downloader.params

		# Attempt to use provided username and password or .netrc data
		if downloader_params.get('username', None) is not None:
			useremail = downloader_params['username']
			password = downloader_params['password']
		elif downloader_params.get('usenetrc', False):
			try:
				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
				if info is not None:
					useremail = info[0]
					password = info[2]
				else:
					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
			except (IOError, netrc.NetrcParseError), err:
				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
				return

		if useremail is None:
			return

		# Log in
		login_form = {
			'email': useremail,
			'pass': password,
			'login': 'Log+In'
			}
		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
		try:
			self.report_login()
			login_results = urllib2.urlopen(request).read()
			if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
				self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
				return
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
			return

	def _real_extract(self, url):
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return
		video_id = mobj.group('ID')

		# Get video webpage
		self.report_video_webpage_download(video_id)
		request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
		try:
			page = urllib2.urlopen(request)
			video_webpage = page.read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
			return

		# Start extracting information
		self.report_information_extraction(video_id)

		# Extract information
		video_info = self._parse_page(video_webpage)

		# uploader
		if 'owner' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
			return
		video_uploader = video_info['owner']

		# title
		if 'title' not in video_info:
			self._downloader.trouble(u'ERROR: unable to extract video title')
			return
		video_title = video_info['title']
		video_title = video_title.decode('utf-8')
		video_title = sanitize_title(video_title)

		# simplified title
		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
		simple_title = simple_title.strip(ur'_')

		# thumbnail image
		if 'thumbnail' not in video_info:
			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
			video_thumbnail = ''
		else:
			video_thumbnail = video_info['thumbnail']

		# upload date
		upload_date = u'NA'
		if 'upload_date' in video_info:
			import email.utils # local import; not in the module-level imports above
			upload_time = video_info['upload_date']
			timetuple = email.utils.parsedate_tz(upload_time)
			if timetuple is not None:
				try:
					upload_date = time.strftime('%Y%m%d', timetuple[0:9])
				except:
					pass

		# description
		video_description = video_info.get('description', 'No description available.')

		url_map = video_info['video_urls']
		video_url_list = [] # stays empty if no known video URL was found
		if len(url_map.keys()) > 0:
			# Decide which formats to download
			req_format = self._downloader.params.get('format', None)
			format_limit = self._downloader.params.get('format_limit', None)

			if format_limit is not None and format_limit in self._available_formats:
				format_list = self._available_formats[self._available_formats.index(format_limit):]
			else:
				format_list = self._available_formats
			existing_formats = [x for x in format_list if x in url_map]
			if len(existing_formats) == 0:
				self._downloader.trouble(u'ERROR: no known formats available for video')
				return
			if req_format is None:
				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
			elif req_format == '-1':
				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
			else:
				# Specific format
				if req_format not in url_map:
					self._downloader.trouble(u'ERROR: requested format not available')
					return
				video_url_list = [(req_format, url_map[req_format])] # Specific format

		for format_param, video_real_url in video_url_list:

			# At this point we have a new video
			self._downloader.increment_downloads()

			# Extension
			video_extension = self._video_extensions.get(format_param, 'mp4')

			try:
				# Process video information
				self._downloader.process_info({
					'id':		video_id.decode('utf-8'),
					'url':		video_real_url.decode('utf-8'),
					'uploader':	video_uploader.decode('utf-8'),
					'upload_date':	upload_date,
					'title':	video_title,
					'stitle':	simple_title,
					'ext':		video_extension.decode('utf-8'),
					'format':	(format_param is None and u'NA' or format_param.decode('utf-8')),
					'thumbnail':	video_thumbnail.decode('utf-8'),
					'description':	video_description.decode('utf-8'),
					'player_url':	None,
				})
			except UnavailableVideoError, err:
				self._downloader.trouble(u'\nERROR: unable to download video')

class BlipTVIE(InfoExtractor):
	"""Information extractor for blip.tv"""

	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$'
	_URL_EXT = r'^.*\.([a-z0-9]+)$'

	@staticmethod
	def suitable(url):
		return (re.match(BlipTVIE._VALID_URL, url) is not None)

	def report_extraction(self, file_id):
		"""Report information extraction."""
		self._downloader.to_screen(u'[blip.tv] %s: Extracting information' % file_id)

	def _simplify_title(self, title):
		res = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', title)
		res = res.strip(ur'_')
		return res
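
	# Illustrative example (hypothetical URL, not executed anywhere): for
	# 'http://blip.tv/file/123456' the extractor below requests
	# 'http://blip.tv/file/123456?skin=json&version=2&no_wrap=1' and reads
	# the video metadata from the returned JSON instead of scraping HTML.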

	def _real_extract(self, url):
		mobj = re.match(self._VALID_URL, url)
		if mobj is None:
			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
			return

		if '?' in url:
			cchar = '&'
		else:
			cchar = '?'
		json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
		request = urllib2.Request(json_url)
		self.report_extraction(mobj.group(1))
		try:
			json_code = urllib2.urlopen(request).read()
		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
			self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
			return
		try:
			json_data = json.loads(json_code)
			if 'Post' in json_data:
				data = json_data['Post']
			else:
				data = json_data

			upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
			video_url = data['media']['url']
			umobj = re.match(self._URL_EXT, video_url)
			if umobj is None:
				raise ValueError('Cannot determine filename extension')
			ext = umobj.group(1)

			self._downloader.increment_downloads()

			info = {
				'id': data['item_id'],
				'url': video_url,
				'uploader': data['display_name'],
				'upload_date': upload_date,
				'title': data['title'],
				'stitle': self._simplify_title(data['title']),
				'ext': ext,
				'format': data['media']['mimeType'],
				'thumbnail': data['thumbnailUrl'],
				'description': data['description'],
				'player_url': data['embedUrl']
			}
		except (ValueError, KeyError), err:
			self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
			return

		try:
			self._downloader.process_info(info)
		except UnavailableVideoError, err:
			self._downloader.trouble(u'\nERROR: unable to download video')

class PostProcessor(object):
	"""Post Processor class.

	PostProcessor objects can be added to downloaders with their
	add_post_processor() method. When the downloader has finished a
	successful download, it will take its internal chain of PostProcessors
	and start calling the run() method on each one of them, first with
	an initial argument and then with the returned value of the previous
	PostProcessor.

	The chain will be stopped if one of them ever returns None or the end
	of the chain is reached.

	PostProcessor objects follow a "mutual registration" process similar
	to InfoExtractor objects.
	"""

	_downloader = None

	def __init__(self, downloader=None):
		self._downloader = downloader

	def set_downloader(self, downloader):
		"""Sets the downloader for this PP."""
		self._downloader = downloader

	def run(self, information):
		"""Run the PostProcessor.

		The "information" argument is a dictionary like the ones
		composed by InfoExtractors. The only difference is that this
		one has an extra field called "filepath" that points to the
		downloaded file.

		When this method returns None, the postprocessing chain is
		stopped. However, this method may return an information
		dictionary that will be passed to the next postprocessing
		object in the chain. It can be the one it received after
		changing some fields.

		In addition, this method may raise a PostProcessingError
		exception that will be taken into account by the downloader
		it was called from.
		"""
		return information # by default, do nothing
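
# Illustrative sketch (hypothetical subclass, not part of this file's
# behaviour): a minimal PostProcessor that just reports the file path.
# run() must return the (possibly modified) info dictionary to keep the
# chain going, or None to stop it.
#
#	class PrintFilepathPP(PostProcessor):
#		def run(self, information):
#			self._downloader.to_screen(u'Saved to %s' % information['filepath'])
#			return information
#
# It would be attached with the downloader's add_post_processor() method.
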
										 |  |  | class FFmpegExtractAudioPP(PostProcessor): | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	def __init__(self, downloader=None, preferredcodec=None): | 
					
						
							|  |  |  | 		PostProcessor.__init__(self, downloader) | 
					
						
							|  |  |  | 		if preferredcodec is None: | 
					
						
							|  |  |  | 			preferredcodec = 'best' | 
					
						
							|  |  |  | 		self._preferredcodec = preferredcodec | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	@staticmethod | 
					
						
							|  |  |  | 	def get_audio_codec(path): | 
					
						
							| 
									
										
										
										
											2011-02-25 21:53:26 +01:00
										 |  |  | 		try: | 
					
						
							| 
									
										
										
										
											2011-03-15 20:04:20 +01:00
										 |  |  | 			cmd = ['ffprobe', '-show_streams', '--', path] | 
					
						
							|  |  |  | 			handle = subprocess.Popen(cmd, stderr=file(os.path.devnull, 'w'), stdout=subprocess.PIPE) | 
					
						
							| 
									
										
										
										
											2011-02-25 21:53:26 +01:00
										 |  |  | 			output = handle.communicate()[0] | 
					
						
							|  |  |  | 			if handle.wait() != 0: | 
					
						
							|  |  |  | 				return None | 
					
						
							|  |  |  | 		except (IOError, OSError): | 
					
						
							| 
									
										
										
										
											2011-02-25 19:06:58 +01:00
										 |  |  | 			return None | 
					
						
							|  |  |  | 		audio_codec = None | 
					
						
							|  |  |  | 		for line in output.split('\n'): | 
					
						
							|  |  |  | 			if line.startswith('codec_name='): | 
					
						
							|  |  |  | 				audio_codec = line.split('=')[1].strip() | 
					
						
							|  |  |  | 			elif line.strip() == 'codec_type=audio' and audio_codec is not None: | 
					
						
							|  |  |  | 				return audio_codec | 
					
						
							|  |  |  | 		return None | 
					
						
							|  |  |  | 
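
	# For reference, 'ffprobe -show_streams' prints blocks of key=value
	# lines per stream (exact fields vary with the ffprobe version), e.g.:
	#
	#   [STREAM]
	#   codec_name=aac
	#   codec_type=audio
	#   [/STREAM]
	#
	# so get_audio_codec() returns the codec_name seen immediately before
	# a codec_type=audio line.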

	@staticmethod
	def run_ffmpeg(path, out_path, codec, more_opts):
		try:
			cmd = ['ffmpeg', '-y', '-i', path, '-vn', '-acodec', codec] + more_opts + ['--', out_path]
			ret = subprocess.call(cmd, stdout=file(os.path.devnull, 'w'), stderr=subprocess.STDOUT)
			return (ret == 0)
		except (IOError, OSError):
			return False
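
	# With codec='libmp3lame' and more_opts=['-ab', '128k'] the resulting
	# command is, roughly:
	#
	#   ffmpeg -y -i INPUT -vn -acodec libmp3lame -ab 128k -- OUTPUT
	#
	# i.e. overwrite the output if present (-y) and drop the video
	# stream (-vn).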

	def run(self, information):
		path = information['filepath']

		filecodec = self.get_audio_codec(path)
		if filecodec is None:
			self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
			return None

		more_opts = []
		if self._preferredcodec == 'best' or self._preferredcodec == filecodec:
			if filecodec == 'aac' or filecodec == 'mp3':
				# Lossless if possible
				acodec = 'copy'
				extension = filecodec
				if filecodec == 'aac':
					more_opts = ['-f', 'adts']
			else:
				# MP3 otherwise.
				acodec = 'libmp3lame'
				extension = 'mp3'
				more_opts = ['-ab', '128k']
		else:
			# We convert the audio (lossy)
			acodec = {'mp3': 'libmp3lame', 'aac': 'aac'}[self._preferredcodec]
			extension = self._preferredcodec
			more_opts = ['-ab', '128k']
			if self._preferredcodec == 'aac':
				more_opts += ['-f', 'adts']

		(prefix, ext) = os.path.splitext(path)
		new_path = prefix + '.' + extension
		self._downloader.to_screen(u'[ffmpeg] Destination: %s' % new_path)
		status = self.run_ffmpeg(path, new_path, acodec, more_opts)

		if not status:
			self._downloader.to_stderr(u'WARNING: error running ffmpeg')
			return None

		try:
			os.remove(path)
		except (IOError, OSError):
			self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
			return None

		information['filepath'] = new_path
		return information
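
# Example registration (main() below does this for --extract-audio):
# 'best' keeps the original stream when it is already AAC or MP3 and
# transcodes to MP3 otherwise.
#
#	fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec='best'))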


def updateSelf(downloader, filename):
	''' Update the program file with the latest version from the repository '''
	# Note: downloader only used for options
	if not os.access(filename, os.W_OK):
		sys.exit('ERROR: no write permissions on %s' % filename)

	downloader.to_screen('Updating to latest stable version...')

	try:
		latest_url = 'http://github.com/rg3/youtube-dl/raw/master/LATEST_VERSION'
		latest_version = urllib.urlopen(latest_url).read().strip()
		prog_url = 'http://github.com/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
		newcontent = urllib.urlopen(prog_url).read()
	except (IOError, OSError), err:
		sys.exit('ERROR: unable to download latest version')

	try:
		stream = open(filename, 'wb')
		stream.write(newcontent)
		stream.close()
	except (IOError, OSError), err:
		sys.exit('ERROR: unable to overwrite current version')

	downloader.to_screen('Updated to version %s' % latest_version)
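
# Update protocol sketch: LATEST_VERSION is a one-line file naming the
# current release (e.g. '2011.08.28'); updateSelf() fetches the script as
# published at that revision and writes it over the running file.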

def parseOpts():
	# Deferred imports
	import getpass
	import optparse

	def _format_option_string(option):
		''' ('-o', '--option') -> -o, --option METAVAR '''

		opts = []

		if option._short_opts: opts.append(option._short_opts[0])
		if option._long_opts: opts.append(option._long_opts[0])
		if len(opts) > 1: opts.insert(1, ', ')

		if option.takes_value(): opts.append(' %s' % option.metavar)

		return "".join(opts)
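
	# e.g. the '-f'/'--format' option below renders as '-f, --format FORMAT'
	# rather than optparse's default '-f FORMAT, --format=FORMAT'.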

	def _find_term_columns():
		columns = os.environ.get('COLUMNS', None)
		if columns:
			return int(columns)

		# Fall back to stty; its output looks like 'ROWS COLUMNS'
		try:
			sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
			out, err = sp.communicate()
			return int(out.split()[1])
		except (IOError, OSError, ValueError, IndexError):
			# stty missing (e.g. on Windows) or unparseable output
			pass
		return None

	max_width = 80
	max_help_position = 80

	# No need to wrap help messages if we're on a wide console
	columns = _find_term_columns()
	if columns: max_width = columns

	fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
	fmt.format_option_strings = _format_option_string

	kw = {
		'version'   : __version__,
		'formatter' : fmt,
		'usage' : '%prog [options] url...',
		'conflict_handler' : 'resolve',
	}

	parser = optparse.OptionParser(**kw)

	# option groups
	general        = optparse.OptionGroup(parser, 'General Options')
	authentication = optparse.OptionGroup(parser, 'Authentication Options')
	video_format   = optparse.OptionGroup(parser, 'Video Format Options')
	postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
	filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
	verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')

	general.add_option('-h', '--help',
			action='help', help='print this help text and exit')
	general.add_option('-v', '--version',
			action='version', help='print program version and exit')
	general.add_option('-U', '--update',
			action='store_true', dest='update_self', help='update this program to latest stable version')
	general.add_option('-i', '--ignore-errors',
			action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
	general.add_option('-r', '--rate-limit',
			dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
	general.add_option('-R', '--retries',
			dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
	general.add_option('--playlist-start',
			dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
	general.add_option('--playlist-end',
			dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
	general.add_option('--dump-user-agent',
			action='store_true', dest='dump_user_agent',
			help='display the current browser identification', default=False)

	authentication.add_option('-u', '--username',
			dest='username', metavar='USERNAME', help='account username')
	authentication.add_option('-p', '--password',
			dest='password', metavar='PASSWORD', help='account password')
	authentication.add_option('-n', '--netrc',
			action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)


	video_format.add_option('-f', '--format',
			action='store', dest='format', metavar='FORMAT', help='video format code')
	video_format.add_option('--all-formats',
			action='store_const', dest='format', help='download all available video formats', const='-1')
	video_format.add_option('--max-quality',
			action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')


	verbosity.add_option('-q', '--quiet',
			action='store_true', dest='quiet', help='activates quiet mode', default=False)
	verbosity.add_option('-s', '--simulate',
			action='store_true', dest='simulate', help='do not download video', default=False)
	verbosity.add_option('-g', '--get-url',
			action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
	verbosity.add_option('-e', '--get-title',
			action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
	verbosity.add_option('--get-thumbnail',
			action='store_true', dest='getthumbnail',
			help='simulate, quiet but print thumbnail URL', default=False)
	verbosity.add_option('--get-description',
			action='store_true', dest='getdescription',
			help='simulate, quiet but print video description', default=False)
	verbosity.add_option('--get-filename',
			action='store_true', dest='getfilename',
			help='simulate, quiet but print output filename', default=False)
	verbosity.add_option('--no-progress',
			action='store_true', dest='noprogress', help='do not print progress bar', default=False)
	verbosity.add_option('--console-title',
			action='store_true', dest='consoletitle',
			help='display progress in console titlebar', default=False)


	filesystem.add_option('-t', '--title',
			action='store_true', dest='usetitle', help='use title in file name', default=False)
	filesystem.add_option('-l', '--literal',
			action='store_true', dest='useliteral', help='use literal title in file name', default=False)
	filesystem.add_option('-A', '--auto-number',
			action='store_true', dest='autonumber',
			help='number downloaded files starting from 00000', default=False)
	filesystem.add_option('-o', '--output',
			dest='outtmpl', metavar='TEMPLATE', help='output filename template')
	filesystem.add_option('-a', '--batch-file',
			dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
	filesystem.add_option('-w', '--no-overwrites',
			action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
	filesystem.add_option('-c', '--continue',
			action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
	filesystem.add_option('--cookies',
			dest='cookiefile', metavar='FILE', help='file to dump cookie jar to')
	filesystem.add_option('--no-part',
			action='store_true', dest='nopart', help='do not use .part files', default=False)
	filesystem.add_option('--no-mtime',
			action='store_false', dest='updatetime',
			help='do not use the Last-modified header to set the file modification time', default=True)
	filesystem.add_option('--write-description',
			action='store_true', dest='writedescription',
			help='write video description to a .description file', default=False)
	filesystem.add_option('--write-info-json',
			action='store_true', dest='writeinfojson',
			help='write video metadata to a .info.json file', default=False)


	postproc.add_option('--extract-audio', action='store_true', dest='extractaudio', default=False,
			help='convert video files to audio-only files (requires ffmpeg and ffprobe)')
	postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
			help='"best", "aac" or "mp3"; best by default')


	parser.add_option_group(general)
	parser.add_option_group(filesystem)
	parser.add_option_group(verbosity)
	parser.add_option_group(video_format)
	parser.add_option_group(authentication)
	parser.add_option_group(postproc)

	opts, args = parser.parse_args()

	return parser, opts, args
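
# Sketch of the resulting interface: parseOpts() is plain optparse, so
# e.g. 'youtube-dl -f 17 --extract-audio URL' yields opts.format == '17',
# opts.extractaudio == True and args == ['URL'].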

def main():
	parser, opts, args = parseOpts()

	# Open appropriate CookieJar
	if opts.cookiefile is None:
		jar = cookielib.CookieJar()
	else:
		try:
			jar = cookielib.MozillaCookieJar(opts.cookiefile)
			if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
				jar.load()
		except (IOError, OSError), err:
			sys.exit(u'ERROR: unable to open cookie file')
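
	# MozillaCookieJar reads and writes the Netscape cookies.txt format,
	# so the --cookies file can be shared with tools like wget or curl.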

	# Dump user agent
	if opts.dump_user_agent:
		print std_headers['User-Agent']
		sys.exit(0)

	# General configuration
	cookie_processor = urllib2.HTTPCookieProcessor(jar)
	urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler(), cookie_processor, YoutubeDLHandler()))
	socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
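
	# From here on every urllib2 request goes through one global opener
	# that picks up proxies from the environment (ProxyHandler with no
	# arguments), sends the cookie jar, and handles compressed responses
	# via YoutubeDLHandler.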

	# Batch file verification
	batchurls = []
	if opts.batchfile is not None:
		try:
			if opts.batchfile == '-':
				batchfd = sys.stdin
			else:
				batchfd = open(opts.batchfile, 'r')
			batchurls = batchfd.readlines()
			batchurls = [x.strip() for x in batchurls]
			batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
		except IOError:
			sys.exit(u'ERROR: batch file could not be read')
	all_urls = batchurls + args
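
	# Batch files hold one URL per line; empty lines and lines starting
	# with '#', '/' or ';' are skipped as comments, e.g.:
	#
	#   # queued for later
	#   http://www.youtube.com/watch?v=BaW_jenozKc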
					
						
							|  |  |  | 	# Conflicting, missing and erroneous options | 
					
						
							|  |  |  | 	if opts.usenetrc and (opts.username is not None or opts.password is not None): | 
					
						
							|  |  |  | 		parser.error(u'using .netrc conflicts with giving username/password') | 
					
						
							|  |  |  | 	if opts.password is not None and opts.username is None: | 
					
						
							|  |  |  | 		parser.error(u'account username missing') | 
					
						
							|  |  |  | 	if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber): | 
					
						
							|  |  |  | 		parser.error(u'using output template conflicts with using title, literal title or auto number') | 
					
						
							|  |  |  | 	if opts.usetitle and opts.useliteral: | 
					
						
							|  |  |  | 		parser.error(u'using title conflicts with using literal title') | 
					
						
							|  |  |  | 	if opts.username is not None and opts.password is None: | 
					
						
							|  |  |  | 		opts.password = getpass.getpass(u'Type account password and press return:') | 
					
						
							|  |  |  | 	if opts.ratelimit is not None: | 
					
						
							|  |  |  | 		numeric_limit = FileDownloader.parse_bytes(opts.ratelimit) | 
					
						
							|  |  |  | 		if numeric_limit is None: | 
					
						
							|  |  |  | 			parser.error(u'invalid rate limit specified') | 
					
						
							|  |  |  | 		opts.ratelimit = numeric_limit | 
					
						
							|  |  |  | 	if opts.retries is not None: | 
					
						
							| 
									
										
										
										
											2010-11-04 23:19:09 +01:00
										 |  |  | 		try: | 
					
						
							| 
									
										
										
										
											2011-08-23 16:48:08 +03:00
										 |  |  | 			opts.retries = long(opts.retries) | 
					
						
							| 
									
										
										
										
											2010-11-04 23:19:09 +01:00
										 |  |  | 		except (TypeError, ValueError), err: | 
					
						
							| 
									
										
										
										
											2011-08-23 16:48:08 +03:00
										 |  |  | 			parser.error(u'invalid retry count specified') | 
					
						
							|  |  |  | 	try: | 
					
						
							| 
									
										
										
										
											2011-08-24 23:21:55 +02:00
										 |  |  | 		opts.playliststart = int(opts.playliststart) | 
					
						
							| 
									
										
										
										
											2011-08-23 16:48:08 +03:00
										 |  |  | 		if opts.playliststart <= 0: | 
					
						
							| 
									
										
										
										
											2011-08-24 23:21:55 +02:00
										 |  |  | 			raise ValueError(u'Playlist start must be positive') | 
					
						
							| 
									
										
										
										
											2011-08-23 16:48:08 +03:00
										 |  |  | 	except (TypeError, ValueError), err: | 
					
						
							|  |  |  | 		parser.error(u'invalid playlist start number specified') | 
					
						
							|  |  |  | 	try: | 
					
						
							| 
									
										
										
										
											2011-08-24 23:21:55 +02:00
										 |  |  | 		opts.playlistend = int(opts.playlistend) | 
					
						
							| 
									
										
										
										
											2011-08-23 16:48:08 +03:00
										 |  |  | 		if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart): | 
					
						
							| 
									
										
										
										
											2011-08-24 23:21:55 +02:00
										 |  |  | 			raise ValueError(u'Playlist end must be greater than playlist start') | 
					
						
							| 
									
										
										
										
											2011-08-23 16:48:08 +03:00
										 |  |  | 	except (TypeError, ValueError), err: | 
					
						
							|  |  |  | 		parser.error(u'invalid playlist end number specified') | 
					
						
							|  |  |  | 	if opts.extractaudio: | 
					
						
							|  |  |  | 		if opts.audioformat not in ['best', 'aac', 'mp3']: | 
					
						
							|  |  |  | 			parser.error(u'invalid audio format specified') | 
					
						
							|  |  |  | 
 | 
					
						

	# Information extractors
	youtube_ie = YoutubeIE()
	metacafe_ie = MetacafeIE(youtube_ie)
	dailymotion_ie = DailymotionIE()
	youtube_pl_ie = YoutubePlaylistIE(youtube_ie)
	youtube_user_ie = YoutubeUserIE(youtube_ie)
	youtube_search_ie = YoutubeSearchIE(youtube_ie)
	google_ie = GoogleIE()
	google_search_ie = GoogleSearchIE(google_ie)
	photobucket_ie = PhotobucketIE()
	yahoo_ie = YahooIE()
	yahoo_search_ie = YahooSearchIE(yahoo_ie)
	deposit_files_ie = DepositFilesIE()
	facebook_ie = FacebookIE()
	bliptv_ie = BlipTVIE()
	vimeo_ie = VimeoIE()
	generic_ie = GenericIE()

	# File downloader
	fd = FileDownloader({
		'usenetrc': opts.usenetrc,
		'username': opts.username,
		'password': opts.password,
		'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
		'forceurl': opts.geturl,
		'forcetitle': opts.gettitle,
		'forcethumbnail': opts.getthumbnail,
		'forcedescription': opts.getdescription,
		'forcefilename': opts.getfilename,
		'simulate': (opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename),
		'format': opts.format,
		'format_limit': opts.format_limit,
		'outtmpl': ((opts.outtmpl is not None and opts.outtmpl.decode(preferredencoding()))
			or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
			or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
			or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
			or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
			or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
			or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
			or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
			or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
			or u'%(id)s.%(ext)s'),
		'ignoreerrors': opts.ignoreerrors,
		'ratelimit': opts.ratelimit,
		'nooverwrites': opts.nooverwrites,
		'retries': opts.retries,
		'continuedl': opts.continue_dl,
		'noprogress': opts.noprogress,
		'playliststart': opts.playliststart,
		'playlistend': opts.playlistend,
		'logtostderr': opts.outtmpl == '-',
		'consoletitle': opts.consoletitle,
		'nopart': opts.nopart,
		'updatetime': opts.updatetime,
		'writedescription': opts.writedescription,
		'writeinfojson': opts.writeinfojson,
		})
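
	# The outtmpl cascade above picks the first template whose flags are
	# set: e.g. plain '-t' resolves to u'%(stitle)s-%(id)s.%(ext)s', and
	# no flags at all falls through to u'%(id)s.%(ext)s'.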
					
						
	fd.add_info_extractor(youtube_search_ie)
	fd.add_info_extractor(youtube_pl_ie)
	fd.add_info_extractor(youtube_user_ie)
	fd.add_info_extractor(metacafe_ie)
	fd.add_info_extractor(dailymotion_ie)
	fd.add_info_extractor(youtube_ie)
	fd.add_info_extractor(google_ie)
	fd.add_info_extractor(google_search_ie)
	fd.add_info_extractor(photobucket_ie)
	fd.add_info_extractor(yahoo_ie)
	fd.add_info_extractor(yahoo_search_ie)
	fd.add_info_extractor(deposit_files_ie)
	fd.add_info_extractor(facebook_ie)
	fd.add_info_extractor(bliptv_ie)
	fd.add_info_extractor(vimeo_ie)

	# This must come last since it's the
	# fallback if none of the others work
	fd.add_info_extractor(generic_ie)

	# PostProcessors
	if opts.extractaudio:
		fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat))

	# Update version
	if opts.update_self:
		updateSelf(fd, sys.argv[0])

	# Maybe do nothing
	if len(all_urls) < 1:
		if not opts.update_self:
			parser.error(u'you must provide at least one URL')
		else:
			sys.exit()
	retcode = fd.download(all_urls)

	# Dump cookie jar if requested
	if opts.cookiefile is not None:
		try:
			jar.save()
		except (IOError, OSError), err:
			sys.exit(u'ERROR: unable to save cookie jar')

	sys.exit(retcode)


if __name__ == '__main__':
	try:
		main()
	except DownloadError:
		sys.exit(1)
	except SameFileError:
		sys.exit(u'ERROR: fixed output name but more than one file to download')
	except KeyboardInterrupt:
		sys.exit(u'\nERROR: Interrupted by user')

# vim: set ts=4 sw=4 sts=4 noet ai si filetype=python: