diff --git a/LATEST_VERSION b/LATEST_VERSION
index 8418210b7..a1c4173c8 100644
--- a/LATEST_VERSION
+++ b/LATEST_VERSION
@@ -1 +1 @@
-2010.10.24
+2010.12.09
diff --git a/youtube-dl b/youtube-dl
index ab6dcb027..777373ad5 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -3,8 +3,10 @@
 # Author: Ricardo Garcia Gonzalez
 # Author: Danny Colligan
 # Author: Benjamin Johnson
+# Author: Vasyl' Vavrychuk
 # License: Public domain code
 import cookielib
+import datetime
 import htmlentitydefs
 import httplib
 import locale
@@ -28,7 +30,7 @@ except ImportError:
 	from cgi import parse_qs
 
 std_headers = {
-	'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.11) Gecko/20101019 Firefox/3.6.11',
+	'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101028 Firefox/3.6.12',
 	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
 	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
 	'Accept-Language': 'en-us,en;q=0.5',
@@ -109,7 +111,6 @@ def sanitize_open(filename, open_mode):
 		stream = open(filename, open_mode)
 		return (stream, filename)
 
-
 class DownloadError(Exception):
 	"""Download Error exception.
 
@@ -234,6 +235,13 @@ class FileDownloader(object):
 			if not os.path.exists(dir):
 				os.mkdir(dir)
 
+	@staticmethod
+	def temp_name(filename):
+		"""Returns a temporary filename for the given filename."""
+		if filename == u'-' or (os.path.exists(filename) and not os.path.isfile(filename)):
+			return filename
+		return filename + u'.part'
+
 	@staticmethod
 	def format_bytes(bytes):
 		if bytes is None:
@@ -352,6 +360,14 @@ class FileDownloader(object):
 		speed = float(byte_counter) / elapsed
 		if speed > rate_limit:
 			time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
+
+	def try_rename(self, old_filename, new_filename):
+		try:
+			if old_filename == new_filename:
+				return
+			os.rename(old_filename, new_filename)
+		except (IOError, OSError), err:
+			self.trouble(u'ERROR: unable to rename file')
 
 	def report_destination(self, filename):
 		"""Report destination filename."""
@@ -413,7 +429,7 @@ class FileDownloader(object):
 		try:
 			template_dict = dict(info_dict)
 			template_dict['epoch'] = unicode(long(time.time()))
-			template_dict['ord'] = unicode('%05d' % self._num_downloads)
+			template_dict['autonumber'] = unicode('%05d' % self._num_downloads)
 			filename = self.params['outtmpl'] % template_dict
 		except (ValueError, KeyError), err:
 			self.trouble(u'ERROR: invalid system charset or erroneous output template')
@@ -483,6 +499,7 @@ class FileDownloader(object):
 
 	def _download_with_rtmpdump(self, filename, url, player_url):
 		self.report_destination(filename)
+		tmpfilename = self.temp_name(filename)
 
 		# Check for rtmpdump first
 		try:
@@ -494,36 +511,43 @@
 		# Download using rtmpdump. rtmpdump returns exit code 2 when
 		# the connection was interrumpted and resuming appears to be
 		# possible. This is part of rtmpdump's normal usage, AFAIK.
-		basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', filename]
+		basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
 		retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
 		while retval == 2 or retval == 1:
-			prevsize = os.path.getsize(filename)
+			prevsize = os.path.getsize(tmpfilename)
 			self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
 			time.sleep(5.0) # This seems to be needed
 			retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
-			cursize = os.path.getsize(filename)
+			cursize = os.path.getsize(tmpfilename)
 			if prevsize == cursize and retval == 1:
 				break
 		if retval == 0:
-			self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(filename))
+			self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(tmpfilename))
+			self.try_rename(tmpfilename, filename)
 			return True
 		else:
 			self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
 			return False
 
 	def _do_download(self, filename, url, player_url):
+		# Check file already present
+		if self.params.get('continuedl', False) and os.path.isfile(filename):
+			self.report_file_already_downloaded(filename)
+			return True
+
 		# Attempt to download using rtmpdump
 		if url.startswith('rtmp'):
 			return self._download_with_rtmpdump(filename, url, player_url)
 
+		tmpfilename = self.temp_name(filename)
 		stream = None
 		open_mode = 'wb'
 		basic_request = urllib2.Request(url, None, std_headers)
 		request = urllib2.Request(url, None, std_headers)
 
 		# Establish possible resume length
-		if os.path.isfile(filename):
-			resume_len = os.path.getsize(filename)
+		if os.path.isfile(tmpfilename):
+			resume_len = os.path.getsize(tmpfilename)
 		else:
 			resume_len = 0
 
@@ -565,6 +589,7 @@ class FileDownloader(object):
 					# completely downloaded if the file size differs less than 100 bytes from
 					# the one in the hard drive.
 					self.report_file_already_downloaded(filename)
+					self.try_rename(tmpfilename, filename)
 					return True
 				else:
 					# The length does not match, we start the download over
@@ -581,8 +606,10 @@
 			return False
 
 		data_len = data.info().get('Content-length', None)
+		if data_len is not None:
+			data_len = long(data_len) + resume_len
 		data_len_str = self.format_bytes(data_len)
-		byte_counter = 0
+		byte_counter = 0 + resume_len
 		block_size = 1024
 		start = time.time()
 		while True:
@@ -598,7 +625,7 @@ class FileDownloader(object):
 			# Open file just in time
 			if stream is None:
 				try:
-					(stream, filename) = sanitize_open(filename, open_mode)
+					(stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
 					self.report_destination(filename)
 				except (OSError, IOError), err:
 					self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
@@ -619,9 +646,11 @@ class FileDownloader(object):
 			# Apply rate limit
 			self.slow_down(start, byte_counter)
 
+		stream.close()
 		self.report_finish()
 		if data_len is not None and str(byte_counter) != data_len:
 			raise ContentTooShortError(byte_counter, long(data_len))
+		self.try_rename(tmpfilename, filename)
 		return True
 
 class InfoExtractor(object):
@@ -831,7 +860,7 @@ class YoutubeIE(InfoExtractor):
 		# Get video webpage
 		self.report_video_webpage_download(video_id)
-		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en' % video_id, None, std_headers)
+		request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id, None, std_headers)
 		try:
 			video_webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
 			return
@@ -839,9 +868,9 @@
 		# Attempt to extract SWF player URL
-		mobj = re.search(r'swfConfig.*"(http://.*?watch.*?-.*?\.swf)"', video_webpage)
+		mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
 		if mobj is not None:
-			player_url = mobj.group(1)
+			player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
 		else:
 			player_url = None
 
@@ -894,6 +923,18 @@ class YoutubeIE(InfoExtractor):
 		else:	# don't panic if we can't find it
 			video_thumbnail = urllib.unquote_plus(video_info['thumbnail_url'][0])
 
+		# upload date
+		upload_date = u'NA'
+		mobj = re.search(r'id="eow-date".*?>(.*?)</span>', video_webpage, re.DOTALL)
+		if mobj is not None:
+			upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
+			format_expressions = ['%d %B %Y', '%B %d %Y']
+			for expression in format_expressions:
+				try:
+					upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
+				except:
+					pass
+
 		# description
 		video_description = 'No description available.'
 		if self._downloader.params.get('forcedescription', False):
@@ -905,8 +946,7 @@ class YoutubeIE(InfoExtractor):
 		video_token = urllib.unquote_plus(video_info['token'][0])
 
 		# Decide which formats to download
-		requested_format = self._downloader.params.get('format', None)
-		get_video_template = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=&ps=&asv=&fmt=%%s' % (video_id, video_token)
+		req_format = self._downloader.params.get('format', None)
 
 		if 'fmt_url_map' in video_info:
 			url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
@@ -919,12 +959,16 @@ class YoutubeIE(InfoExtractor):
 			if len(existing_formats) == 0:
 				self._downloader.trouble(u'ERROR: no known formats available for video')
 				return
-			if requested_format is None:
-				video_url_list = [(existing_formats[0], get_video_template % existing_formats[0])] # Best quality
-			elif requested_format == '-1':
-				video_url_list = [(f, get_video_template % f) for f in existing_formats] # All formats
+			if req_format is None:
+				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+			elif req_format == '-1':
+				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
 			else:
-				video_url_list = [(requested_format, get_video_template % requested_format)] # Specific format
+				# Specific format
+				if req_format not in url_map:
+					self._downloader.trouble(u'ERROR: requested format not available')
+					return
+				video_url_list = [(req_format, url_map[req_format])] # Specific format
 
 		elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
 			self.report_rtmp_download()
@@ -948,6 +992,7 @@ class YoutubeIE(InfoExtractor):
 					'id':		video_id.decode('utf-8'),
 					'url':		video_real_url.decode('utf-8'),
 					'uploader':	video_uploader.decode('utf-8'),
+					'upload_date':	upload_date,
 					'title':	video_title,
 					'stitle':	simple_title,
 					'ext':		video_extension.decode('utf-8'),
@@ -957,7 +1002,7 @@ class YoutubeIE(InfoExtractor):
 					'player_url':	player_url,
 				})
 			except UnavailableVideoError, err:
-				self._downloader.trouble(u'ERROR: unable to download video (format may not be available)')
+				self._downloader.trouble(u'ERROR: unable to download video')
 
 
 class MetacafeIE(InfoExtractor):
@@ -1094,6 +1139,7 @@ class MetacafeIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader.decode('utf-8'),
+				'upload_date':	u'NA',
 				'title':	video_title,
 				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
@@ -1182,6 +1228,7 @@ class DailymotionIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader.decode('utf-8'),
+				'upload_date':	u'NA',
 				'title':	video_title,
 				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
@@ -1291,6 +1338,7 @@ class GoogleIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	u'NA',
+				'upload_date':	u'NA',
 				'title':	video_title,
 				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
@@ -1372,6 +1420,7 @@ class PhotobucketIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader,
+				'upload_date':	u'NA',
 				'title':	video_title,
 				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
@@ -1526,6 +1575,7 @@ class YahooIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url,
 				'uploader':	video_uploader,
+				'upload_date':	u'NA',
 				'title':	video_title,
 				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
@@ -1579,6 +1629,7 @@ class GenericIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
 			return
 
+		self.report_extraction(video_id)
 		# Start with something easy: JW Player in SWFObject
 		mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
 		if mobj is None:
@@ -1628,6 +1679,7 @@ class GenericIE(InfoExtractor):
 				'id':		video_id.decode('utf-8'),
 				'url':		video_url.decode('utf-8'),
 				'uploader':	video_uploader,
+				'upload_date':	u'NA',
 				'title':	video_title,
 				'stitle':	simple_title,
 				'ext':		video_extension.decode('utf-8'),
@@ -2049,6 +2101,85 @@ class YoutubeUserIE(InfoExtractor):
 			except DownloadError:
 				continue
 
+class DepositFilesIE(InfoExtractor):
+	"""Information extractor for depositfiles.com"""
+
+	_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles.com/(?:../(?#locale))?files/(.+)'
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	@staticmethod
+	def suitable(url):
+		return (re.match(DepositFilesIE._VALID_URL, url) is not None)
+
+	def report_download_webpage(self, file_id):
+		"""Report webpage download."""
+		self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
+
+	def report_extraction(self, file_id):
+		"""Report information extraction."""
+		self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
+
+	def _real_initialize(self):
+		return
+
+	def _real_extract(self, url):
+		# At this point we have a new file
+		self._downloader.increment_downloads()
+
+		file_id = url.split('/')[-1]
+		# Rebuild url in english locale
+		url = 'http://depositfiles.com/en/files/' + file_id
+
+		# Retrieve file webpage with 'Free download' button pressed
+		free_download_indication = { 'gateway_result' : '1' }
+		request = urllib2.Request(url, urllib.urlencode(free_download_indication), std_headers)
+		try:
+			self.report_download_webpage(file_id)
+			webpage = urllib2.urlopen(request).read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
+			return
+
+		# Search for the real file URL
+		mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
+		if (mobj is None) or (mobj.group(1) is None):
+			# Try to figure out reason of the error.
+			mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
+			if (mobj is not None) and (mobj.group(1) is not None):
+				restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
+				self._downloader.trouble(u'ERROR: %s' % restriction_message)
+			else:
+				self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+			return
+
+		file_url = mobj.group(1)
+		file_extension = os.path.splitext(file_url)[1][1:]
+
+		# Search for file title
+		mobj = re.search(r'<b title="(.*?)">', webpage)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: unable to extract title')
+			return
+		file_title = mobj.group(1).decode('utf-8')
+
+		try:
+			# Process file information
+			self._downloader.process_info({
+				'id':		file_id.decode('utf-8'),
+				'url':		file_url.decode('utf-8'),
+				'uploader':	u'NA',
+				'upload_date':	u'NA',
+				'title':	file_title,
+				'stitle':	file_title,
+				'ext':		file_extension.decode('utf-8'),
+				'format':	u'NA',
+				'player_url':	None,
+			})
+		except UnavailableVideoError, err:
+			self._downloader.trouble(u'ERROR: unable to download file')
+
 class PostProcessor(object):
 	"""Post Processor class.
 
@@ -2121,7 +2252,7 @@ if __name__ == '__main__':
 	# Parse command line
 	parser = optparse.OptionParser(
 		usage='Usage: %prog [options] url...',
-		version='2010.10.24',
+		version='2010.12.09',
 		conflict_handler='resolve',
 	)
 
@@ -2154,14 +2285,10 @@ if __name__ == '__main__':
 	video_format = optparse.OptionGroup(parser, 'Video Format Options')
 	video_format.add_option('-f', '--format',
 			action='store', dest='format', metavar='FORMAT', help='video format code')
-	video_format.add_option('-m', '--mobile-version',
-			action='store_const', dest='format', help='alias for -f 17', const='17')
 	video_format.add_option('--all-formats',
 			action='store_const', dest='format', help='download all available video formats', const='-1')
 	video_format.add_option('--max-quality',
 			action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
-	video_format.add_option('-b', '--best-quality',
-			action='store_true', dest='bestquality', help='download the best video quality (DEPRECATED)')
 	parser.add_option_group(video_format)
 
 	verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
@@ -2186,6 +2313,8 @@ if __name__ == '__main__':
 			action='store_true', dest='usetitle', help='use title in file name', default=False)
 	filesystem.add_option('-l', '--literal',
 			action='store_true', dest='useliteral', help='use literal title in file name', default=False)
+	filesystem.add_option('-A', '--auto-number',
+			action='store_true', dest='autonumber', help='number downloaded files starting from 00000', default=False)
 	filesystem.add_option('-o', '--output',
 			dest='outtmpl', metavar='TEMPLATE', help='output filename template')
 	filesystem.add_option('-a', '--batch-file',
@@ -2227,20 +2356,18 @@ if __name__ == '__main__':
 			batchfd = open(opts.batchfile, 'r')
 			batchurls = batchfd.readlines()
 			batchurls = [x.strip() for x in batchurls]
-			batchurls = [x for x in batchurls if len(x) > 0]
+			batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
 		except IOError:
 			sys.exit(u'ERROR: batch file could not be read')
 	all_urls = batchurls + args
 
 	# Conflicting, missing and erroneous options
-	if opts.bestquality:
-		print >>sys.stderr, u'\nWARNING: -b/--best-quality IS DEPRECATED AS IT IS THE DEFAULT BEHAVIOR NOW\n'
 	if opts.usenetrc and (opts.username is not None or opts.password is not None):
 		parser.error(u'using .netrc conflicts with giving username/password')
 	if opts.password is not None and opts.username is None:
 		parser.error(u'account username missing')
-	if opts.outtmpl is not None and (opts.useliteral or opts.usetitle):
-		parser.error(u'using output template conflicts with using title or literal title')
+	if opts.outtmpl is not None and (opts.useliteral or opts.usetitle or opts.autonumber):
+		parser.error(u'using output template conflicts with using title, literal title or auto number')
 	if opts.usetitle and opts.useliteral:
 		parser.error(u'using title conflicts with using literal title')
 	if opts.username is not None and opts.password is None:
@@ -2280,6 +2407,7 @@ if __name__ == '__main__':
 	photobucket_ie = PhotobucketIE()
 	yahoo_ie = YahooIE()
 	yahoo_search_ie = YahooSearchIE(yahoo_ie)
+	deposit_files_ie = DepositFilesIE()
 	generic_ie = GenericIE()
 
 	# File downloader
@@ -2299,8 +2427,11 @@ if __name__ == '__main__':
 			or (opts.format == '-1' and opts.usetitle and u'%(stitle)s-%(id)s-%(format)s.%(ext)s')
 			or (opts.format == '-1' and opts.useliteral and u'%(title)s-%(id)s-%(format)s.%(ext)s')
 			or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
+			or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(stitle)s-%(id)s.%(ext)s')
+			or (opts.useliteral and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
 			or (opts.usetitle and u'%(stitle)s-%(id)s.%(ext)s')
 			or (opts.useliteral and u'%(title)s-%(id)s.%(ext)s')
+			or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
 			or u'%(id)s.%(ext)s'),
 		'ignoreerrors': opts.ignoreerrors,
 		'ratelimit': opts.ratelimit,
@@ -2323,6 +2454,7 @@ if __name__ == '__main__':
 	fd.add_info_extractor(photobucket_ie)
 	fd.add_info_extractor(yahoo_ie)
 	fd.add_info_extractor(yahoo_search_ie)
+	fd.add_info_extractor(deposit_files_ie)
 
 	# This must come last since it's the
 	# fallback if none of the others work
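The behavioural core of this patch is the ".part" temporary-file scheme: data is written to "<name>.part", resume length is measured against that temporary file, and the final name is only assigned once the transfer succeeds. The snippet below is a simplified, hypothetical illustration of that pattern in the same Python 2 idiom as the script; the helper names only loosely mirror FileDownloader.temp_name()/try_rename(), it ignores resuming, rate limiting and error reporting, and none of it is taken verbatim from the patch.

# Illustrative sketch only -- not code from the patch.
import os
import urllib2

def temp_name(filename):
	"""Same rule as the patch: leave '-' and non-regular files untouched."""
	if filename == '-' or (os.path.exists(filename) and not os.path.isfile(filename)):
		return filename
	return filename + '.part'

def download(url, filename, block_size=1024):
	tmpfilename = temp_name(filename)
	data = urllib2.urlopen(url)
	stream = open(tmpfilename, 'wb')
	while True:
		block = data.read(block_size)
		if not block:
			break
		stream.write(block)
	stream.close()
	# Give the file its final name only after the transfer completed, so a
	# partial download stays recognisable by its .part suffix.
	if tmpfilename != filename:
		os.rename(tmpfilename, filename)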