From f8514630db9ba72a9bddc000c393698f4c116c81 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Sun, 12 Feb 2017 20:53:55 +0800 Subject: [PATCH 001/225] [einthusan] Fix extraction (closes #11416) The old test URLs are no longer valid, so I replace them with the one from #11416 --- ChangeLog | 3 + youtube_dl/extractor/einthusan.py | 117 ++++++++++++++++++------------ 2 files changed, 75 insertions(+), 45 deletions(-) diff --git a/ChangeLog b/ChangeLog index cba47a296..089449dfb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -3,6 +3,9 @@ version Core * TypeError is fixed with Python 2.7.13 on Windows (#11540, #12085) +Extractor +* [einthusan] Fix extraction (#11416) + version 2017.02.11 diff --git a/youtube_dl/extractor/einthusan.py b/youtube_dl/extractor/einthusan.py index 6ca07a13d..8a2a17b63 100644 --- a/youtube_dl/extractor/einthusan.py +++ b/youtube_dl/extractor/einthusan.py @@ -1,67 +1,94 @@ # coding: utf-8 from __future__ import unicode_literals +import base64 +import json + from .common import InfoExtractor -from ..compat import compat_urlparse +from ..compat import ( + compat_urlparse, + compat_str, +) from ..utils import ( - remove_start, - sanitized_Request, + extract_attributes, + ExtractorError, + get_elements_by_class, + urlencode_postdata, ) class EinthusanIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P[0-9]+)' - _TESTS = [ - { - 'url': 'http://www.einthusan.com/movies/watch.php?id=2447', - 'md5': 'd71379996ff5b7f217eca034c34e3461', - 'info_dict': { - 'id': '2447', - 'ext': 'mp4', - 'title': 'Ek Villain', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:9d29fc91a7abadd4591fb862fa560d93', - } - }, - { - 'url': 'http://www.einthusan.com/movies/watch.php?id=1671', - 'md5': 'b16a6fd3c67c06eb7c79c8a8615f4213', - 'info_dict': { - 'id': '1671', - 'ext': 'mp4', - 'title': 'Soodhu Kavvuum', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:b40f2bf7320b4f9414f3780817b2af8c', - } - }, - ] + _VALID_URL = r'https?://einthusan\.tv/movie/watch/(?P[0-9]+)' + _TEST = { + 'url': 'https://einthusan.tv/movie/watch/9097/', + 'md5': 'ff0f7f2065031b8a2cf13a933731c035', + 'info_dict': { + 'id': '9097', + 'ext': 'mp4', + 'title': 'Ae Dil Hai Mushkil', + 'description': 'md5:33ef934c82a671a94652a9b4e54d931b', + 'thumbnail': r're:^https?://.*\.jpg$', + } + } + + # reversed from jsoncrypto.prototype.decrypt() in einthusan-PGMovieWatcher.js + def _decrypt(self, encrypted_data, video_id): + return self._parse_json(base64.b64decode(( + encrypted_data[:10] + encrypted_data[-1] + encrypted_data[12:-1] + ).encode('ascii')).decode('utf-8'), video_id) def _real_extract(self, url): video_id = self._match_id(url) - request = sanitized_Request(url) - request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0') - webpage = self._download_webpage(request, video_id) + webpage = self._download_webpage(url, video_id) - title = self._html_search_regex( - r'
<h1><a[^>]+class=["\']movie-title["\'][^>]*>(.+?)</a></h1>', - webpage, 'title') + title = self._html_search_regex(r'<h3>([^<]+)</h3>
', webpage, 'title') - video_id = self._search_regex( - r'data-movieid=["\'](\d+)', webpage, 'video id', default=video_id) + player_params = extract_attributes(self._search_regex( + r'(]+id="UIVideoPlayer"[^>]+>)', webpage, 'player parameters')) - m3u8_url = self._download_webpage( - 'http://cdn.einthusan.com/geturl/%s/hd/London,Washington,Toronto,Dallas,San,Sydney/' - % video_id, video_id, headers={'Referer': url}) - formats = self._extract_m3u8_formats( - m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native') + page_id = self._html_search_regex( + ']+data-pageid="([^"]+)"', webpage, 'page ID') + video_data = self._download_json( + 'https://einthusan.tv/ajax/movie/watch/%s/' % video_id, video_id, + data=urlencode_postdata({ + 'xEvent': 'UIVideoPlayer.PingOutcome', + 'xJson': json.dumps({ + 'EJOutcomes': player_params['data-ejpingables'], + 'NativeHLS': False + }), + 'arcVersion': 3, + 'appVersion': 59, + 'gorilla.csrf.Token': page_id, + }))['Data'] - description = self._html_search_meta('description', webpage) + if isinstance(video_data, compat_str) and video_data.startswith('/ratelimited/'): + raise ExtractorError( + 'Download rate reached. Please try again later.', expected=True) + + ej_links = self._decrypt(video_data['EJLinks'], video_id) + + formats = [] + + m3u8_url = ej_links.get('HLSLink') + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native')) + + mp4_url = ej_links.get('MP4Link') + if mp4_url: + formats.append({ + 'url': mp4_url, + }) + + self._sort_formats(formats) + + description = get_elements_by_class('synopsis', webpage)[0] thumbnail = self._html_search_regex( - r'''''', - webpage, "thumbnail url", fatal=False) + r''']+src=(["'])(?P(?!\1).+?/moviecovers/(?!\1).+?)\1''', + webpage, 'thumbnail url', fatal=False, group='url') if thumbnail is not None: - thumbnail = compat_urlparse.urljoin(url, remove_start(thumbnail, '..')) + thumbnail = compat_urlparse.urljoin(url, thumbnail) return { 'id': video_id, From 0dac7cbb092c804f1548c4a60f15ac29a7db06b9 Mon Sep 17 00:00:00 2001 From: Remita Amine Date: Sun, 12 Feb 2017 17:24:45 +0100 Subject: [PATCH 002/225] [hotstar] improve extraction(closes #12096) - extract all qualities - detect drm protected videos - extract more metadata --- youtube_dl/extractor/hotstar.py | 46 +++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/youtube_dl/extractor/hotstar.py b/youtube_dl/extractor/hotstar.py index f05d765d6..3a7a66a34 100644 --- a/youtube_dl/extractor/hotstar.py +++ b/youtube_dl/extractor/hotstar.py @@ -34,11 +34,9 @@ class HotStarIE(InfoExtractor): 'only_matching': True, }] - _GET_CONTENT_TEMPLATE = 'http://account.hotstar.com/AVS/besc?action=GetAggregatedContentDetails&channel=PCTV&contentId=%s' - _GET_CDN_TEMPLATE = 'http://getcdn.hotstar.com/AVS/besc?action=GetCDN&asJson=Y&channel=%s&id=%s&type=%s' - - def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True): - json_data = super(HotStarIE, self)._download_json(url_or_request, video_id, note, fatal=fatal) + def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True, query=None): + json_data = super(HotStarIE, self)._download_json( + url_or_request, video_id, note, fatal=fatal, query=query) if json_data['resultCode'] != 'OK': if fatal: raise ExtractorError(json_data['errorDescription']) @@ -48,20 +46,37 @@ class HotStarIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) 
video_data = self._download_json( - self._GET_CONTENT_TEMPLATE % video_id, - video_id)['contentInfo'][0] + 'http://account.hotstar.com/AVS/besc', video_id, query={ + 'action': 'GetAggregatedContentDetails', + 'channel': 'PCTV', + 'contentId': video_id, + })['contentInfo'][0] + title = video_data['episodeTitle'] + + if video_data.get('encrypted') == 'Y': + raise ExtractorError('This video is DRM protected.', expected=True) formats = [] - # PCTV for extracting f4m manifest - for f in ('TABLET',): + for f in ('JIO',): format_data = self._download_json( - self._GET_CDN_TEMPLATE % (f, video_id, 'VOD'), - video_id, 'Downloading %s JSON metadata' % f, fatal=False) + 'http://getcdn.hotstar.com/AVS/besc', + video_id, 'Downloading %s JSON metadata' % f, + fatal=False, query={ + 'action': 'GetCDN', + 'asJson': 'Y', + 'channel': f, + 'id': video_id, + 'type': 'VOD', + }) if format_data: - format_url = format_data['src'] + format_url = format_data.get('src') + if not format_url: + continue ext = determine_ext(format_url) if ext == 'm3u8': - formats.extend(self._extract_m3u8_formats(format_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) + formats.extend(self._extract_m3u8_formats( + format_url, video_id, 'mp4', + m3u8_id='hls', fatal=False)) elif ext == 'f4m': # produce broken files continue @@ -75,9 +90,12 @@ class HotStarIE(InfoExtractor): return { 'id': video_id, - 'title': video_data['episodeTitle'], + 'title': title, 'description': video_data.get('description'), 'duration': int_or_none(video_data.get('duration')), 'timestamp': int_or_none(video_data.get('broadcastDate')), 'formats': formats, + 'episode': title, + 'episode_number': int_or_none(video_data.get('episodeNumber')), + 'series': video_data.get('contentTitle'), } From 1e2c3f61fc952620a52a8a3a79bcd1a6f7d8ecae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Feb 2017 03:33:23 +0700 Subject: [PATCH 003/225] [travis] Separate builds for core and download --- .travis.yml | 7 ++++++- devscripts/run_tests.sh | 19 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 devscripts/run_tests.sh diff --git a/.travis.yml b/.travis.yml index 4833c76e9..8ba93ec02 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,12 @@ python: - "3.5" - "3.6" sudo: false -script: nosetests test --verbose +env: + - YTDL_TEST_SET=core + - YTDL_TEST_SET=download +before_script: + - chmod +x ./devscripts/run_tests.sh +script: ./devscripts/run_tests.sh notifications: email: - filippo.valsorda@gmail.com diff --git a/devscripts/run_tests.sh b/devscripts/run_tests.sh new file mode 100644 index 000000000..7f4c1e083 --- /dev/null +++ b/devscripts/run_tests.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +DOWNLOAD_TESTS="age_restriction|download|subtitles|write_annotations|iqiyi_sdk_interpreter" + +test_set="" + +case "$YTDL_TEST_SET" in + core) + test_set="-I test_($DOWNLOAD_TESTS)\.py" + ;; + download) + test_set="-I test_(?!$DOWNLOAD_TESTS).+\.py" + ;; + *) + break + ;; +esac + +nosetests test --verbose $test_set From 9dad94185367cdfde0de21cd8e595094cbe31acc Mon Sep 17 00:00:00 2001 From: Remita Amine Date: Mon, 13 Feb 2017 11:43:20 +0100 Subject: [PATCH 004/225] [disney] improve extraction - add support for more urls - detect expired videos - skip Adobe Flash Access protected videos closes #4975 closes #11000 closes #11882 closes #11936 --- youtube_dl/extractor/disney.py | 60 ++++++++++++++++++++++++++++----- youtube_dl/extractor/generic.py | 13 ------- 2 files changed, 52 insertions(+), 21 deletions(-) diff --git 
a/youtube_dl/extractor/disney.py b/youtube_dl/extractor/disney.py index 396873c6d..939d1338c 100644 --- a/youtube_dl/extractor/disney.py +++ b/youtube_dl/extractor/disney.py @@ -9,13 +9,15 @@ from ..utils import ( unified_strdate, compat_str, determine_ext, + ExtractorError, ) class DisneyIE(InfoExtractor): _VALID_URL = r'''(?x) - https?://(?P(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr)|starwars\.com))/(?:embed/|(?:[^/]+/)+[\w-]+-)(?P[a-z0-9]{24})''' + https?://(?P(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr)|(?:starwars|marvelkids)\.com))/(?:(?:embed/|(?:[^/]+/)+[\w-]+-)(?P[a-z0-9]{24})|(?:[^/]+/)?(?P[^/?#]+))''' _TESTS = [{ + # Disney.EmbedVideo 'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977', 'info_dict': { 'id': '545ed1857afee5a0ec239977', @@ -28,6 +30,20 @@ class DisneyIE(InfoExtractor): # m3u8 download 'skip_download': True, } + }, { + # Grill.burger + 'url': 'http://www.starwars.com/video/rogue-one-a-star-wars-story-intro-featurette', + 'info_dict': { + 'id': '5454e9f4e9804a552e3524c8', + 'ext': 'mp4', + 'title': '"Intro" Featurette: Rogue One: A Star Wars Story', + 'upload_date': '20170104', + 'description': 'Go behind-the-scenes of Rogue One: A Star Wars Story in this featurette with Director Gareth Edwards and the cast of the film.', + }, + 'params': { + # m3u8 download + 'skip_download': True, + } }, { 'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2', 'only_matching': True, @@ -43,31 +59,55 @@ class DisneyIE(InfoExtractor): }, { 'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097', 'only_matching': True, + }, { + 'url': 'http://spiderman.marvelkids.com/embed/522900d2ced3c565e4cc0677', + 'only_matching': True, + }, { + 'url': 'http://spiderman.marvelkids.com/videos/contest-of-champions-part-four-clip-1', + 'only_matching': True, + }, { + 'url': 'http://disneyjunior.en.disneyme.com/dj/watch-my-friends-tigger-and-pooh-promo', + 'only_matching': True, + }, { + 'url': 'http://disneyjunior.disney.com/galactech-the-galactech-grab-galactech-an-admiral-rescue', + 'only_matching': True, }] def _real_extract(self, url): - domain, video_id = re.match(self._VALID_URL, url).groups() - webpage = self._download_webpage( - 'http://%s/embed/%s' % (domain, video_id), video_id) - video_data = self._parse_json(self._search_regex( - r'Disney\.EmbedVideo=({.+});', webpage, 'embed data'), video_id)['video'] + domain, video_id, display_id = re.match(self._VALID_URL, url).groups() + if not video_id: + webpage = self._download_webpage(url, display_id) + grill = re.sub(r'"\s*\+\s*"', '', self._search_regex( + r'Grill\.burger\s*=\s*({.+})\s*:', + webpage, 'grill data')) + page_data = next(s for s in self._parse_json(grill, display_id)['stack'] if s.get('type') == 'video') + video_data = page_data['data'][0] + else: + webpage = self._download_webpage( + 'http://%s/embed/%s' % (domain, video_id), video_id) + page_data = self._parse_json(self._search_regex( + r'Disney\.EmbedVideo\s*=\s*({.+});', + webpage, 'embed data'), video_id) + video_data = page_data['video'] for external in video_data.get('externals', []): if external.get('source') == 'vevo': return self.url_result('vevo:' + external['data_id'], 'Vevo') + video_id = video_data['id'] title = video_data['title'] formats = [] for flavor in video_data.get('flavors', []): flavor_format = flavor.get('format') flavor_url = flavor.get('url') - if not flavor_url 
or not re.match(r'https?://', flavor_url): + if not flavor_url or not re.match(r'https?://', flavor_url) or flavor_format == 'mp4_access': continue tbr = int_or_none(flavor.get('bitrate')) if tbr == 99999: formats.extend(self._extract_m3u8_formats( - flavor_url, video_id, 'mp4', m3u8_id=flavor_format, fatal=False)) + flavor_url, video_id, 'mp4', + m3u8_id=flavor_format, fatal=False)) continue format_id = [] if flavor_format: @@ -88,6 +128,10 @@ class DisneyIE(InfoExtractor): 'ext': ext, 'vcodec': 'none' if (width == 0 and height == 0) else None, }) + if not formats and video_data.get('expired'): + raise ExtractorError( + '%s said: %s' % (self.IE_NAME, page_data['translations']['video_expired']), + expected=True) self._sort_formats(formats) subtitles = {} diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 1c233f038..494cc3c84 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -991,19 +991,6 @@ class GenericIE(InfoExtractor): 'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014', }, }, - # Kaltura embed protected with referrer - { - 'url': 'http://www.disney.nl/disney-channel/filmpjes/achter-de-schermen#/videoId/violetta-achter-de-schermen-ruggero', - 'info_dict': { - 'id': '1_g4fbemnq', - 'ext': 'mp4', - 'title': 'Violetta - Achter De Schermen - Ruggero', - 'description': 'Achter de schermen met Ruggero', - 'timestamp': 1435133761, - 'upload_date': '20150624', - 'uploader_id': 'echojecka', - }, - }, # Kaltura embed with single quotes { 'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY', From 1de9f78e71214e130b5882662cdcd716b737e6ca Mon Sep 17 00:00:00 2001 From: Sergey M Date: Mon, 13 Feb 2017 18:56:05 +0800 Subject: [PATCH 005/225] [travis] Separate builds for core and download --- .travis.yml | 7 ++++++- devscripts/run_tests.sh | 19 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 devscripts/run_tests.sh diff --git a/.travis.yml b/.travis.yml index 4833c76e9..8ba93ec02 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,7 +8,12 @@ python: - "3.5" - "3.6" sudo: false -script: nosetests test --verbose +env: + - YTDL_TEST_SET=core + - YTDL_TEST_SET=download +before_script: + - chmod +x ./devscripts/run_tests.sh +script: ./devscripts/run_tests.sh notifications: email: - filippo.valsorda@gmail.com diff --git a/devscripts/run_tests.sh b/devscripts/run_tests.sh new file mode 100644 index 000000000..7f4c1e083 --- /dev/null +++ b/devscripts/run_tests.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +DOWNLOAD_TESTS="age_restriction|download|subtitles|write_annotations|iqiyi_sdk_interpreter" + +test_set="" + +case "$YTDL_TEST_SET" in + core) + test_set="-I test_($DOWNLOAD_TESTS)\.py" + ;; + download) + test_set="-I test_(?!$DOWNLOAD_TESTS).+\.py" + ;; + *) + break + ;; +esac + +nosetests test --verbose $test_set From 454e5cdb17dd4e77f3d387045b083f3d3ed61ae0 Mon Sep 17 00:00:00 2001 From: Remita Amine Date: Mon, 13 Feb 2017 14:28:30 +0100 Subject: [PATCH 006/225] [limelight] add support referer protected videos --- youtube_dl/extractor/generic.py | 9 ++++++--- youtube_dl/extractor/limelight.py | 23 ++++++++++++++++------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 494cc3c84..a2b0298ec 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -2337,8 +2337,9 @@ class GenericIE(InfoExtractor): 'Channel': 'channel', 'ChannelList': 'channel_list', } - 
return self.url_result('limelight:%s:%s' % ( - lm[mobj.group(1)], mobj.group(2)), 'Limelight%s' % mobj.group(1), mobj.group(2)) + return self.url_result(smuggle_url('limelight:%s:%s' % ( + lm[mobj.group(1)], mobj.group(2)), {'source_url': url}), + 'Limelight%s' % mobj.group(1), mobj.group(2)) mobj = re.search( r'''(?sx) @@ -2348,7 +2349,9 @@ class GenericIE(InfoExtractor): value=(["\'])(?:(?!\3).)*mediaId=(?P[a-z0-9]{32}) ''', webpage) if mobj: - return self.url_result('limelight:media:%s' % mobj.group('id')) + return self.url_result(smuggle_url( + 'limelight:media:%s' % mobj.group('id'), + {'source_url': url}), 'LimelightMedia', mobj.group('id')) # Look for AdobeTVVideo embeds mobj = re.search( diff --git a/youtube_dl/extractor/limelight.py b/youtube_dl/extractor/limelight.py index e635f3c4d..a3712665b 100644 --- a/youtube_dl/extractor/limelight.py +++ b/youtube_dl/extractor/limelight.py @@ -8,6 +8,7 @@ from ..utils import ( determine_ext, float_or_none, int_or_none, + unsmuggle_url, ) @@ -15,20 +16,23 @@ class LimelightBaseIE(InfoExtractor): _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s' _API_URL = 'http://api.video.limelight.com/rest/organizations/%s/%s/%s/%s.json' - def _call_playlist_service(self, item_id, method, fatal=True): + def _call_playlist_service(self, item_id, method, fatal=True, referer=None): + headers = {} + if referer: + headers['Referer'] = referer return self._download_json( self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), - item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal) + item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal, headers=headers) def _call_api(self, organization_id, item_id, method): return self._download_json( self._API_URL % (organization_id, self._API_PATH, item_id, method), item_id, 'Downloading API %s JSON' % method) - def _extract(self, item_id, pc_method, mobile_method, meta_method): - pc = self._call_playlist_service(item_id, pc_method) + def _extract(self, item_id, pc_method, mobile_method, meta_method, referer=None): + pc = self._call_playlist_service(item_id, pc_method, referer=referer) metadata = self._call_api(pc['orgId'], item_id, meta_method) - mobile = self._call_playlist_service(item_id, mobile_method, fatal=False) + mobile = self._call_playlist_service(item_id, mobile_method, fatal=False, referer=referer) return pc, mobile, metadata def _extract_info(self, streams, mobile_urls, properties): @@ -207,10 +211,13 @@ class LimelightMediaIE(LimelightBaseIE): _API_PATH = 'media' def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) pc, mobile, metadata = self._extract( - video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', 'properties') + video_id, 'getPlaylistByMediaId', + 'getMobilePlaylistByMediaId', 'properties', + smuggled_data.get('source_url')) return self._extract_info( pc['playlistItems'][0].get('streams', []), @@ -247,11 +254,13 @@ class LimelightChannelIE(LimelightBaseIE): _API_PATH = 'channels' def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) channel_id = self._match_id(url) pc, mobile, medias = self._extract( channel_id, 'getPlaylistByChannelId', - 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', 'media') + 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', + 'media', smuggled_data.get('source_url')) entries = [ self._extract_info( From 89c6691f9d130ec63552a6ece4743caa572fc962 Mon Sep 17 00:00:00 2001 From: Remita Amine 
Date: Mon, 13 Feb 2017 15:08:48 +0100 Subject: [PATCH 007/225] [bellmedia] accept longer video id(closes #12114) --- youtube_dl/extractor/bellmedia.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/bellmedia.py b/youtube_dl/extractor/bellmedia.py index 32326ed9e..1f5b6ed92 100644 --- a/youtube_dl/extractor/bellmedia.py +++ b/youtube_dl/extractor/bellmedia.py @@ -24,7 +24,7 @@ class BellMediaIE(InfoExtractor): space )\.ca| much\.com - )/.*?(?:\bvid=|-vid|~|%7E|/(?:episode)?)(?P[0-9]{6})''' + )/.*?(?:\bvid=|-vid|~|%7E|/(?:episode)?)(?P[0-9]{6,})''' _TESTS = [{ 'url': 'http://www.ctv.ca/video/player?vid=706966', 'md5': 'ff2ebbeae0aa2dcc32a830c3fd69b7b0', @@ -55,6 +55,9 @@ class BellMediaIE(InfoExtractor): }, { 'url': 'http://www.much.com/shows/the-almost-impossible-gameshow/928979/episode-6', 'only_matching': True, + }, { + 'url': 'http://www.ctv.ca/DCs-Legends-of-Tomorrow/Video/S2E11-Turncoat-vid1051430', + 'only_matching': True, }] _DOMAINS = { 'thecomedynetwork': 'comedy', From 6e5956e6ba32c5e4d186e79fbaff0842818ae56b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 13 Feb 2017 23:17:48 +0700 Subject: [PATCH 008/225] [lemonde] Fallback delegate extraction to generic extractor (closes #12115, closes #12116) --- youtube_dl/extractor/lemonde.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/lemonde.py b/youtube_dl/extractor/lemonde.py index 42568f315..3306892e8 100644 --- a/youtube_dl/extractor/lemonde.py +++ b/youtube_dl/extractor/lemonde.py @@ -7,20 +7,40 @@ class LemondeIE(InfoExtractor): _VALID_URL = r'https?://(?:.+?\.)?lemonde\.fr/(?:[^/]+/)*(?P[^/]+)\.html' _TESTS = [{ 'url': 'http://www.lemonde.fr/police-justice/video/2016/01/19/comprendre-l-affaire-bygmalion-en-cinq-minutes_4849702_1653578.html', - 'md5': '01fb3c92de4c12c573343d63e163d302', + 'md5': 'da120c8722d8632eec6ced937536cc98', 'info_dict': { 'id': 'lqm3kl', 'ext': 'mp4', 'title': "Comprendre l'affaire Bygmalion en 5 minutes", 'thumbnail': r're:^https?://.*\.jpg', - 'duration': 320, + 'duration': 309, 'upload_date': '20160119', 'timestamp': 1453194778, 'uploader_id': '3pmkp', }, + }, { + # standard iframe embed + 'url': 'http://www.lemonde.fr/les-decodeurs/article/2016/10/18/tout-comprendre-du-ceta-le-petit-cousin-du-traite-transatlantique_5015920_4355770.html', + 'info_dict': { + 'id': 'uzsxms', + 'ext': 'mp4', + 'title': "CETA : quelles suites pour l'accord commercial entre l'Europe et le Canada ?", + 'thumbnail': r're:^https?://.*\.jpg', + 'duration': 325, + 'upload_date': '20161021', + 'timestamp': 1477044540, + 'uploader_id': '3pmkp', + }, + 'params': { + 'skip_download': True, + }, }, { 'url': 'http://redaction.actu.lemonde.fr/societe/video/2016/01/18/calais-debut-des-travaux-de-defrichement-dans-la-jungle_4849233_3224.html', 'only_matching': True, + }, { + # YouTube embeds + 'url': 'http://www.lemonde.fr/pixels/article/2016/12/09/pourquoi-pewdiepie-superstar-de-youtube-a-menace-de-fermer-sa-chaine_5046649_4408996.html', + 'only_matching': True, }] def _real_extract(self, url): @@ -30,5 +50,9 @@ class LemondeIE(InfoExtractor): digiteka_url = self._proto_relative_url(self._search_regex( r'url\s*:\s*(["\'])(?P(?:https?://)?//(?:www\.)?(?:digiteka\.net|ultimedia\.com)/deliver/.+?)\1', - webpage, 'digiteka url', group='url')) - return self.url_result(digiteka_url, 'Digiteka') + webpage, 'digiteka url', group='url', default=None)) + + if digiteka_url: + return 
self.url_result(digiteka_url, 'Digiteka') + + return self.url_result(url, 'Generic') From f6d6ca1db3020e7c7771880d0c4b58fdf732a8d5 Mon Sep 17 00:00:00 2001 From: Vobe Date: Sat, 11 Feb 2017 21:11:55 +0100 Subject: [PATCH 009/225] [xtube] Improve title extraction --- youtube_dl/extractor/xtube.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py index 11717fe98..ed3a37649 100644 --- a/youtube_dl/extractor/xtube.py +++ b/youtube_dl/extractor/xtube.py @@ -53,7 +53,7 @@ class XTubeIE(InfoExtractor): if not display_id: display_id = video_id - url = 'http://www.xtube.com/video-watch/-%s' % video_id + url = 'http://www.xtube.com/watch.php?v=%s' % video_id req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1; cookiesAccepted=1') @@ -73,7 +73,7 @@ class XTubeIE(InfoExtractor): self._sort_formats(formats) title = self._search_regex( - (r'
<h1>
(?P[^<]+)</h1>', r'videoTitle\s*:\s*(["\'])(?P<title>.+?)\1'), + (r'<h1>\s*(?P<title>[^<]+?)\s*</h1>', r'videoTitle\s*:\s*(["\'])(?P<title>.+?)\1'), webpage, 'title', group='title') description = self._search_regex( r'</h1>\s*<p>([^<]+)', webpage, 'description', fatal=False) From 085f169ffebc17ec8b2bfc63aec8f5df57c7bdcf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 13 Feb 2017 23:44:43 +0700 Subject: [PATCH 010/225] [xtube] Fix extraction for both kinds of video id (closes #12088) --- youtube_dl/extractor/xtube.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py index ed3a37649..5584674a0 100644 --- a/youtube_dl/extractor/xtube.py +++ b/youtube_dl/extractor/xtube.py @@ -44,6 +44,9 @@ class XTubeIE(InfoExtractor): }, { 'url': 'xtube:625837', 'only_matching': True, + }, { + 'url': 'xtube:kVTUy_G222_', + 'only_matching': True, }] def _real_extract(self, url): @@ -53,11 +56,16 @@ class XTubeIE(InfoExtractor): if not display_id: display_id = video_id - url = 'http://www.xtube.com/watch.php?v=%s' % video_id - req = sanitized_Request(url) - req.add_header('Cookie', 'age_verified=1; cookiesAccepted=1') - webpage = self._download_webpage(req, display_id) + if video_id.isdigit() and len(video_id) < 11: + url_pattern = 'http://www.xtube.com/video-watch/-%s' + else: + url_pattern = 'http://www.xtube.com/watch.php?v=%s' + + webpage = self._download_webpage( + url_pattern % video_id, display_id, headers={ + 'Cookie': 'age_verified=1; cookiesAccepted=1', + }) sources = self._parse_json(self._search_regex( r'(["\'])sources\1\s*:\s*(?P<sources>{.+?}),', From 50de3dbad39d0b8cc1529113894f146f6f3f24b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 14 Feb 2017 01:00:06 +0700 Subject: [PATCH 011/225] [zdf] Fix extraction (closes #12117) --- youtube_dl/extractor/zdf.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py index a365923fb..523bb5c95 100644 --- a/youtube_dl/extractor/zdf.py +++ b/youtube_dl/extractor/zdf.py @@ -20,9 +20,9 @@ from ..utils import ( class ZDFBaseIE(InfoExtractor): - def _call_api(self, url, player, referrer, video_id): + def _call_api(self, url, player, referrer, video_id, item): return self._download_json( - url, video_id, 'Downloading JSON content', + url, video_id, 'Downloading JSON %s' % item, headers={ 'Referer': referrer, 'Api-Auth': 'Bearer %s' % player['apiToken'], @@ -104,7 +104,7 @@ class ZDFIE(ZDFBaseIE): }) formats.append(f) - def _extract_entry(self, url, content, video_id): + def _extract_entry(self, url, player, content, video_id): title = content.get('title') or content['teaserHeadline'] t = content['mainVideoContent']['http://zdf.de/rels/target'] @@ -116,7 +116,8 @@ class ZDFIE(ZDFBaseIE): 'http://zdf.de/rels/streams/ptmd-template'].replace( '{playerId}', 'portal') - ptmd = self._download_json(urljoin(url, ptmd_path), video_id) + ptmd = self._call_api( + urljoin(url, ptmd_path), player, url, video_id, 'metadata') formats = [] track_uris = set() @@ -174,8 +175,9 @@ class ZDFIE(ZDFBaseIE): } def _extract_regular(self, url, player, video_id): - content = self._call_api(player['content'], player, url, video_id) - return self._extract_entry(player['content'], content, video_id) + content = self._call_api( + player['content'], player, url, video_id, 'content') + return 
self._extract_entry(player['content'], player, content, video_id) def _extract_mobile(self, video_id): document = self._download_json( From cedf08ff54d192a0e32ecb3b943f50299cda7ea2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 14 Feb 2017 01:07:35 +0700 Subject: [PATCH 012/225] [ChangeLog] Actualize --- ChangeLog | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index 089449dfb..d651f8880 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,7 +4,17 @@ Core * TypeError is fixed with Python 2.7.13 on Windows (#11540, #12085) Extractor +* [zdf] Fix extraction (#12117) +* [xtube] Fix extraction for both kinds of video id (#12088) +* [xtube] Improve title extraction (#12088) ++ [lemonde] Fallback delegate extraction to generic extractor (#12115, #12116) +* [bellmedia] Allow video id longer than 6 characters (#12114) ++ [limelight] Add support for referer protected videos +* [disney] Improve extraction (#4975, #11000, #11882, #11936) +* [hotstar] Improve extraction (#12096) * [einthusan] Fix extraction (#11416) ++ [aenetworks] Add support for lifetimemovieclub.com (#12097) +* [youtube] Fix parsing codecs (#12091) version 2017.02.11 From 58a65ba852443075fe38a3ef74798de05dd57bda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 14 Feb 2017 01:09:18 +0700 Subject: [PATCH 013/225] release 2017.02.14 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- youtube_dl/version.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 7bd301cc8..32aa55d83 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.11*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.11** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.14*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.14** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v <your command line> [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.11 +[debug] youtube-dl version 2017.02.14 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index d651f8880..9242b3eee 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version <unreleased> +version 2017.02.14 Core * TypeError is fixed with Python 2.7.13 on Windows (#11540, #12085) diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 1f84acfea..3e7e7c0bf 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.11' +__version__ = '2017.02.14' From fcca0d53a8fa47614a39a433a3da7d1ab1d88ed9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Caletka?= <ondrej@caletka.cz> Date: Tue, 14 Feb 2017 15:57:17 +0100 Subject: [PATCH 014/225] [ceskatelevize] Quick fix to revert to using old HLS-based playlist This fixes recent changes in iVysilani. Proper patch should migrate to MPEG-DASH version, which is now the default. --- youtube_dl/extractor/ceskatelevize.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py index 4f88c31ad..0f1453b99 100644 --- a/youtube_dl/extractor/ceskatelevize.py +++ b/youtube_dl/extractor/ceskatelevize.py @@ -21,10 +21,10 @@ class CeskaTelevizeIE(InfoExtractor): _TESTS = [{ 'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220', 'info_dict': { - 'id': '61924494876951776', + 'id': '61924494877246241', 'ext': 'mp4', - 'title': 'Hyde Park Civilizace', - 'description': 'md5:fe93f6eda372d150759d11644ebbfb4a', + 'title': 'Hyde Park Civilizace: Život v Grónsku', + 'description': 'md5:3fec8f6bb497be5cdb0c9e8781076626', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 3350, }, @@ -121,6 +121,7 @@ class CeskaTelevizeIE(InfoExtractor): req.add_header('Content-type', 'application/x-www-form-urlencoded') req.add_header('x-addr', '127.0.0.1') req.add_header('X-Requested-With', 'XMLHttpRequest') + req.add_header('User-agent', 'Mozilla/5.0') req.add_header('Referer', url) playlistpage = self._download_json(req, playlist_id) From 5cb2d36c82abf3b753910afe3013b274e31a247a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 14 Feb 2017 22:56:39 +0700 Subject: [PATCH 015/225] [ceskatelevize] Extract DASH formats (closes #12119, closes #12133) --- youtube_dl/extractor/ceskatelevize.py | 142 +++++++++++++++----------- 1 file changed, 83 insertions(+), 59 deletions(-) diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py index 0f1453b99..e08bf264c 100644 --- a/youtube_dl/extractor/ceskatelevize.py +++ b/youtube_dl/extractor/ceskatelevize.py @@ -13,6 +13,7 @@ from ..utils import ( float_or_none, 
sanitized_Request, urlencode_postdata, + USER_AGENTS, ) @@ -114,71 +115,94 @@ class CeskaTelevizeIE(InfoExtractor): 'requestSource': 'iVysilani', } - req = sanitized_Request( - 'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist', - data=urlencode_postdata(data)) - - req.add_header('Content-type', 'application/x-www-form-urlencoded') - req.add_header('x-addr', '127.0.0.1') - req.add_header('X-Requested-With', 'XMLHttpRequest') - req.add_header('User-agent', 'Mozilla/5.0') - req.add_header('Referer', url) - - playlistpage = self._download_json(req, playlist_id) - - playlist_url = playlistpage['url'] - if playlist_url == 'error_region': - raise ExtractorError(NOT_AVAILABLE_STRING, expected=True) - - req = sanitized_Request(compat_urllib_parse_unquote(playlist_url)) - req.add_header('Referer', url) - - playlist_title = self._og_search_title(webpage, default=None) - playlist_description = self._og_search_description(webpage, default=None) - - playlist = self._download_json(req, playlist_id)['playlist'] - playlist_len = len(playlist) - entries = [] - for item in playlist: - is_live = item.get('type') == 'LIVE' - formats = [] - for format_id, stream_url in item['streamUrls'].items(): - formats.extend(self._extract_m3u8_formats( - stream_url, playlist_id, 'mp4', - entry_protocol='m3u8' if is_live else 'm3u8_native', - fatal=False)) - self._sort_formats(formats) - item_id = item.get('id') or item['assetId'] - title = item['title'] + for user_agent in (None, USER_AGENTS['Safari']): + req = sanitized_Request( + 'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist', + data=urlencode_postdata(data)) - duration = float_or_none(item.get('duration')) - thumbnail = item.get('previewImageUrl') + req.add_header('Content-type', 'application/x-www-form-urlencoded') + req.add_header('x-addr', '127.0.0.1') + req.add_header('X-Requested-With', 'XMLHttpRequest') + if user_agent: + req.add_header('User-Agent', user_agent) + req.add_header('Referer', url) - subtitles = {} - if item.get('type') == 'VOD': - subs = item.get('subtitles') - if subs: - subtitles = self.extract_subtitles(episode_id, subs) + playlistpage = self._download_json(req, playlist_id, fatal=False) - if playlist_len == 1: - final_title = playlist_title or title - if is_live: - final_title = self._live_title(final_title) - else: - final_title = '%s (%s)' % (playlist_title, title) + if not playlistpage: + continue - entries.append({ - 'id': item_id, - 'title': final_title, - 'description': playlist_description if playlist_len == 1 else None, - 'thumbnail': thumbnail, - 'duration': duration, - 'formats': formats, - 'subtitles': subtitles, - 'is_live': is_live, - }) + playlist_url = playlistpage['url'] + if playlist_url == 'error_region': + raise ExtractorError(NOT_AVAILABLE_STRING, expected=True) + + req = sanitized_Request(compat_urllib_parse_unquote(playlist_url)) + req.add_header('Referer', url) + + playlist_title = self._og_search_title(webpage, default=None) + playlist_description = self._og_search_description(webpage, default=None) + + playlist = self._download_json(req, playlist_id, fatal=False) + if not playlist: + continue + + playlist = playlist.get('playlist') + if not isinstance(playlist, list): + continue + + playlist_len = len(playlist) + + for num, item in enumerate(playlist): + is_live = item.get('type') == 'LIVE' + formats = [] + for format_id, stream_url in item.get('streamUrls', {}).items(): + if 'playerType=flash' in stream_url: + formats.extend(self._extract_m3u8_formats( + stream_url, playlist_id, 'mp4', + 
entry_protocol='m3u8' if is_live else 'm3u8_native', + fatal=False)) + else: + formats.extend(self._extract_mpd_formats( + stream_url, playlist_id, fatal=False)) + + if user_agent and len(entries) == playlist_len: + entries[num]['formats'].extend(formats) + continue + + item_id = item.get('id') or item['assetId'] + title = item['title'] + + duration = float_or_none(item.get('duration')) + thumbnail = item.get('previewImageUrl') + + subtitles = {} + if item.get('type') == 'VOD': + subs = item.get('subtitles') + if subs: + subtitles = self.extract_subtitles(episode_id, subs) + + if playlist_len == 1: + final_title = playlist_title or title + if is_live: + final_title = self._live_title(final_title) + else: + final_title = '%s (%s)' % (playlist_title, title) + + entries.append({ + 'id': item_id, + 'title': final_title, + 'description': playlist_description if playlist_len == 1 else None, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, + 'subtitles': subtitles, + 'is_live': is_live, + }) + + for e in entries: + self._sort_formats(e['formats']) return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) From 9a372f14b422de15acf91e25a90375688b2ba3fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 14 Feb 2017 23:52:41 +0700 Subject: [PATCH 016/225] [pornhub] Extract video URL from tv platform site (#12007, #12129) --- youtube_dl/extractor/pornhub.py | 44 ++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py index 818d99c1f..7a2737032 100644 --- a/youtube_dl/extractor/pornhub.py +++ b/youtube_dl/extractor/pornhub.py @@ -2,27 +2,27 @@ from __future__ import unicode_literals import itertools -import os +# import os import re from .common import InfoExtractor from ..compat import ( compat_HTTPError, - compat_urllib_parse_unquote, - compat_urllib_parse_unquote_plus, - compat_urllib_parse_urlparse, + # compat_urllib_parse_unquote, + # compat_urllib_parse_unquote_plus, + # compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, int_or_none, js_to_json, orderedSet, - sanitized_Request, + # sanitized_Request, str_to_int, ) -from ..aes import ( - aes_decrypt_text -) +# from ..aes import ( +# aes_decrypt_text +# ) class PornHubIE(InfoExtractor): @@ -109,10 +109,14 @@ class PornHubIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - req = sanitized_Request( - 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id) - req.add_header('Cookie', 'age_verified=1') - webpage = self._download_webpage(req, video_id) + def dl_webpage(platform): + return self._download_webpage( + 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id, + video_id, headers={ + 'Cookie': 'age_verified=1; platform=%s' % platform, + }) + + webpage = dl_webpage('pc') error_msg = self._html_search_regex( r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>', @@ -123,10 +127,19 @@ class PornHubIE(InfoExtractor): 'PornHub said: %s' % error_msg, expected=True, video_id=video_id) + tv_webpage = dl_webpage('tv') + + video_url = self._search_regex( + r'<video[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//.+?)\1', tv_webpage, + 'video url', group='url') + + title = self._search_regex( + r'<h1>([^>]+)</h1>', tv_webpage, 'title', default=None) + # video_title from flashvars contains whitespace instead of non-ASCII (see # 
http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying # on that anymore. - title = self._html_search_meta( + title = title or self._html_search_meta( 'twitter:title', webpage, default=None) or self._search_regex( (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)', r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1', @@ -156,6 +169,7 @@ class PornHubIE(InfoExtractor): comment_count = self._extract_count( r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment') + """ video_variables = {} for video_variablename, quote, video_variable in re.findall( r'(player_quality_[0-9]{3,4}p\w+)\s*=\s*(["\'])(.+?)\2;', webpage): @@ -197,6 +211,7 @@ class PornHubIE(InfoExtractor): 'height': height, }) self._sort_formats(formats) + """ page_params = self._parse_json(self._search_regex( r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})', @@ -209,6 +224,7 @@ class PornHubIE(InfoExtractor): return { 'id': video_id, + 'url': video_url, 'uploader': video_uploader, 'title': title, 'thumbnail': thumbnail, @@ -217,7 +233,7 @@ class PornHubIE(InfoExtractor): 'like_count': like_count, 'dislike_count': dislike_count, 'comment_count': comment_count, - 'formats': formats, + # 'formats': formats, 'age_limit': 18, 'tags': tags, 'categories': categories, From 22ce9ad2bdad2bf79b22f82cfff7f58156c9d349 Mon Sep 17 00:00:00 2001 From: Marek Rusinowski <marekrusinowski@gmail.com> Date: Mon, 13 Feb 2017 21:42:26 +0100 Subject: [PATCH 017/225] [vod.pl] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vodpl.py | 36 ++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 youtube_dl/extractor/vodpl.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 76ad7c40b..657e45e6f 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1147,6 +1147,7 @@ from .vlive import ( VLiveChannelIE ) from .vodlocker import VodlockerIE +from .vodpl import VODPlIE from .vodplatform import VODPlatformIE from .voicerepublic import VoiceRepublicIE from .voxmedia import VoxMediaIE diff --git a/youtube_dl/extractor/vodpl.py b/youtube_dl/extractor/vodpl.py new file mode 100644 index 000000000..f612347ce --- /dev/null +++ b/youtube_dl/extractor/vodpl.py @@ -0,0 +1,36 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .onet import OnetBaseIE +from ..utils import clean_html + + +class VODPlIE(OnetBaseIE): + _VALID_URL = r'https?://vod\.pl/(?:.*/)?(?P<id>[0-9a-zA-Z]+)' + + _TEST = { + 'url': 'https://vod.pl/filmy/chlopaki-nie-placza/3ep3jns', + 'md5': 'a7dc3b2f7faa2421aefb0ecaabf7ec74', + 'info_dict': { + 'id': '3ep3jns', + 'ext': 'mp4', + 'title': 'Chłopaki nie płaczą', + 'description': 'Kuba Brenner aby pomóc swojemu nieśmiałemu przyjacielowi Oskarowi wynajmuje w agencji towarzyskiej dwie panie. Po upojnej nocy okazuje się, że chłopcy nie byli przygotowani finansowo. "Opiekun artystyczny" dziewczyn zabiera w ramach rekompensaty drogocenną rzeźbę należącą do wujka Oskara. Kłopoty chłopców zaczynają się, gdy Kuba udaje się do agencji aby wykupić figurkę i trafia w sam środek mafijnej transakcji... Idiotyczny przypadek sprawia, że w klubie dochodzi do strzelaniny podczas której Grucha i Bolec zostają ranni, ginie również walizka z pieniędzmi... 
Podejrzenie pada na Kubę.', + 'timestamp': 1463415154, + 'duration': 5765, + 'upload_date': '20160516', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + mvp_id = self._search_mvp_id(webpage) + + info_dict = self._extract_from_id(mvp_id, webpage) + info_dict.update({ + 'id': video_id, + 'description': clean_html(info_dict['description']).strip().replace('\r', '\n') + }) + + return info_dict From 6092ccd05844976ea946ba5277f2b00ccb5c7920 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 00:52:31 +0700 Subject: [PATCH 018/225] [vodpl] Make more robust and add another test (closes #12122) --- youtube_dl/extractor/vodpl.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/youtube_dl/extractor/vodpl.py b/youtube_dl/extractor/vodpl.py index f612347ce..9e919708e 100644 --- a/youtube_dl/extractor/vodpl.py +++ b/youtube_dl/extractor/vodpl.py @@ -2,35 +2,31 @@ from __future__ import unicode_literals from .onet import OnetBaseIE -from ..utils import clean_html class VODPlIE(OnetBaseIE): - _VALID_URL = r'https?://vod\.pl/(?:.*/)?(?P<id>[0-9a-zA-Z]+)' + _VALID_URL = r'https?://vod\.pl/(?:[^/]+/)+(?P<id>[0-9a-zA-Z]+)' - _TEST = { + _TESTS = [{ 'url': 'https://vod.pl/filmy/chlopaki-nie-placza/3ep3jns', 'md5': 'a7dc3b2f7faa2421aefb0ecaabf7ec74', 'info_dict': { 'id': '3ep3jns', 'ext': 'mp4', 'title': 'Chłopaki nie płaczą', - 'description': 'Kuba Brenner aby pomóc swojemu nieśmiałemu przyjacielowi Oskarowi wynajmuje w agencji towarzyskiej dwie panie. Po upojnej nocy okazuje się, że chłopcy nie byli przygotowani finansowo. "Opiekun artystyczny" dziewczyn zabiera w ramach rekompensaty drogocenną rzeźbę należącą do wujka Oskara. Kłopoty chłopców zaczynają się, gdy Kuba udaje się do agencji aby wykupić figurkę i trafia w sam środek mafijnej transakcji... Idiotyczny przypadek sprawia, że w klubie dochodzi do strzelaniny podczas której Grucha i Bolec zostają ranni, ginie również walizka z pieniędzmi... 
Podejrzenie pada na Kubę.', + 'description': 'md5:f5f03b84712e55f5ac9f0a3f94445224', 'timestamp': 1463415154, 'duration': 5765, 'upload_date': '20160516', }, - } + }, { + 'url': 'https://vod.pl/seriale/belfer-na-planie-praca-kamery-online/2c10heh', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - mvp_id = self._search_mvp_id(webpage) - - info_dict = self._extract_from_id(mvp_id, webpage) - info_dict.update({ - 'id': video_id, - 'description': clean_html(info_dict['description']).strip().replace('\r', '\n') - }) - + info_dict = self._extract_from_id(self._search_mvp_id(webpage), webpage) + info_dict['id'] = video_id return info_dict From d31aa74fdb3f69071ba869feba03525f67e974f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 00:58:18 +0700 Subject: [PATCH 019/225] [onetmvp] Add shortcut extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/onet.py | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 657e45e6f..b2ee0c1b0 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -694,6 +694,7 @@ from .ondemandkorea import OnDemandKoreaIE from .onet import ( OnetIE, OnetChannelIE, + OnetMVPIE, ) from .onionstudios import OnionStudiosIE from .ooyala import ( diff --git a/youtube_dl/extractor/onet.py b/youtube_dl/extractor/onet.py index 0a501b3e5..46bad492a 100644 --- a/youtube_dl/extractor/onet.py +++ b/youtube_dl/extractor/onet.py @@ -23,7 +23,7 @@ class OnetBaseIE(InfoExtractor): return self._search_regex( r'id=(["\'])mvp:(?P<id>.+?)\1', webpage, 'mvp id', group='id') - def _extract_from_id(self, video_id, webpage): + def _extract_from_id(self, video_id, webpage=None): response = self._download_json( 'http://qi.ckm.onetapi.pl/', video_id, query={ @@ -74,8 +74,10 @@ class OnetBaseIE(InfoExtractor): meta = video.get('meta', {}) - title = self._og_search_title(webpage, default=None) or meta['title'] - description = self._og_search_description(webpage, default=None) or meta.get('description') + title = (self._og_search_title( + webpage, default=None) if webpage else None) or meta['title'] + description = (self._og_search_description( + webpage, default=None) if webpage else None) or meta.get('description') duration = meta.get('length') or meta.get('lenght') timestamp = parse_iso8601(meta.get('addDate'), ' ') @@ -89,6 +91,18 @@ class OnetBaseIE(InfoExtractor): } +class OnetMVPIE(OnetBaseIE): + _VALID_URL = r'onetmvp:(?P<id>\d+\.\d+)' + + _TEST = { + 'url': 'onetmvp:381027.1509591944', + 'only_matching': True, + } + + def _real_extract(self, url): + return self._extract_from_id(self._match_id(url)) + + class OnetIE(OnetBaseIE): _VALID_URL = r'https?://(?:www\.)?onet\.tv/[a-z]/[a-z]+/(?P<display_id>[0-9a-z-]+)/(?P<id>[0-9a-z]+)' IE_NAME = 'onet.tv' From 43a3d9edfcdad8eb33758c4a7f4f912322001b8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 01:14:06 +0700 Subject: [PATCH 020/225] [onetpl] Add support for onet.pl (closes #10507) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/onet.py | 32 ++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index b2ee0c1b0..be3688d5a 100644 --- a/youtube_dl/extractor/extractors.py +++ 
b/youtube_dl/extractor/extractors.py @@ -695,6 +695,7 @@ from .onet import ( OnetIE, OnetChannelIE, OnetMVPIE, + OnetPlIE, ) from .onionstudios import OnionStudiosIE from .ooyala import ( diff --git a/youtube_dl/extractor/onet.py b/youtube_dl/extractor/onet.py index 46bad492a..801aadbff 100644 --- a/youtube_dl/extractor/onet.py +++ b/youtube_dl/extractor/onet.py @@ -181,3 +181,35 @@ class OnetChannelIE(OnetBaseIE): channel_title = strip_or_none(get_element_by_class('o_channelName', webpage)) channel_description = strip_or_none(get_element_by_class('o_channelDesc', webpage)) return self.playlist_result(entries, channel_id, channel_title, channel_description) + + +class OnetPlIE(InfoExtractor): + _VALID_URL = r'https?://(?:[^/]+\.)?onet\.pl/(?:[^/]+/)+(?P<id>[0-9a-z]+)' + IE_NAME = 'onet.pl' + + _TESTS = [{ + 'url': 'http://eurosport.onet.pl/zimowe/skoki-narciarskie/ziobro-wygral-kwalifikacje-w-pjongczangu/9ckrly', + 'md5': 'b94021eb56214c3969380388b6e73cb0', + 'info_dict': { + 'id': '1561707.1685479', + 'ext': 'mp4', + 'title': 'Ziobro wygrał kwalifikacje w Pjongczangu', + 'description': 'md5:61fb0740084d2d702ea96512a03585b4', + 'upload_date': '20170214', + 'timestamp': 1487078046, + }, + }, { + 'url': 'http://film.onet.pl/zwiastuny/ghost-in-the-shell-drugi-zwiastun-pl/5q6yl3', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + mvp_id = self._search_regex( + r'data-params-mvp=["\'](\d+\.\d+)', webpage, 'mvp id') + + return self.url_result( + 'onetmvp:%s' % mvp_id, OnetMVPIE.ie_key(), video_id=mvp_id) From 04a741232f8e03cc91a3539066c66aed802076b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 01:23:55 +0700 Subject: [PATCH 021/225] [onetpl] Add support for businessinsider.com.pl and plejada.pl --- youtube_dl/extractor/onet.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/onet.py b/youtube_dl/extractor/onet.py index 801aadbff..94f57990b 100644 --- a/youtube_dl/extractor/onet.py +++ b/youtube_dl/extractor/onet.py @@ -184,7 +184,7 @@ class OnetChannelIE(OnetBaseIE): class OnetPlIE(InfoExtractor): - _VALID_URL = r'https?://(?:[^/]+\.)?onet\.pl/(?:[^/]+/)+(?P<id>[0-9a-z]+)' + _VALID_URL = r'https?://(?:[^/]+\.)?(?:onet|businessinsider\.com|plejada)\.pl/(?:[^/]+/)+(?P<id>[0-9a-z]+)' IE_NAME = 'onet.pl' _TESTS = [{ @@ -201,6 +201,15 @@ class OnetPlIE(InfoExtractor): }, { 'url': 'http://film.onet.pl/zwiastuny/ghost-in-the-shell-drugi-zwiastun-pl/5q6yl3', 'only_matching': True, + }, { + 'url': 'http://moto.onet.pl/jak-wybierane-sa-miejsca-na-fotoradary/6rs04e', + 'only_matching': True, + }, { + 'url': 'http://businessinsider.com.pl/wideo/scenariusz-na-koniec-swiata-wedlug-nasa/dwnqptk', + 'only_matching': True, + }, { + 'url': 'http://plejada.pl/weronika-rosati-o-swoim-domniemanym-slubie/n2bq89', + 'only_matching': True, }] def _real_extract(self, url): From 3021cf83b7cd45283fd1a72859e46f44e67ce7bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 02:08:32 +0700 Subject: [PATCH 022/225] [pinkbike] Fix uploader extraction (closes #12054) --- youtube_dl/extractor/pinkbike.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/pinkbike.py b/youtube_dl/extractor/pinkbike.py index 6a4580d54..9f3501f77 100644 --- a/youtube_dl/extractor/pinkbike.py +++ b/youtube_dl/extractor/pinkbike.py @@ -64,7 +64,8 @@ 
class PinkbikeIE(InfoExtractor): 'video:duration', webpage, 'duration')) uploader = self._search_regex( - r'un:\s*"([^"]+)"', webpage, 'uploader', fatal=False) + r'<a[^>]+\brel=["\']author[^>]+>([^<]+)', webpage, + 'uploader', fatal=False) upload_date = unified_strdate(self._search_regex( r'class="fullTime"[^>]+title="([^"]+)"', webpage, 'upload date', fatal=False)) From 1bd05345ea4b91598ec04b8e0d33fd14f9e2eddc Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Wed, 15 Feb 2017 14:18:50 +0100 Subject: [PATCH 023/225] [amcnetworks] fix extraction(closes #12127) --- youtube_dl/extractor/amcnetworks.py | 30 ++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/youtube_dl/extractor/amcnetworks.py b/youtube_dl/extractor/amcnetworks.py index 87c803e94..b71d1a093 100644 --- a/youtube_dl/extractor/amcnetworks.py +++ b/youtube_dl/extractor/amcnetworks.py @@ -53,20 +53,30 @@ class AMCNetworksIE(ThePlatformIE): 'mbr': 'true', 'manifest': 'm3u', } - media_url = self._search_regex(r'window\.platformLinkURL\s*=\s*[\'"]([^\'"]+)', webpage, 'media url') + media_url = self._search_regex( + r'window\.platformLinkURL\s*=\s*[\'"]([^\'"]+)', + webpage, 'media url') theplatform_metadata = self._download_theplatform_metadata(self._search_regex( - r'https?://link.theplatform.com/s/([^?]+)', media_url, 'theplatform_path'), display_id) + r'link\.theplatform\.com/s/([^?]+)', + media_url, 'theplatform_path'), display_id) info = self._parse_theplatform_metadata(theplatform_metadata) video_id = theplatform_metadata['pid'] title = theplatform_metadata['title'] rating = theplatform_metadata['ratings'][0]['rating'] - auth_required = self._search_regex(r'window\.authRequired\s*=\s*(true|false);', webpage, 'auth required') + auth_required = self._search_regex( + r'window\.authRequired\s*=\s*(true|false);', + webpage, 'auth required') if auth_required == 'true': - requestor_id = self._search_regex(r'window\.requestor_id\s*=\s*[\'"]([^\'"]+)', webpage, 'requestor id') - resource = self._get_mvpd_resource(requestor_id, title, video_id, rating) - query['auth'] = self._extract_mvpd_auth(url, video_id, requestor_id, resource) + requestor_id = self._search_regex( + r'window\.requestor_id\s*=\s*[\'"]([^\'"]+)', + webpage, 'requestor id') + resource = self._get_mvpd_resource( + requestor_id, title, video_id, rating) + query['auth'] = self._extract_mvpd_auth( + url, video_id, requestor_id, resource) media_url = update_url_query(media_url, query) - formats, subtitles = self._extract_theplatform_smil(media_url, video_id) + formats, subtitles = self._extract_theplatform_smil( + media_url, video_id) self._sort_formats(formats) info.update({ 'id': video_id, @@ -78,9 +88,11 @@ class AMCNetworksIE(ThePlatformIE): if ns_keys: ns = list(ns_keys)[0] series = theplatform_metadata.get(ns + '$show') - season_number = int_or_none(theplatform_metadata.get(ns + '$season')) + season_number = int_or_none( + theplatform_metadata.get(ns + '$season')) episode = theplatform_metadata.get(ns + '$episodeTitle') - episode_number = int_or_none(theplatform_metadata.get(ns + '$episode')) + episode_number = int_or_none( + theplatform_metadata.get(ns + '$episode')) if season_number: title = 'Season %d - %s' % (season_number, title) if series: From db13c16ef8968613680e2bbc85f373c3e74faf98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 23:12:10 +0700 Subject: [PATCH 024/225] [utils] Add support for quoted string literals in --match-filter (closes 
#8050, closes #12142, closes #12144) --- test/test_YoutubeDL.py | 24 ++++++++++++++++++++++++ youtube_dl/utils.py | 9 +++++++-- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index 8bf00bea9..d07c35be8 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# coding: utf-8 from __future__ import unicode_literals @@ -606,6 +607,8 @@ class TestYoutubeDL(unittest.TestCase): 'duration': 30, 'filesize': 10 * 1024, 'playlist_id': '42', + 'uploader': "變態妍字幕版 太妍 тест", + 'creator': "тест ' 123 ' тест--", } second = { 'id': '2', @@ -616,6 +619,7 @@ class TestYoutubeDL(unittest.TestCase): 'description': 'foo', 'filesize': 5 * 1024, 'playlist_id': '43', + 'uploader': "тест 123", } videos = [first, second] @@ -656,6 +660,26 @@ class TestYoutubeDL(unittest.TestCase): res = get_videos(f) self.assertEqual(res, ['1']) + f = match_filter_func('uploader = "變態妍字幕版 太妍 тест"') + res = get_videos(f) + self.assertEqual(res, ['1']) + + f = match_filter_func('uploader != "變態妍字幕版 太妍 тест"') + res = get_videos(f) + self.assertEqual(res, ['2']) + + f = match_filter_func('creator = "тест \' 123 \' тест--"') + res = get_videos(f) + self.assertEqual(res, ['1']) + + f = match_filter_func("creator = 'тест \\' 123 \\' тест--'") + res = get_videos(f) + self.assertEqual(res, ['1']) + + f = match_filter_func(r"creator = 'тест \' 123 \' тест--' & duration > 30") + res = get_videos(f) + self.assertEqual(res, []) + def test_playlist_items_selection(self): entries = [{ 'id': compat_str(i), diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 1279a9042..07c07be6f 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -2383,6 +2383,7 @@ def _match_one(filter_part, dct): \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)| + (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)| (?P<strval>(?![0-9.])[a-z0-9A-Z]*) ) \s*$ @@ -2391,7 +2392,8 @@ def _match_one(filter_part, dct): if m: op = COMPARISON_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) - if (m.group('strval') is not None or + if (m.group('quotedstrval') is not None or + m.group('strval') is not None or # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see @@ -2401,7 +2403,10 @@ def _match_one(filter_part, dct): if m.group('op') not in ('=', '!='): raise ValueError( 'Operator %s does not support string values!' 
% m.group('op')) - comparison_value = m.group('strval') or m.group('intval') + comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval') + quote = m.group('quote') + if quote is not None: + comparison_value = comparison_value.replace(r'\%s' % quote, quote) else: try: comparison_value = int(m.group('intval')) From 398dea321001b99ac4ad28d3d60a5317c4a439d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 23:20:46 +0700 Subject: [PATCH 025/225] [test_YoutubeDL] Fix invalid escape sequences --- test/test_YoutubeDL.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index d07c35be8..2cfcf743a 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -541,10 +541,10 @@ class TestYoutubeDL(unittest.TestCase): self.assertEqual(ydl._format_note({}), '') assertRegexpMatches(self, ydl._format_note({ 'vbr': 10, - }), '^\s*10k$') + }), r'^\s*10k$') assertRegexpMatches(self, ydl._format_note({ 'fps': 30, - }), '^30fps$') + }), r'^30fps$') def test_postprocessors(self): filename = 'post-processor-testfile.mp4' From 099cfdb770f458de7cfdf3e814fbb9f43db217ea Mon Sep 17 00:00:00 2001 From: Anisse Astier <anisse@astier.eu> Date: Wed, 15 Feb 2017 17:28:31 +0100 Subject: [PATCH 026/225] [devscripts/run_tests.sh] Change permission for script to 755 --- .travis.yml | 2 -- devscripts/run_tests.sh | 0 2 files changed, 2 deletions(-) mode change 100644 => 100755 devscripts/run_tests.sh diff --git a/.travis.yml b/.travis.yml index 8ba93ec02..f41e11137 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,8 +11,6 @@ sudo: false env: - YTDL_TEST_SET=core - YTDL_TEST_SET=download -before_script: - - chmod +x ./devscripts/run_tests.sh script: ./devscripts/run_tests.sh notifications: email: diff --git a/devscripts/run_tests.sh b/devscripts/run_tests.sh old mode 100644 new mode 100755 From de4d378c0cd9035d4ab93dc6826a17c76f388641 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 15 Feb 2017 23:38:00 +0700 Subject: [PATCH 027/225] [ceskatelevize] Prefix format ids --- youtube_dl/extractor/ceskatelevize.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py index e08bf264c..1b16e5aaa 100644 --- a/youtube_dl/extractor/ceskatelevize.py +++ b/youtube_dl/extractor/ceskatelevize.py @@ -162,10 +162,10 @@ class CeskaTelevizeIE(InfoExtractor): formats.extend(self._extract_m3u8_formats( stream_url, playlist_id, 'mp4', entry_protocol='m3u8' if is_live else 'm3u8_native', - fatal=False)) + m3u8_id='hls', fatal=False)) else: formats.extend(self._extract_mpd_formats( - stream_url, playlist_id, fatal=False)) + stream_url, playlist_id, mpd_id='dash', fatal=False)) if user_agent and len(entries) == playlist_len: entries[num]['formats'].extend(formats) From eafaeb226a277008fb8df72bf0326f2b369ff6a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 16 Feb 2017 00:04:15 +0700 Subject: [PATCH 028/225] [ceskatelevize] Lower priority for audio description sources (#12119) --- youtube_dl/extractor/ceskatelevize.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py index 1b16e5aaa..b1dfacf80 100644 --- a/youtube_dl/extractor/ceskatelevize.py +++ b/youtube_dl/extractor/ceskatelevize.py @@ -159,13 +159,19 @@ 
class CeskaTelevizeIE(InfoExtractor): formats = [] for format_id, stream_url in item.get('streamUrls', {}).items(): if 'playerType=flash' in stream_url: - formats.extend(self._extract_m3u8_formats( + stream_formats = self._extract_m3u8_formats( stream_url, playlist_id, 'mp4', entry_protocol='m3u8' if is_live else 'm3u8_native', - m3u8_id='hls', fatal=False)) + m3u8_id='hls-%s' % format_id, fatal=False) else: - formats.extend(self._extract_mpd_formats( - stream_url, playlist_id, mpd_id='dash', fatal=False)) + stream_formats = self._extract_mpd_formats( + stream_url, playlist_id, + mpd_id='dash-%s' % format_id, fatal=False) + # See https://github.com/rg3/youtube-dl/issues/12119#issuecomment-280037031 + if format_id == 'audioDescription': + for f in stream_formats: + f['source_preference'] = -10 + formats.extend(stream_formats) if user_agent and len(entries) == playlist_len: entries[num]['formats'].extend(formats) From 3aa25395aa02b7a33e0fbf6d38e39fffee268255 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 16 Feb 2017 00:08:56 +0700 Subject: [PATCH 029/225] [ChangeLog] Actualize --- ChangeLog | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/ChangeLog b/ChangeLog index 9242b3eee..912e1bbdc 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,21 @@ +version <unreleased> + +Core ++ [utils] Add support for quoted string literals in --match-filter (#8050, + #12142, #12144) + +Extractors +* [ceskatelevize] Lower priority for audio description sources (#12119) +* [amcnetworks] Fix extraction (#12127) +* [pinkbike] Fix uploader extraction (#12054) ++ [onetpl] Add support for businessinsider.com.pl and plejada.pl ++ [onetpl] Add support for onet.pl (#10507) ++ [onetmvp] Add shortcut extractor ++ [vodpl] Add support for vod.pl (#12122) ++ [pornhub] Extract video URL from tv platform site (#12007, #12129) ++ [ceskatelevize] Extract DASH formats (#12119, #12133) + + version 2017.02.14 Core From 2480b056c137e514662b70053ec2df1391b6c2ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 16 Feb 2017 00:10:04 +0700 Subject: [PATCH 030/225] release 2017.02.16 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- docs/supportedsites.md | 3 +++ youtube_dl/version.py | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 32aa55d83..06711f73b 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.14*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.14** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.16*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
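As a quick usage note for the quoted string literal support that the [utils]
patch above adds to --match-filter: the same filter strings the new tests
exercise can be tried programmatically through match_filter_func. A minimal
sketch (the uploader name and duration threshold are made-up examples, not
values taken from the patch):

from youtube_dl.utils import match_filter_func

# Command-line equivalent:
#   --match-filter "uploader = 'Some Channel' & duration > 60"
match = match_filter_func("uploader = 'Some Channel' & duration > 60")

# match_filter_func returns None when a video passes the filter and a
# human-readable rejection message when it does not.
passing = {'id': '1', 'title': 'a', 'uploader': 'Some Channel', 'duration': 90}
failing = {'id': '2', 'title': 'b', 'uploader': 'Another Channel', 'duration': 90}
assert match(passing) is None
assert match(failing) is not None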
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.16** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v <your command line> [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.14 +[debug] youtube-dl version 2017.02.16 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 912e1bbdc..8ef8a8307 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version <unreleased> +version 2017.02.16 Core + [utils] Add support for quoted string literals in --match-filter (#8050, diff --git a/docs/supportedsites.md b/docs/supportedsites.md index 3e84f1237..5a436e8f7 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -546,8 +546,10 @@ - **OktoberfestTV** - **on.aol.com** - **OnDemandKorea** + - **onet.pl** - **onet.tv** - **onet.tv:channel** + - **OnetMVP** - **OnionStudios** - **Ooyala** - **OoyalaExternal** @@ -900,6 +902,7 @@ - **vlive** - **vlive:channel** - **Vodlocker** + - **VODPl** - **VODPlatform** - **VoiceRepublic** - **VoxMedia** diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 3e7e7c0bf..323e80954 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.14' +__version__ = '2017.02.16' From b898f0a173fa040ddf95dbd97650cec07a8f19f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 16 Feb 2017 04:57:42 +0700 Subject: [PATCH 031/225] [elpais] Fix typo and improve extraction (closes #12139) --- youtube_dl/extractor/elpais.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/youtube_dl/extractor/elpais.py b/youtube_dl/extractor/elpais.py index 99e00cf3c..b89f6db62 100644 --- a/youtube_dl/extractor/elpais.py +++ b/youtube_dl/extractor/elpais.py @@ -39,6 +39,18 @@ class ElPaisIE(InfoExtractor): 'description': 'La nave portaba cientos de ánforas y se hundió cerca de la isla de Cabrera por razones desconocidas', 'upload_date': '20170127', }, + }, { + 'url': 'http://epv.elpais.com/epv/2017/02/14/programa_la_voz_de_inaki/1487062137_075943.html', + 'info_dict': { + 'id': '1487062137_075943', + 'ext': 'mp4', + 'title': 'Disyuntivas', + 'description': 'md5:a0fb1485c4a6a8a917e6f93878e66218', + 'upload_date': '20170214', + }, + 'params': { + 'skip_download': True, + }, }] def _real_extract(self, url): @@ -59,14 +71,15 @@ class ElPaisIE(InfoExtractor): video_url = prefix + video_suffix thumbnail_suffix = self._search_regex( r"(?:URLMediaStill|urlFotogramaFijo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'", - webpage, 'thumbnail URL', fatal=False) + webpage, 'thumbnail URL', default=None) thumbnail = ( None if thumbnail_suffix is None - else prefix + thumbnail_suffix) + else prefix + thumbnail_suffix) or self._og_search_thumbnail(webpage) title = self._html_search_regex( - (r"tituloVideo\s*=\s*'([^']+)'", webpage, 'title', - r'<h2 class="entry-header entry-title.*?>(.*?)</h2>'), - webpage, 'title') + 
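A side note on the typo this elpais hunk fixes: in the old call above, webpage
and 'title' accidentally ended up inside the tuple of alternative patterns, so
once the first regex failed the page source itself would be treated as a
pattern. _html_search_regex tries the members of such a tuple in order, which
is what makes the fallback chain in the corrected call below work. A
simplified, self-contained model of that behaviour (not the actual helper in
extractor/common.py):

import re

def search_regex(pattern, string, default=None):
    # Simplified: a tuple/list of patterns is tried in order and the first
    # match wins; a single string is treated as a one-element tuple.
    patterns = (pattern,) if isinstance(pattern, str) else pattern
    for p in patterns:
        mobj = re.search(p, string)
        if mobj:
            return mobj.group(1)
    return default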
(r"tituloVideo\s*=\s*'([^']+)'", + r'<h2 class="entry-header entry-title.*?>(.*?)</h2>', + r'<h1[^>]+class="titulo"[^>]*>([^<]+)'), + webpage, 'title', default=None) or self._og_search_title(webpage) upload_date = unified_strdate(self._search_regex( r'<p class="date-header date-int updated"\s+title="([^"]+)">', webpage, 'upload date', default=None) or self._html_search_meta( From a4a554a79354981fcab55de8eaab7b95a40bbb48 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Thu, 16 Feb 2017 23:42:36 +0800 Subject: [PATCH 032/225] [generic] Try parsing JWPlayer embedded videos (closes #12030) --- ChangeLog | 6 ++ youtube_dl/extractor/archiveorg.py | 4 +- youtube_dl/extractor/common.py | 118 ++++++++++++++++++++ youtube_dl/extractor/generic.py | 20 ++++ youtube_dl/extractor/jwplatform.py | 132 +---------------------- youtube_dl/extractor/ondemandkorea.py | 4 +- youtube_dl/extractor/pornhub.py | 44 -------- youtube_dl/extractor/pornoxo.py | 4 +- youtube_dl/extractor/rentv.py | 3 +- youtube_dl/extractor/rudo.py | 4 +- youtube_dl/extractor/screencastomatic.py | 4 +- youtube_dl/extractor/sendtonews.py | 4 +- youtube_dl/extractor/thisav.py | 4 +- youtube_dl/extractor/tvnoe.py | 4 +- youtube_dl/extractor/vidzi.py | 4 +- youtube_dl/extractor/wimp.py | 4 +- 16 files changed, 166 insertions(+), 197 deletions(-) diff --git a/ChangeLog b/ChangeLog index 8ef8a8307..4e69b03d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version <unreleased> + +Extractors ++ [generic] Support complex JWPlayer embedded videos (#12030) + + version 2017.02.16 Core diff --git a/youtube_dl/extractor/archiveorg.py b/youtube_dl/extractor/archiveorg.py index 486dff82d..e21045bed 100644 --- a/youtube_dl/extractor/archiveorg.py +++ b/youtube_dl/extractor/archiveorg.py @@ -1,13 +1,13 @@ from __future__ import unicode_literals -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import ( unified_strdate, clean_html, ) -class ArchiveOrgIE(JWPlatformBaseIE): +class ArchiveOrgIE(InfoExtractor): IE_NAME = 'archive.org' IE_DESC = 'archive.org videos' _VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^/?#]+)(?:[?].*)?$' diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 9681453ca..f6ff56eda 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -40,6 +40,7 @@ from ..utils import ( fix_xml_ampersands, float_or_none, int_or_none, + js_to_json, parse_iso8601, RegexNotFoundError, sanitize_filename, @@ -2073,6 +2074,123 @@ class InfoExtractor(object): }) return formats + @staticmethod + def _find_jwplayer_data(webpage): + mobj = re.search( + r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\s*\((?P<options>[^)]+)\)', + webpage) + if mobj: + return mobj.group('options') + + def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs): + jwplayer_data = self._parse_json( + self._find_jwplayer_data(webpage), video_id, + transform_source=js_to_json) + return self._parse_jwplayer_data( + jwplayer_data, video_id, *args, **kwargs) + + def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True, + m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): + # JWPlayer backward compatibility: flattened playlists + # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96 + if 'playlist' not in jwplayer_data: + jwplayer_data = {'playlist': [jwplayer_data]} + + entries = [] + + # JWPlayer backward compatibility: single playlist item + # 
https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10 + if not isinstance(jwplayer_data['playlist'], list): + jwplayer_data['playlist'] = [jwplayer_data['playlist']] + + for video_data in jwplayer_data['playlist']: + # JWPlayer backward compatibility: flattened sources + # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35 + if 'sources' not in video_data: + video_data['sources'] = [video_data] + + this_video_id = video_id or video_data['mediaid'] + + formats = [] + for source in video_data['sources']: + source_url = self._proto_relative_url(source['file']) + if base_url: + source_url = compat_urlparse.urljoin(base_url, source_url) + source_type = source.get('type') or '' + ext = mimetype2ext(source_type) or determine_ext(source_url) + if source_type == 'hls' or ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + source_url, this_video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False)) + elif ext == 'mpd': + formats.extend(self._extract_mpd_formats( + source_url, this_video_id, mpd_id=mpd_id, fatal=False)) + # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67 + elif source_type.startswith('audio') or ext in ('oga', 'aac', 'mp3', 'mpeg', 'vorbis'): + formats.append({ + 'url': source_url, + 'vcodec': 'none', + 'ext': ext, + }) + else: + height = int_or_none(source.get('height')) + if height is None: + # Often no height is provided but there is a label in + # format like 1080p. + height = int_or_none(self._search_regex( + r'^(\d{3,})[pP]$', source.get('label') or '', + 'height', default=None)) + a_format = { + 'url': source_url, + 'width': int_or_none(source.get('width')), + 'height': height, + 'ext': ext, + } + if source_url.startswith('rtmp'): + a_format['ext'] = 'flv' + + # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as + # of jwplayer.flash.swf + rtmp_url_parts = re.split( + r'((?:mp4|mp3|flv):)', source_url, 1) + if len(rtmp_url_parts) == 3: + rtmp_url, prefix, play_path = rtmp_url_parts + a_format.update({ + 'url': rtmp_url, + 'play_path': prefix + play_path, + }) + if rtmp_params: + a_format.update(rtmp_params) + formats.append(a_format) + self._sort_formats(formats) + + subtitles = {} + tracks = video_data.get('tracks') + if tracks and isinstance(tracks, list): + for track in tracks: + if track.get('kind') != 'captions': + continue + track_url = urljoin(base_url, track.get('file')) + if not track_url: + continue + subtitles.setdefault(track.get('label') or 'en', []).append({ + 'url': self._proto_relative_url(track_url) + }) + + entries.append({ + 'id': this_video_id, + 'title': video_data['title'] if require_title else video_data.get('title'), + 'description': video_data.get('description'), + 'thumbnail': self._proto_relative_url(video_data.get('image')), + 'timestamp': int_or_none(video_data.get('pubdate')), + 'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')), + 'subtitles': subtitles, + 'formats': formats, + }) + if len(entries) == 1: + return entries[0] + else: + return self.playlist_result(entries) + def _live_title(self, name): """ Generate the title for a live video """ now = datetime.datetime.now() diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index a2b0298ec..3db31debe 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -20,6 +20,7 @@ from ..utils import ( float_or_none, HEADRequest, is_html, + js_to_json, orderedSet, sanitized_Request, smuggle_url, @@ -961,6 
+962,16 @@ class GenericIE(InfoExtractor): 'skip_download': True, } }, + # Complex jwplayer + { + 'url': 'http://www.indiedb.com/games/king-machine/videos', + 'info_dict': { + 'id': 'videos', + 'ext': 'mp4', + 'title': 'king machine trailer 1', + 'thumbnail': r're:^https?://.*\.jpg$', + }, + }, # rtl.nl embed { 'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen', @@ -2488,6 +2499,15 @@ class GenericIE(InfoExtractor): self._sort_formats(entry['formats']) return self.playlist_result(entries) + jwplayer_data_str = self._find_jwplayer_data(webpage) + if jwplayer_data_str: + try: + jwplayer_data = self._parse_json( + jwplayer_data_str, video_id, transform_source=js_to_json) + return self._parse_jwplayer_data(jwplayer_data, video_id) + except ExtractorError: + pass + def check_video(vurl): if YoutubeIE.suitable(vurl): return True diff --git a/youtube_dl/extractor/jwplatform.py b/youtube_dl/extractor/jwplatform.py index aff7ab49a..33d55f770 100644 --- a/youtube_dl/extractor/jwplatform.py +++ b/youtube_dl/extractor/jwplatform.py @@ -4,139 +4,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_urlparse -from ..utils import ( - determine_ext, - float_or_none, - int_or_none, - js_to_json, - mimetype2ext, - urljoin, -) -class JWPlatformBaseIE(InfoExtractor): - @staticmethod - def _find_jwplayer_data(webpage): - # TODO: Merge this with JWPlayer-related codes in generic.py - - mobj = re.search( - r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\s*\((?P<options>[^)]+)\)', - webpage) - if mobj: - return mobj.group('options') - - def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs): - jwplayer_data = self._parse_json( - self._find_jwplayer_data(webpage), video_id, - transform_source=js_to_json) - return self._parse_jwplayer_data( - jwplayer_data, video_id, *args, **kwargs) - - def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True, - m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): - # JWPlayer backward compatibility: flattened playlists - # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96 - if 'playlist' not in jwplayer_data: - jwplayer_data = {'playlist': [jwplayer_data]} - - entries = [] - - # JWPlayer backward compatibility: single playlist item - # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10 - if not isinstance(jwplayer_data['playlist'], list): - jwplayer_data['playlist'] = [jwplayer_data['playlist']] - - for video_data in jwplayer_data['playlist']: - # JWPlayer backward compatibility: flattened sources - # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35 - if 'sources' not in video_data: - video_data['sources'] = [video_data] - - this_video_id = video_id or video_data['mediaid'] - - formats = [] - for source in video_data['sources']: - source_url = self._proto_relative_url(source['file']) - if base_url: - source_url = compat_urlparse.urljoin(base_url, source_url) - source_type = source.get('type') or '' - ext = mimetype2ext(source_type) or determine_ext(source_url) - if source_type == 'hls' or ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( - source_url, this_video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False)) - elif ext == 'mpd': - formats.extend(self._extract_mpd_formats( - source_url, this_video_id, mpd_id=mpd_id, fatal=False)) - # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67 - elif 
source_type.startswith('audio') or ext in ('oga', 'aac', 'mp3', 'mpeg', 'vorbis'): - formats.append({ - 'url': source_url, - 'vcodec': 'none', - 'ext': ext, - }) - else: - height = int_or_none(source.get('height')) - if height is None: - # Often no height is provided but there is a label in - # format like 1080p. - height = int_or_none(self._search_regex( - r'^(\d{3,})[pP]$', source.get('label') or '', - 'height', default=None)) - a_format = { - 'url': source_url, - 'width': int_or_none(source.get('width')), - 'height': height, - 'ext': ext, - } - if source_url.startswith('rtmp'): - a_format['ext'] = 'flv' - - # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as - # of jwplayer.flash.swf - rtmp_url_parts = re.split( - r'((?:mp4|mp3|flv):)', source_url, 1) - if len(rtmp_url_parts) == 3: - rtmp_url, prefix, play_path = rtmp_url_parts - a_format.update({ - 'url': rtmp_url, - 'play_path': prefix + play_path, - }) - if rtmp_params: - a_format.update(rtmp_params) - formats.append(a_format) - self._sort_formats(formats) - - subtitles = {} - tracks = video_data.get('tracks') - if tracks and isinstance(tracks, list): - for track in tracks: - if track.get('kind') != 'captions': - continue - track_url = urljoin(base_url, track.get('file')) - if not track_url: - continue - subtitles.setdefault(track.get('label') or 'en', []).append({ - 'url': self._proto_relative_url(track_url) - }) - - entries.append({ - 'id': this_video_id, - 'title': video_data['title'] if require_title else video_data.get('title'), - 'description': video_data.get('description'), - 'thumbnail': self._proto_relative_url(video_data.get('image')), - 'timestamp': int_or_none(video_data.get('pubdate')), - 'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')), - 'subtitles': subtitles, - 'formats': formats, - }) - if len(entries) == 1: - return entries[0] - else: - return self.playlist_result(entries) - - -class JWPlatformIE(JWPlatformBaseIE): +class JWPlatformIE(InfoExtractor): _VALID_URL = r'(?:https?://content\.jwplatform\.com/(?:feeds|players|jw6)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})' _TEST = { 'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js', diff --git a/youtube_dl/extractor/ondemandkorea.py b/youtube_dl/extractor/ondemandkorea.py index de1d6b08a..dcd157777 100644 --- a/youtube_dl/extractor/ondemandkorea.py +++ b/youtube_dl/extractor/ondemandkorea.py @@ -1,14 +1,14 @@ # coding: utf-8 from __future__ import unicode_literals -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import ( ExtractorError, js_to_json, ) -class OnDemandKoreaIE(JWPlatformBaseIE): +class OnDemandKoreaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?P<id>[^/]+)\.html' _TEST = { 'url': 'http://www.ondemandkorea.com/ask-us-anything-e43.html', diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py index 7a2737032..9b413590a 100644 --- a/youtube_dl/extractor/pornhub.py +++ b/youtube_dl/extractor/pornhub.py @@ -169,50 +169,6 @@ class PornHubIE(InfoExtractor): comment_count = self._extract_count( r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment') - """ - video_variables = {} - for video_variablename, quote, video_variable in re.findall( - r'(player_quality_[0-9]{3,4}p\w+)\s*=\s*(["\'])(.+?)\2;', webpage): - video_variables[video_variablename] = video_variable - - video_urls = [] - for encoded_video_url in re.findall( - r'player_quality_[0-9]{3,4}p\s*=(.+?);', webpage): - for varname, varval in 
video_variables.items(): - encoded_video_url = encoded_video_url.replace(varname, varval) - video_urls.append(re.sub(r'[\s+]', '', encoded_video_url)) - - if webpage.find('"encrypted":true') != -1: - password = compat_urllib_parse_unquote_plus( - self._search_regex(r'"video_title":"([^"]+)', webpage, 'password')) - video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls)) - - formats = [] - for video_url in video_urls: - path = compat_urllib_parse_urlparse(video_url).path - extension = os.path.splitext(path)[1][1:] - format = path.split('/')[5].split('_')[:2] - format = '-'.join(format) - - m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format) - if m is None: - height = None - tbr = None - else: - height = int(m.group('height')) - tbr = int(m.group('tbr')) - - formats.append({ - 'url': video_url, - 'ext': extension, - 'format': format, - 'format_id': format, - 'tbr': tbr, - 'height': height, - }) - self._sort_formats(formats) - """ - page_params = self._parse_json(self._search_regex( r'page_params\.zoneDetails\[([\'"])[^\'"]+\1\]\s*=\s*(?P<data>{[^}]+})', webpage, 'page parameters', group='data', default='{}'), diff --git a/youtube_dl/extractor/pornoxo.py b/youtube_dl/extractor/pornoxo.py index 1a0cce7e0..2831368b6 100644 --- a/youtube_dl/extractor/pornoxo.py +++ b/youtube_dl/extractor/pornoxo.py @@ -2,13 +2,13 @@ from __future__ import unicode_literals import re -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import ( str_to_int, ) -class PornoXOIE(JWPlatformBaseIE): +class PornoXOIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pornoxo\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)\.html' _TEST = { 'url': 'http://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary.html', diff --git a/youtube_dl/extractor/rentv.py b/youtube_dl/extractor/rentv.py index 422c02cff..d338b3a93 100644 --- a/youtube_dl/extractor/rentv.py +++ b/youtube_dl/extractor/rentv.py @@ -2,11 +2,10 @@ from __future__ import unicode_literals from .common import InfoExtractor -from .jwplatform import JWPlatformBaseIE from ..compat import compat_str -class RENTVIE(JWPlatformBaseIE): +class RENTVIE(InfoExtractor): _VALID_URL = r'(?:rentv:|https?://(?:www\.)?ren\.tv/(?:player|video/epizod)/)(?P<id>\d+)' _TESTS = [{ 'url': 'http://ren.tv/video/epizod/118577', diff --git a/youtube_dl/extractor/rudo.py b/youtube_dl/extractor/rudo.py index 3bfe934d8..51644011e 100644 --- a/youtube_dl/extractor/rudo.py +++ b/youtube_dl/extractor/rudo.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import ( js_to_json, get_element_by_class, @@ -11,7 +11,7 @@ from ..utils import ( ) -class RudoIE(JWPlatformBaseIE): +class RudoIE(InfoExtractor): _VALID_URL = r'https?://rudo\.video/vod/(?P<id>[0-9a-zA-Z]+)' _TEST = { diff --git a/youtube_dl/extractor/screencastomatic.py b/youtube_dl/extractor/screencastomatic.py index 94a2a37d2..b5e76c9af 100644 --- a/youtube_dl/extractor/screencastomatic.py +++ b/youtube_dl/extractor/screencastomatic.py @@ -1,11 +1,11 @@ # coding: utf-8 from __future__ import unicode_literals -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import js_to_json -class ScreencastOMaticIE(JWPlatformBaseIE): +class ScreencastOMaticIE(InfoExtractor): _VALID_URL = r'https?://screencast-o-matic\.com/watch/(?P<id>[0-9a-zA-Z]+)' _TEST = { 'url': 
'http://screencast-o-matic.com/watch/c2lD3BeOPl', diff --git a/youtube_dl/extractor/sendtonews.py b/youtube_dl/extractor/sendtonews.py index 9880a5a78..9d9652949 100644 --- a/youtube_dl/extractor/sendtonews.py +++ b/youtube_dl/extractor/sendtonews.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import ( float_or_none, parse_iso8601, @@ -14,7 +14,7 @@ from ..utils import ( ) -class SendtoNewsIE(JWPlatformBaseIE): +class SendtoNewsIE(InfoExtractor): _VALID_URL = r'https?://embed\.sendtonews\.com/player2/embedplayer\.php\?.*\bSC=(?P<id>[0-9A-Za-z-]+)' _TEST = { diff --git a/youtube_dl/extractor/thisav.py b/youtube_dl/extractor/thisav.py index 4473a3c77..b7b3568cb 100644 --- a/youtube_dl/extractor/thisav.py +++ b/youtube_dl/extractor/thisav.py @@ -3,11 +3,11 @@ from __future__ import unicode_literals import re -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import remove_end -class ThisAVIE(JWPlatformBaseIE): +class ThisAVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*' _TESTS = [{ 'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html', diff --git a/youtube_dl/extractor/tvnoe.py b/youtube_dl/extractor/tvnoe.py index 6d5c74826..1a5b76bf2 100644 --- a/youtube_dl/extractor/tvnoe.py +++ b/youtube_dl/extractor/tvnoe.py @@ -1,7 +1,7 @@ # coding: utf-8 from __future__ import unicode_literals -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_class, @@ -9,7 +9,7 @@ from ..utils import ( ) -class TVNoeIE(JWPlatformBaseIE): +class TVNoeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.tvnoe.cz/video/10362', diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py index 9950c62ad..1f1828fce 100644 --- a/youtube_dl/extractor/vidzi.py +++ b/youtube_dl/extractor/vidzi.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re -from .jwplatform import JWPlatformBaseIE +from .common import InfoExtractor from ..utils import ( decode_packed_codes, js_to_json, @@ -12,7 +12,7 @@ from ..utils import ( ) -class VidziIE(JWPlatformBaseIE): +class VidziIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?vidzi\.tv/(?:embed-)?(?P<id>[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'http://vidzi.tv/cghql9yq6emu.html', diff --git a/youtube_dl/extractor/wimp.py b/youtube_dl/extractor/wimp.py index 54eb51427..c022fb33e 100644 --- a/youtube_dl/extractor/wimp.py +++ b/youtube_dl/extractor/wimp.py @@ -1,10 +1,10 @@ from __future__ import unicode_literals +from .common import InfoExtractor from .youtube import YoutubeIE -from .jwplatform import JWPlatformBaseIE -class WimpIE(JWPlatformBaseIE): +class WimpIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?wimp\.com/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.wimp.com/maru-is-exhausted/', From 4cead6a614b5a293e78dce5cd5eda7476f83985d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 17 Feb 2017 22:02:01 +0700 Subject: [PATCH 033/225] [einthusan] Relax _VALID_URL (closes #12141, closes #12159) --- youtube_dl/extractor/einthusan.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/einthusan.py b/youtube_dl/extractor/einthusan.py index 8a2a17b63..3f6268637 100644 --- a/youtube_dl/extractor/einthusan.py +++ 
b/youtube_dl/extractor/einthusan.py @@ -18,8 +18,8 @@ from ..utils import ( class EinthusanIE(InfoExtractor): - _VALID_URL = r'https?://einthusan\.tv/movie/watch/(?P<id>[0-9]+)' - _TEST = { + _VALID_URL = r'https?://einthusan\.tv/movie/watch/(?P<id>[^/?#&]+)' + _TESTS = [{ 'url': 'https://einthusan.tv/movie/watch/9097/', 'md5': 'ff0f7f2065031b8a2cf13a933731c035', 'info_dict': { @@ -29,7 +29,10 @@ class EinthusanIE(InfoExtractor): 'description': 'md5:33ef934c82a671a94652a9b4e54d931b', 'thumbnail': r're:^https?://.*\.jpg$', } - } + }, { + 'url': 'https://einthusan.tv/movie/watch/51MZ/?lang=hindi', + 'only_matching': True, + }] # reversed from jsoncrypto.prototype.decrypt() in einthusan-PGMovieWatcher.js def _decrypt(self, encrypted_data, video_id): From fef51645d6c224f898ff6f44d041a458d21e8547 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Fri, 17 Feb 2017 23:13:51 +0800 Subject: [PATCH 034/225] [theplatform] Recognize URLs with whitespaces (closes #12044) --- ChangeLog | 1 + youtube_dl/extractor/generic.py | 7 ++++++- youtube_dl/extractor/theplatform.py | 6 ++++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4e69b03d0..d5fe3dd5b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version <unreleased> Extractors ++ [theplatform] Recognize URLs with whitespaces (#12044) + [generic] Support complex JWPlayer embedded videos (#12030) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 3db31debe..9868ca6d0 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -1501,7 +1501,12 @@ class GenericIE(InfoExtractor): 'skip_download': True, }, 'add_ie': [VideoPressIE.ie_key()], - } + }, + { + # ThePlatform embedded with whitespaces in URLs + 'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm', + 'only_matching': True, + }, # { # # TODO: find another test # # http://schema.org/VideoObject diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py index 5c5987c6a..9a424b1c6 100644 --- a/youtube_dl/extractor/theplatform.py +++ b/youtube_dl/extractor/theplatform.py @@ -179,10 +179,12 @@ class ThePlatformIE(ThePlatformBaseIE, AdobePassIE): if m: return [m.group('url')] + # Are whitesapces ignored in URLs? + # https://github.com/rg3/youtube-dl/issues/12044 matches = re.findall( - r'<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage) + r'(?s)<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage) if matches: - return list(zip(*matches))[1] + return [re.sub(r'\s', '', list(zip(*matches))[1][0])] @staticmethod def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False): From d94badc755228ee3159b9b499aa718d27fa472ed Mon Sep 17 00:00:00 2001 From: Vijay Singh <sudovijay@users.noreply.github.com> Date: Tue, 7 Feb 2017 10:32:45 +0530 Subject: [PATCH 035/225] [openload] Semifix extraction (closes #10408) just updated the code. i don't do much python still i tried to convert my code. 
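For readers following the obfuscation being reversed in this patch and in the
follow-up fix just below, the decoding the two patches converge on can be
sketched as a standalone helper. This is illustrative only (the helper name is
invented here, and openload's obfuscation changes often), but it mirrors the
patched loop:

from youtube_dl.compat import compat_chr

def decode_ol_id(ol_id):
    # The first two digits act as a character offset; each following
    # 5-digit chunk encodes a character code plus a 2-digit sort key, and
    # the characters are reassembled in key order.
    offset = int(float(ol_id[0:2]))
    pairs = []
    num = 2
    while num < len(ol_id):
        key = int(float(ol_id[num + 3:num + 5]))
        pairs.append((key, compat_chr(int(float(ol_id[num:num + 3])) - offset)))
        num += 5
    return ''.join(value for _, value in sorted(pairs, key=lambda kv: kv[0]))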
lemme know if there is any prob with it --- youtube_dl/extractor/openload.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py index 32289d897..bd1120fd8 100644 --- a/youtube_dl/extractor/openload.py +++ b/youtube_dl/extractor/openload.py @@ -75,17 +75,20 @@ class OpenloadIE(InfoExtractor): '<span[^>]+id="[^"]+"[^>]*>([0-9]+)</span>', webpage, 'openload ID') - first_three_chars = int(float(ol_id[0:][:3])) - fifth_char = int(float(ol_id[3:5])) - urlcode = '' - num = 5 + first_two_chars = int(float(ol_id[0:][:2])) + urlcode = {} + num = 2 while num < len(ol_id): - urlcode += compat_chr(int(float(ol_id[num:][:3])) + - first_three_chars - fifth_char * int(float(ol_id[num + 3:][:2]))) + key = int(float(ol_id[num + 3:][:2])) + urlcode[key] = compat_chr(int(float(ol_id[num:][:3])) - first_two_chars) num += 5 + + sorted(urlcode, key=lambda key: urlcode[key]) - video_url = 'https://openload.co/stream/' + urlcode + urllink = ''.join(['%s' % (value) for (key, value) in urlcode.items()]) + + video_url = 'https://openload.co/stream/' + urllink title = self._og_search_title(webpage, default=None) or self._search_regex( r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage, From 90fad0e74cd8079246c5f3d8150650b5f65f998b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 17 Feb 2017 22:31:16 +0700 Subject: [PATCH 036/225] [openload] Fix extraction (closes #12002) --- youtube_dl/extractor/openload.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py index bd1120fd8..10896c442 100644 --- a/youtube_dl/extractor/openload.py +++ b/youtube_dl/extractor/openload.py @@ -76,19 +76,16 @@ class OpenloadIE(InfoExtractor): webpage, 'openload ID') first_two_chars = int(float(ol_id[0:][:2])) - urlcode = {} + urlcode = [] num = 2 while num < len(ol_id): key = int(float(ol_id[num + 3:][:2])) - urlcode[key] = compat_chr(int(float(ol_id[num:][:3])) - first_two_chars) + urlcode.append((key, compat_chr(int(float(ol_id[num:][:3])) - first_two_chars))) num += 5 - - sorted(urlcode, key=lambda key: urlcode[key]) - urllink = ''.join(['%s' % (value) for (key, value) in urlcode.items()]) - - video_url = 'https://openload.co/stream/' + urllink + video_url = 'https://openload.co/stream/' + ''.join( + [value for _, value in sorted(urlcode, key=lambda x: x[0])]) title = self._og_search_title(webpage, default=None) or self._search_regex( r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage, From c2bde5d08163ce46548ea60333750a0a74a8fe44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A9stin=20Reed?= <TRox1972@noreply.github.com> Date: Mon, 9 Jan 2017 18:22:53 +0100 Subject: [PATCH 037/225] [ellentv] Improve --- youtube_dl/extractor/ellentv.py | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/youtube_dl/extractor/ellentv.py b/youtube_dl/extractor/ellentv.py index 74bbc5c51..e0a13dd76 100644 --- a/youtube_dl/extractor/ellentv.py +++ b/youtube_dl/extractor/ellentv.py @@ -1,13 +1,9 @@ # coding: utf-8 from __future__ import unicode_literals -import json - from .common import InfoExtractor -from ..utils import ( - ExtractorError, - NO_DEFAULT, -) +from .kaltura import KalturaIE +from ..utils import NO_DEFAULT class EllenTVIE(InfoExtractor): @@ -65,7 +61,7 @@ class EllenTVIE(InfoExtractor): if partner_id and kaltura_id: break - return 
self.url_result('kaltura:%s:%s' % (partner_id, kaltura_id), 'Kaltura') + return self.url_result('kaltura:%s:%s' % (partner_id, kaltura_id), KalturaIE.ie_key()) class EllenTVClipsIE(InfoExtractor): @@ -77,14 +73,14 @@ class EllenTVClipsIE(InfoExtractor): 'id': 'meryl-streep-vanessa-hudgens', 'title': 'Meryl Streep, Vanessa Hudgens', }, - 'playlist_mincount': 7, + 'playlist_mincount': 5, } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) - playlist = self._extract_playlist(webpage) + playlist = self._extract_playlist(webpage, playlist_id) return { '_type': 'playlist', @@ -93,16 +89,13 @@ class EllenTVClipsIE(InfoExtractor): 'entries': self._extract_entries(playlist) } - def _extract_playlist(self, webpage): + def _extract_playlist(self, webpage, playlist_id): json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json') - try: - return json.loads('[{' + json_string + '}]') - except ValueError as ve: - raise ExtractorError('Failed to download JSON', cause=ve) + return self._parse_json('[{' + json_string + '}]', playlist_id) def _extract_entries(self, playlist): return [ self.url_result( 'kaltura:%s:%s' % (item['kaltura_partner_id'], item['kaltura_entry_id']), - 'Kaltura') + KalturaIE.ie_key(), video_id=item['kaltura_entry_id']) for item in playlist] From db76c30c6ecb5d198a72f1807163c9b69771bba1 Mon Sep 17 00:00:00 2001 From: Tobias Gruetzmacher <tobias-git@23.gs> Date: Tue, 7 Jun 2016 23:42:56 +0200 Subject: [PATCH 038/225] [heise] Support videos embedded in any article. --- youtube_dl/extractor/heise.py | 119 +++++++++++++++++++++++++--------- 1 file changed, 90 insertions(+), 29 deletions(-) diff --git a/youtube_dl/extractor/heise.py b/youtube_dl/extractor/heise.py index 1629cdb8d..a5ec0fae9 100644 --- a/youtube_dl/extractor/heise.py +++ b/youtube_dl/extractor/heise.py @@ -11,54 +11,115 @@ from ..utils import ( class HeiseIE(InfoExtractor): _VALID_URL = r'''(?x) - https?://(?:www\.)?heise\.de/video/artikel/ - .+?(?P<id>[0-9]+)\.html(?:$|[?#]) + https?://(?:www\.)?heise\.de/.+?(?P<id>[0-9]+)\.html(?:$|[?#]) ''' - _TEST = { - 'url': ( - 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html' - ), - 'md5': 'ffed432483e922e88545ad9f2f15d30e', - 'info_dict': { - 'id': '2404147', - 'ext': 'mp4', - 'title': ( - "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone" + _TESTS = [ + { + 'url': ( + 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html' ), - 'format_id': 'mp4_720p', - 'timestamp': 1411812600, - 'upload_date': '20140927', - 'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.', - 'thumbnail': r're:^https?://.*\.jpe?g$', - } - } + 'md5': 'ffed432483e922e88545ad9f2f15d30e', + 'info_dict': { + 'id': '2404147', + 'ext': 'mp4', + 'title': ( + "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone" + ), + 'format_id': 'mp4_720p', + 'timestamp': 1411812600, + 'upload_date': '20140927', + 'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.', + 'thumbnail': r're:^https?://.*/gallery/$', + } + }, + { + 'url': ( + 
'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html' + ), + 'md5': 'ffed432483e922e88545ad9f2f15d30e', + 'info_dict': { + 'id': '2403911', + 'ext': 'mp4', + 'title': ( + "c't uplink 3.3: Owncloud, Tastaturen, Peilsender Smartphone" + ), + 'format_id': 'mp4_720p', + 'timestamp': 1411803000, + 'upload_date': '20140927', + 'description': "In c't uplink erklären wir in dieser Woche, wie man mit Owncloud die Kontrolle über die eigenen Daten behält. Darüber hinaus erklären wir, dass zur Wahl der richtigen Tastatur mehr gehört, als man denkt und wie Smartphones uns weiter verraten.", + 'thumbnail': r're:^https?://.*/gallery/$', + } + }, + { + 'url': ( + 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom' + ), + 'md5': 'ffed432483e922e88545ad9f2f15d30e', + 'info_dict': { + 'id': '2404251', + 'ext': 'mp4', + 'title': ( + "c't uplink: Owncloud, Tastaturen, Peilsender Smartphone" + ), + 'format_id': 'mp4_720p', + 'timestamp': 1411811400, + 'upload_date': '20140927', + 'description': 'In uplink-Episode 3.3 sprechen wir über Owncloud und wie man sich damit von Cloudanbietern emanzipieren kann. Außerdem erklären wir, woran man alles beim Kauf einer Tastatur denken sollte und was Smartphones nun über uns verraten.', + 'thumbnail': r're:^https?://.*/gallery/$', + } + }, + { + 'url': ( + 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html' + ), + 'md5': '0616c9297d9c989f9b2a23b483b408c3', + 'info_dict': { + 'id': '3214137', + 'ext': 'mp4', + 'title': ( + "c\u2019t zockt \u201eGlitchspace\u201c, \u201eThe Mind's Eclipse\u201c und \u201eWindowframe\u201c." + ), + 'format_id': 'mp4_720p', + 'timestamp': 1464011220, + 'upload_date': '20160523', + 'description': "Unsere Spiele-Tipps der Woche: Das Puzzle-Adventure Glitchspace, das Jump&Run-Spiel Windowframe und The Mind's Eclipse", + 'thumbnail': r're:^https?://.*/gallery/$', + } + }, + + ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) container_id = self._search_regex( - r'<div class="videoplayerjw".*?data-container="([0-9]+)"', + r'<div class="videoplayerjw"[^>]*data-container="([0-9]+)"', webpage, 'container ID') sequenz_id = self._search_regex( - r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"', + r'<div class="videoplayerjw"[^>]*data-sequenz="([0-9]+)"', webpage, 'sequenz ID') data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id) doc = self._download_xml(data_url, video_id) info = { 'id': video_id, - 'thumbnail': self._og_search_thumbnail(webpage), + 'thumbnail': doc.find('.//{http://rss.jwpcdn.com/}image').text, 'timestamp': parse_iso8601( - self._html_search_meta('date', webpage)), - 'description': self._og_search_description(webpage), + self._html_search_meta('date', webpage)) } - title = self._html_search_meta('fulltitle', webpage) - if title: - info['title'] = title - else: - info['title'] = self._og_search_title(webpage) + title = self._html_search_meta('fulltitle', webpage, default=None) + if not title or title == "c't": + title = self._search_regex( + r'<div class="videoplayerjw"[^>]*data-title="([^"]+)"', + webpage, 'video title') + info['title'] = title + + desc = self._og_search_description(webpage, default=None) + if not desc: + desc = self._html_search_meta('description', webpage) + info['description'] = desc formats = [] for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'): From 
bad4ccdb5db7c00865d433558ddfcdfdbd499343 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 17 Feb 2017 23:09:40 +0700 Subject: [PATCH 039/225] [heise] Improve (closes #9725) --- youtube_dl/extractor/heise.py | 146 +++++++++++----------------------- 1 file changed, 48 insertions(+), 98 deletions(-) diff --git a/youtube_dl/extractor/heise.py b/youtube_dl/extractor/heise.py index a5ec0fae9..382f32771 100644 --- a/youtube_dl/extractor/heise.py +++ b/youtube_dl/extractor/heise.py @@ -6,120 +6,58 @@ from ..utils import ( determine_ext, int_or_none, parse_iso8601, + xpath_text, ) class HeiseIE(InfoExtractor): - _VALID_URL = r'''(?x) - https?://(?:www\.)?heise\.de/.+?(?P<id>[0-9]+)\.html(?:$|[?#]) - ''' - _TESTS = [ - { - 'url': ( - 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html' - ), - 'md5': 'ffed432483e922e88545ad9f2f15d30e', - 'info_dict': { - 'id': '2404147', - 'ext': 'mp4', - 'title': ( - "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone" - ), - 'format_id': 'mp4_720p', - 'timestamp': 1411812600, - 'upload_date': '20140927', - 'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.', - 'thumbnail': r're:^https?://.*/gallery/$', - } - }, - { - 'url': ( - 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html' - ), - 'md5': 'ffed432483e922e88545ad9f2f15d30e', - 'info_dict': { - 'id': '2403911', - 'ext': 'mp4', - 'title': ( - "c't uplink 3.3: Owncloud, Tastaturen, Peilsender Smartphone" - ), - 'format_id': 'mp4_720p', - 'timestamp': 1411803000, - 'upload_date': '20140927', - 'description': "In c't uplink erklären wir in dieser Woche, wie man mit Owncloud die Kontrolle über die eigenen Daten behält. Darüber hinaus erklären wir, dass zur Wahl der richtigen Tastatur mehr gehört, als man denkt und wie Smartphones uns weiter verraten.", - 'thumbnail': r're:^https?://.*/gallery/$', - } - }, - { - 'url': ( - 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom' - ), - 'md5': 'ffed432483e922e88545ad9f2f15d30e', - 'info_dict': { - 'id': '2404251', - 'ext': 'mp4', - 'title': ( - "c't uplink: Owncloud, Tastaturen, Peilsender Smartphone" - ), - 'format_id': 'mp4_720p', - 'timestamp': 1411811400, - 'upload_date': '20140927', - 'description': 'In uplink-Episode 3.3 sprechen wir über Owncloud und wie man sich damit von Cloudanbietern emanzipieren kann. Außerdem erklären wir, woran man alles beim Kauf einer Tastatur denken sollte und was Smartphones nun über uns verraten.', - 'thumbnail': r're:^https?://.*/gallery/$', - } - }, - { - 'url': ( - 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html' - ), - 'md5': '0616c9297d9c989f9b2a23b483b408c3', - 'info_dict': { - 'id': '3214137', - 'ext': 'mp4', - 'title': ( - "c\u2019t zockt \u201eGlitchspace\u201c, \u201eThe Mind's Eclipse\u201c und \u201eWindowframe\u201c." 
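One detail worth spelling out about the attribute regexes reworked here (see
the data-container/data-sequenz hunks below): switching from '.*?' to '[^>]+'
keeps the match inside a single opening tag, so the pattern can no longer
wander past a '>' and pick up a data-* attribute that belongs to a later
element. A contrived demonstration:

import re

html = '<div class="videoplayerjw"></div><div data-container="42">'

# '.*?' crosses the '>' that closes the player div and steals the attribute
# from a completely different element:
m = re.search(r'<div class="videoplayerjw".*?data-container="([0-9]+)"', html)
print(m.group(1))  # -> 42, taken from the wrong <div>

# '[^>]+' cannot leave the tag it started in, so the bogus match is simply
# not produced:
print(re.search(r'<div class="videoplayerjw"[^>]+data-container="([0-9]+)"', html))  # -> None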
- ), - 'format_id': 'mp4_720p', - 'timestamp': 1464011220, - 'upload_date': '20160523', - 'description': "Unsere Spiele-Tipps der Woche: Das Puzzle-Adventure Glitchspace, das Jump&Run-Spiel Windowframe und The Mind's Eclipse", - 'thumbnail': r're:^https?://.*/gallery/$', - } - }, - - ] + _VALID_URL = r'https?://(?:www\.)?heise\.de/(?:[^/]+/)+[^/]+-(?P<id>[0-9]+)\.html' + _TESTS = [{ + 'url': 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html', + 'md5': 'ffed432483e922e88545ad9f2f15d30e', + 'info_dict': { + 'id': '2404147', + 'ext': 'mp4', + 'title': "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone", + 'format_id': 'mp4_720p', + 'timestamp': 1411812600, + 'upload_date': '20140927', + 'description': 'md5:c934cbfb326c669c2bcabcbe3d3fcd20', + 'thumbnail': r're:^https?://.*/gallery/$', + } + }, { + 'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html', + 'only_matching': True, + }, { + 'url': 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom', + 'only_matching': True, + }, { + 'url': 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) container_id = self._search_regex( - r'<div class="videoplayerjw"[^>]*data-container="([0-9]+)"', + r'<div class="videoplayerjw"[^>]+data-container="([0-9]+)"', webpage, 'container ID') sequenz_id = self._search_regex( - r'<div class="videoplayerjw"[^>]*data-sequenz="([0-9]+)"', + r'<div class="videoplayerjw"[^>]+data-sequenz="([0-9]+)"', webpage, 'sequenz ID') - data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id) - doc = self._download_xml(data_url, video_id) - - info = { - 'id': video_id, - 'thumbnail': doc.find('.//{http://rss.jwpcdn.com/}image').text, - 'timestamp': parse_iso8601( - self._html_search_meta('date', webpage)) - } title = self._html_search_meta('fulltitle', webpage, default=None) if not title or title == "c't": title = self._search_regex( - r'<div class="videoplayerjw"[^>]*data-title="([^"]+)"', - webpage, 'video title') - info['title'] = title + r'<div[^>]+class="videoplayerjw"[^>]+data-title="([^"]+)"', + webpage, 'title') - desc = self._og_search_description(webpage, default=None) - if not desc: - desc = self._html_search_meta('description', webpage) - info['description'] = desc + doc = self._download_xml( + 'http://www.heise.de/videout/feed', video_id, query={ + 'container': container_id, + 'sequenz': sequenz_id, + }) formats = [] for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'): @@ -135,6 +73,18 @@ class HeiseIE(InfoExtractor): 'height': height, }) self._sort_formats(formats) - info['formats'] = formats - return info + description = self._og_search_description( + webpage, default=None) or self._html_search_meta( + 'description', webpage) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or + self._og_search_thumbnail(webpage)), + 'timestamp': parse_iso8601( + self._html_search_meta('date', webpage)), + 'formats': formats, + } From 2c1f442c2bb4de65479f2e6c2f81c5741445184e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 17 Feb 2017 23:18:26 +0700 Subject: [PATCH 040/225] 
[options] Add missing spaces --- youtube_dl/options.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 349f44778..2fea99ff2 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -298,14 +298,14 @@ def parseOpts(overrideArguments=None): metavar='FILTER', dest='match_filter', default=None, help=( 'Generic video filter. ' - 'Specify any key (see help for -o for a list of available keys) to' - ' match if the key is present, ' - '!key to check if the key is not present,' + 'Specify any key (see help for -o for a list of available keys) to ' + 'match if the key is present, ' + '!key to check if the key is not present, ' 'key > NUMBER (like "comment_count > 12", also works with ' '>=, <, <=, !=, =) to compare against a number, and ' '& to require multiple matches. ' - 'Values which are not known are excluded unless you' - ' put a question mark (?) after the operator.' + 'Values which are not known are excluded unless you ' + 'put a question mark (?) after the operator. ' 'For example, to only match videos that have been liked more than ' '100 times and disliked less than 50 times (or the dislike ' 'functionality is not available at the given service), but who ' From cf3704c132800809caacc6ce89afa87f0dfae487 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 17 Feb 2017 23:47:54 +0700 Subject: [PATCH 041/225] [ChangeLog] Actualize --- ChangeLog | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ChangeLog b/ChangeLog index d5fe3dd5b..00ee0a5a9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,8 +1,13 @@ version <unreleased> Extractors +* [heise] Improve extraction (#9725) +* [ellentv] Improve (#11653) +* [openload] Fix extraction (#10408, #12002) + [theplatform] Recognize URLs with whitespaces (#12044) +* [einthusan] Relax URL regular expression (#12141, #12159) + [generic] Support complex JWPlayer embedded videos (#12030) +* [elpais] Improve extraction (#12139) version 2017.02.16 From 28e35f50702a8841b4caf072a546ff06ca63db96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 17 Feb 2017 23:59:56 +0700 Subject: [PATCH 042/225] release 2017.02.17 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- README.md | 4 ++-- youtube_dl/version.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 06711f73b..6f1361b32 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.16*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.16** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.17*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
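The help-text fix in the [options] patch above (and the matching README hunk
further below) boils down to a small Python gotcha: adjacent string literals
are concatenated exactly as written, so a missing trailing space glues the
last word of one fragment to the first word of the next ("...not present,key >
NUMBER..."). A minimal illustration, paraphrasing the option help:

broken = (
    'Specify any key to match if the key is present, '
    '!key to check if the key is not present,'   # no trailing space
    'key > NUMBER to compare against a number'
)
fixed = (
    'Specify any key to match if the key is present, '
    '!key to check if the key is not present, '  # trailing space restored
    'key > NUMBER to compare against a number'
)
assert 'present,key' in broken and 'present, key' in fixed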
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.17** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v <your command line> [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.16 +[debug] youtube-dl version 2017.02.17 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 00ee0a5a9..2c90f791d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version <unreleased> +version 2017.02.17 Extractors * [heise] Improve extraction (#9725) diff --git a/README.md b/README.md index 89876bd7a..c2a1a6b02 100644 --- a/README.md +++ b/README.md @@ -137,13 +137,13 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo --match-filter FILTER Generic video filter. Specify any key (see help for -o for a list of available keys) to match if the key is present, !key to - check if the key is not present,key > + check if the key is not present, key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare against a number, and & to require multiple matches. Values which are not known are excluded unless you put a question mark (?) - after the operator.For example, to only + after the operator. For example, to only match videos that have been liked more than 100 times and disliked less than 50 times (or the dislike functionality is not diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 323e80954..530e1856b 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.16' +__version__ = '2017.02.17' From 70bcc444a990ee9ca3daab6f3dc2d5d58a948ba4 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sat, 18 Feb 2017 09:52:43 +0100 Subject: [PATCH 043/225] [viceland] improve info extraction and update test --- youtube_dl/extractor/vice.py | 6 +++--- youtube_dl/extractor/viceland.py | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py index 8a00c8fee..f0a7fd739 100644 --- a/youtube_dl/extractor/vice.py +++ b/youtube_dl/extractor/vice.py @@ -70,10 +70,10 @@ class ViceBaseIE(AdobePassIE): 'url': uplynk_preplay_url, 'id': video_id, 'title': title, - 'description': base.get('body'), + 'description': base.get('body') or base.get('display_body'), 'thumbnail': watch_hub_data.get('cover-image') or watch_hub_data.get('thumbnail'), - 'duration': parse_duration(video_data.get('video_duration') or watch_hub_data.get('video-duration')), - 'timestamp': int_or_none(video_data.get('created_at')), + 'duration': int_or_none(video_data.get('video_duration')) or parse_duration(watch_hub_data.get('video-duration')), + 'timestamp': int_or_none(video_data.get('created_at'), 1000), 'age_limit': parse_age_limit(video_data.get('video_rating')), 'series': video_data.get('show_title') or watch_hub_data.get('show-title'), 'episode_number': int_or_none(episode.get('episode_number') or 
watch_hub_data.get('episode')), diff --git a/youtube_dl/extractor/viceland.py b/youtube_dl/extractor/viceland.py index 0eff055a6..87f9216b5 100644 --- a/youtube_dl/extractor/viceland.py +++ b/youtube_dl/extractor/viceland.py @@ -7,16 +7,16 @@ from .vice import ViceBaseIE class VicelandIE(ViceBaseIE): _VALID_URL = r'https?://(?:www\.)?viceland\.com/[^/]+/video/[^/]+/(?P<id>[a-f0-9]+)' _TEST = { - 'url': 'https://www.viceland.com/en_us/video/cyberwar-trailer/57608447973ee7705f6fbd4e', + 'url': 'https://www.viceland.com/en_us/video/trapped/588a70d0dba8a16007de7316', 'info_dict': { - 'id': '57608447973ee7705f6fbd4e', + 'id': '588a70d0dba8a16007de7316', 'ext': 'mp4', - 'title': 'CYBERWAR (Trailer)', - 'description': 'Tapping into the geopolitics of hacking and surveillance, Ben Makuch travels the world to meet with hackers, government officials, and dissidents to investigate the ecosystem of cyberwarfare.', + 'title': 'TRAPPED (Series Trailer)', + 'description': 'md5:7a8e95c2b6cd86461502a2845e581ccf', 'age_limit': 14, - 'timestamp': 1466008539, - 'upload_date': '20160615', - 'uploader_id': '11', + 'timestamp': 1485474122, + 'upload_date': '20170126', + 'uploader_id': '57a204098cb727dec794c6a3', 'uploader': 'Viceland', }, 'params': { From bdabbc220c60ea6be50c9b1058405b636f70fb71 Mon Sep 17 00:00:00 2001 From: Alex Monk <krenair@gmail.com> Date: Wed, 17 Aug 2016 21:13:28 +0100 Subject: [PATCH 044/225] [metacafe] Bypass family filter If you don't send this user=ffilter: false cookie, it will 301 redirect you to a page asking about it, and then the title check will fail. --- youtube_dl/extractor/metacafe.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index 9880924e6..adbd44fd1 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -50,6 +50,18 @@ class MetacafeIE(InfoExtractor): }, 'skip': 'Page is temporarily unavailable.', }, + # metacafe video with family filter + { + 'url': 'http://www.metacafe.com/watch/2155630/adult_art_by_david_hart_156/', + 'md5': 'b06082c5079bbdcde677a6291fbdf376', + 'info_dict': { + 'id': '2155630', + 'ext': 'mp4', + 'title': 'Adult Art By David Hart #156', + 'uploader': 'hartistry', + 'description': 'Adult Art By David Hart. All the Art Works presented here are not in the possession of the American Artist, David John Hart. 
The paintings are in collections worldwide of individuals, countries, art museums, foundations and charities.', + } + }, # AnyClip video { 'url': 'http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/', @@ -148,8 +160,9 @@ class MetacafeIE(InfoExtractor): # AnyClip videos require the flashversion cookie so that we get the link # to the mp4 file headers = {} + headers['Cookie'] = 'user=%7B%22ffilter%22%3Afalse%7D;'; if video_id.startswith('an-'): - headers['Cookie'] = 'flashVersion=0;' + headers['Cookie'] += ' flashVersion=0;' # Retrieve video webpage to extract further information webpage = self._download_webpage(url, video_id, headers=headers) From f75caf059eb7a1a156921124cbf4b720fea526e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 18 Feb 2017 19:58:25 +0700 Subject: [PATCH 045/225] [metacafe] Improve (closes #10371) --- youtube_dl/extractor/metacafe.py | 38 +++++++++++--------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index adbd44fd1..28f59f63c 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -6,12 +6,12 @@ from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse_unquote, + compat_urllib_parse_urlencode, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, - urlencode_postdata, get_element_by_attribute, mimetype2ext, ) @@ -57,10 +57,13 @@ class MetacafeIE(InfoExtractor): 'info_dict': { 'id': '2155630', 'ext': 'mp4', - 'title': 'Adult Art By David Hart #156', - 'uploader': 'hartistry', - 'description': 'Adult Art By David Hart. All the Art Works presented here are not in the possession of the American Artist, David John Hart. 
The paintings are in collections worldwide of individuals, countries, art museums, foundations and charities.', - } + 'title': 'Adult Art By David Hart 156', + 'uploader': '63346', + 'description': 'md5:9afac8fc885252201ad14563694040fc', + }, + 'params': { + 'skip_download': True, + }, }, # AnyClip video { @@ -124,22 +127,6 @@ class MetacafeIE(InfoExtractor): def report_disclaimer(self): self.to_screen('Retrieving disclaimer') - def _confirm_age(self): - # Retrieve disclaimer - self.report_disclaimer() - self._download_webpage(self._DISCLAIMER, None, False, 'Unable to retrieve disclaimer') - - # Confirm age - self.report_age_confirmation() - self._download_webpage( - self._FILTER_POST, None, False, 'Unable to confirm age', - data=urlencode_postdata({ - 'filters': '0', - 'submit': "Continue - I'm over 18", - }), headers={ - 'Content-Type': 'application/x-www-form-urlencoded', - }) - def _real_extract(self, url): # Extract id and simplified title from URL video_id, display_id = re.match(self._VALID_URL, url).groups() @@ -155,14 +142,15 @@ class MetacafeIE(InfoExtractor): if prefix == 'cb': return self.url_result('theplatform:%s' % ext_id, 'ThePlatform') - # self._confirm_age() + headers = { + # Disable family filter + 'Cookie': 'user=%s; ' % compat_urllib_parse_urlencode({'ffilter': False}) + } # AnyClip videos require the flashversion cookie so that we get the link # to the mp4 file - headers = {} - headers['Cookie'] = 'user=%7B%22ffilter%22%3Afalse%7D;'; if video_id.startswith('an-'): - headers['Cookie'] += ' flashVersion=0;' + headers['Cookie'] += 'flashVersion=0; ' # Retrieve video webpage to extract further information webpage = self._download_webpage(url, video_id, headers=headers) From a2e3286676606103601f9499154ad465037314d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 18 Feb 2017 20:21:37 +0700 Subject: [PATCH 046/225] [thisav] Add support for html5 media (closes #11771) --- youtube_dl/extractor/thisav.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/thisav.py b/youtube_dl/extractor/thisav.py index b7b3568cb..33683b139 100644 --- a/youtube_dl/extractor/thisav.py +++ b/youtube_dl/extractor/thisav.py @@ -10,6 +10,7 @@ from ..utils import remove_end class ThisAVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*' _TESTS = [{ + # jwplayer 'url': 'http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html', 'md5': '0480f1ef3932d901f0e0e719f188f19b', 'info_dict': { @@ -20,6 +21,7 @@ class ThisAVIE(InfoExtractor): 'uploader_id': 'dj7970' } }, { + # html5 media 'url': 'http://www.thisav.com/video/242352/nerdy-18yo-big-ass-tattoos-and-glasses.html', 'md5': 'ba90c076bd0f80203679e5b60bf523ee', 'info_dict': { @@ -48,8 +50,12 @@ class ThisAVIE(InfoExtractor): }], } else: - info_dict = self._extract_jwplayer_data( - webpage, video_id, require_title=False) + entries = self._parse_html5_media_entries(url, webpage, video_id) + if entries: + info_dict = entries[0] + else: + info_dict = self._extract_jwplayer_data( + webpage, video_id, require_title=False) uploader = self._html_search_regex( r': <a href="http://www.thisav.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>', webpage, 'uploader name', fatal=False) From 02d9b82a233abcb778f3f8601b229f996fd7df94 Mon Sep 17 00:00:00 2001 From: Jakub Wilk <jwilk@jwilk.net> Date: Wed, 11 Jan 2017 18:49:40 +0100 Subject: [PATCH 047/225] [tvn24] Add extractor --- youtube_dl/extractor/extractors.py | 1 + 
youtube_dl/extractor/tvn24.py | 47 ++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 youtube_dl/extractor/tvn24.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index be3688d5a..55b4782d3 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1009,6 +1009,7 @@ from .tvc import ( ) from .tvigle import TvigleIE from .tvland import TVLandIE +from .tvn24 import TVN24IE from .tvnoe import TVNoeIE from .tvp import ( TVPEmbedIE, diff --git a/youtube_dl/extractor/tvn24.py b/youtube_dl/extractor/tvn24.py new file mode 100644 index 000000000..225ee4a6a --- /dev/null +++ b/youtube_dl/extractor/tvn24.py @@ -0,0 +1,47 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class TVN24IE(InfoExtractor): + _VALID_URL = r'http://(?:tvn24bis|(?:www|fakty)\.tvn24)\.pl/.+/(?P<id>[^/]+)\.html' + _TEST = { + 'url': 'http://www.tvn24.pl/wiadomosci-z-kraju,3/oredzie-artura-andrusa,702428.html', + 'md5': 'fbdec753d7bc29d96036808275f2130c', + 'info_dict': { + 'id': '1584444', + 'ext': 'mp4', + 'title': '"Święta mają być wesołe, dlatego, ludziska, wszyscy pod jemiołę"', + 'description': 'Wyjątkowe orędzie Artura Andrusa, jednego z gości "Szkła kontaktowego".', + 'thumbnail': 're:http://.*[.]jpeg', + } + } + + def _real_extract(self, url): + page_id = self._match_id(url) + webpage = self._download_webpage(url, page_id) + title = self._og_search_title(webpage) + description = self._og_search_description(webpage) + thumbnail = self._html_search_regex(r'\bdata-poster="(.+?)"', webpage, 'data-poster') + share_params = self._html_search_regex(r'\bdata-share-params="(.+?)"', webpage, 'data-share-params') + share_params = self._parse_json(share_params, page_id) + video_id = share_params['id'] + quality_data = self._html_search_regex(r'\bdata-quality="(.+?)"', webpage, 'data-quality') + quality_data = self._parse_json(quality_data, page_id) + formats = [] + for format_id, url in quality_data.items(): + formats.append({ + 'format_id': format_id, + 'height': int(format_id.rstrip('p')), + 'url': url, + 'ext': 'mp4', + }) + self._sort_formats(formats) + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'formats': formats, + } From e84888b4322abd2e2a74e8a89b7942a68dd0b6a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 18 Feb 2017 23:34:09 +0700 Subject: [PATCH 048/225] [tvn24] Improve extraction (closes #11679) --- youtube_dl/extractor/tvn24.py | 59 ++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 15 deletions(-) diff --git a/youtube_dl/extractor/tvn24.py b/youtube_dl/extractor/tvn24.py index 225ee4a6a..12ed6039c 100644 --- a/youtube_dl/extractor/tvn24.py +++ b/youtube_dl/extractor/tvn24.py @@ -2,11 +2,15 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..utils import ( + int_or_none, + unescapeHTML, +) class TVN24IE(InfoExtractor): - _VALID_URL = r'http://(?:tvn24bis|(?:www|fakty)\.tvn24)\.pl/.+/(?P<id>[^/]+)\.html' - _TEST = { + _VALID_URL = r'https?://(?:(?:[^/]+)\.)?tvn24(?:bis)?\.pl/(?:[^/]+/)*(?P<id>[^/]+)\.html' + _TESTS = [{ 'url': 'http://www.tvn24.pl/wiadomosci-z-kraju,3/oredzie-artura-andrusa,702428.html', 'md5': 'fbdec753d7bc29d96036808275f2130c', 'info_dict': { @@ -16,28 +20,53 @@ class TVN24IE(InfoExtractor): 'description': 'Wyjątkowe orędzie Artura Andrusa, jednego z gości "Szkła kontaktowego".', 
'thumbnail': 're:http://.*[.]jpeg', } - } + }, { + 'url': 'http://fakty.tvn24.pl/ogladaj-online,60/53-konferencja-bezpieczenstwa-w-monachium,716431.html', + 'only_matching': True, + }, { + 'url': 'http://sport.tvn24.pl/pilka-nozna,105/ligue-1-kamil-glik-rozcial-glowe-monaco-tylko-remisuje-z-bastia,716522.html', + 'only_matching': True, + }, { + 'url': 'http://tvn24bis.pl/poranek,146,m/gen-koziej-w-tvn24-bis-wracamy-do-czasow-zimnej-wojny,715660.html', + 'only_matching': True, + }] def _real_extract(self, url): - page_id = self._match_id(url) - webpage = self._download_webpage(url, page_id) + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + title = self._og_search_title(webpage) - description = self._og_search_description(webpage) - thumbnail = self._html_search_regex(r'\bdata-poster="(.+?)"', webpage, 'data-poster') - share_params = self._html_search_regex(r'\bdata-share-params="(.+?)"', webpage, 'data-share-params') - share_params = self._parse_json(share_params, page_id) - video_id = share_params['id'] - quality_data = self._html_search_regex(r'\bdata-quality="(.+?)"', webpage, 'data-quality') - quality_data = self._parse_json(quality_data, page_id) + + def extract_json(attr, name, fatal=True): + return self._parse_json( + self._search_regex( + r'\b%s=(["\'])(?P<json>(?!\1).+?)\1' % attr, webpage, + name, group='json', fatal=fatal) or '{}', + video_id, transform_source=unescapeHTML, fatal=fatal) + + quality_data = extract_json('data-quality', 'formats') + formats = [] for format_id, url in quality_data.items(): formats.append({ - 'format_id': format_id, - 'height': int(format_id.rstrip('p')), 'url': url, - 'ext': 'mp4', + 'format_id': format_id, + 'height': int_or_none(format_id.rstrip('p')), }) self._sort_formats(formats) + + description = self._og_search_description(webpage) + thumbnail = self._og_search_thumbnail( + webpage, default=None) or self._html_search_regex( + r'\bdata-poster=(["\'])(?P<url>(?!\1).+?)\1', webpage, + 'thumbnail', group='url') + + share_params = extract_json( + 'data-share-params', 'share params', fatal=False) + if isinstance(share_params, dict): + video_id = share_params.get('id') or video_id + return { 'id': video_id, 'title': title, From ac33accd96279ee541952aaa4f0bb72b4f76b9ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 18 Feb 2017 23:59:26 +0700 Subject: [PATCH 049/225] [options] Mention quoted string literals for --match-filter --- youtube_dl/options.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 2fea99ff2..deff54324 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -302,8 +302,10 @@ def parseOpts(overrideArguments=None): 'match if the key is present, ' '!key to check if the key is not present, ' 'key > NUMBER (like "comment_count > 12", also works with ' - '>=, <, <=, !=, =) to compare against a number, and ' - '& to require multiple matches. ' + '>=, <, <=, !=, =) to compare against a number, ' + 'key = \'LITERAL\' (like "uploader = \'Mike Smith\'", also works with !=) ' + 'to match against a string literal ' + 'and & to require multiple matches. ' 'Values which are not known are excluded unless you ' 'put a question mark (?) after the operator. 
' 'For example, to only match videos that have been liked more than ' From 049a0f4d6da55f4062658da7593363147c92f4a8 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sat, 18 Feb 2017 21:07:09 +0100 Subject: [PATCH 050/225] [brightcove:legacy] restrict videoPlayer value(closes #12040) --- youtube_dl/extractor/brightcove.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index 5c6e99da1..27685eed0 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -191,6 +191,10 @@ class BrightcoveLegacyIE(InfoExtractor): # These fields hold the id of the video videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList') if videoPlayer is not None: + if isinstance(videoPlayer, list): + videoPlayer = videoPlayer[0] + if not (videoPlayer.isdigit() or videoPlayer.startswith('ref:')): + return None params['@videoPlayer'] = videoPlayer linkBase = find_param('linkBaseURL') if linkBase is not None: From bf5b9d859a1f2a68fda0dc57eb839448c7571dfa Mon Sep 17 00:00:00 2001 From: Pierre Mdawar <p.mdawar@gmail.com> Date: Mon, 17 Oct 2016 14:38:37 +0300 Subject: [PATCH 051/225] [utils] Introduce YoutubeDLError base class for all youtube-dl exceptions --- youtube_dl/utils.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 07c07be6f..3f9e592e3 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -701,7 +701,12 @@ def bug_reports_message(): return msg -class ExtractorError(Exception): +class YoutubeDLError(Exception): + """Base exception for YoutubeDL errors.""" + pass + + +class ExtractorError(YoutubeDLError): """Error during info extraction.""" def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): @@ -742,7 +747,7 @@ class RegexNotFoundError(ExtractorError): pass -class DownloadError(Exception): +class DownloadError(YoutubeDLError): """Download Error exception. This exception may be thrown by FileDownloader objects if they are not @@ -756,7 +761,7 @@ class DownloadError(Exception): self.exc_info = exc_info -class SameFileError(Exception): +class SameFileError(YoutubeDLError): """Same File exception. This exception will be thrown by FileDownloader objects if they detect @@ -765,7 +770,7 @@ class SameFileError(Exception): pass -class PostProcessingError(Exception): +class PostProcessingError(YoutubeDLError): """Post Processing exception. This exception may be raised by PostProcessor's .run() method to @@ -773,15 +778,16 @@ class PostProcessingError(Exception): """ def __init__(self, msg): + super(PostProcessingError, self).__init__(msg) self.msg = msg -class MaxDownloadsReached(Exception): +class MaxDownloadsReached(YoutubeDLError): """ --max-downloads limit has been reached. """ pass -class UnavailableVideoError(Exception): +class UnavailableVideoError(YoutubeDLError): """Unavailable Format exception. This exception will be thrown when a video is requested @@ -790,7 +796,7 @@ class UnavailableVideoError(Exception): pass -class ContentTooShortError(Exception): +class ContentTooShortError(YoutubeDLError): """Content Too Short exception. 
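This patch rebases youtube-dl's exception hierarchy onto the new YoutubeDLError base class, so code that embeds youtube-dl can catch every library error with a single except clause instead of listing DownloadError, ExtractorError, PostProcessingError and the rest individually. A minimal sketch of that usage (the URL is a placeholder, not taken from this series):

    import youtube_dl
    from youtube_dl.utils import YoutubeDLError

    try:
        with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
            # placeholder URL, for illustration only
            ydl.download(['https://example.com/some-video'])
    except YoutubeDLError as exc:
        # DownloadError, ExtractorError, ContentTooShortError, etc. all
        # derive from YoutubeDLError after this patch, so one handler suffices.
        print('youtube-dl failed: %s' % exc)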
This exception may be raised by FileDownloader objects when a file they @@ -799,12 +805,15 @@ class ContentTooShortError(Exception): """ def __init__(self, downloaded, expected): + super(ContentTooShortError, self).__init__( + 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) + ) # Both in bytes self.downloaded = downloaded self.expected = expected -class XAttrMetadataError(Exception): +class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): super(XAttrMetadataError, self).__init__(msg) self.code = code @@ -820,7 +829,7 @@ class XAttrMetadataError(Exception): self.reason = 'NOT_SUPPORTED' -class XAttrUnavailableError(Exception): +class XAttrUnavailableError(YoutubeDLError): pass From 773f291dcbce486fefe24e1abd29735d374d0a9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:49:58 +0700 Subject: [PATCH 052/225] Add experimental geo restriction bypass mechanism Based on faking X-Forwarded-For HTTP header --- youtube_dl/YoutubeDL.py | 17 +++ youtube_dl/__init__.py | 2 + youtube_dl/extractor/common.py | 48 +++++- youtube_dl/options.py | 12 ++ youtube_dl/utils.py | 267 +++++++++++++++++++++++++++++++++ 5 files changed, 340 insertions(+), 6 deletions(-) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index a7bf5a1b0..ebace6b57 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -56,6 +56,8 @@ from .utils import ( ExtractorError, format_bytes, formatSeconds, + GeoRestrictedError, + ISO3166Utils, locked_file, make_HTTPS_handler, MaxDownloadsReached, @@ -272,6 +274,13 @@ class YoutubeDL(object): If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. + bypass_geo_restriction: + Bypass geographic restriction via faking X-Forwarded-For + HTTP header (experimental) + bypass_geo_restriction_as_country: + Two-letter ISO 3166-2 country code that will be used for + explicit geographic restriction bypassing via faking + X-Forwarded-For HTTP header (experimental) The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. @@ -707,6 +716,14 @@ class YoutubeDL(object): return self.process_ie_result(ie_result, download, extra_info) else: return ie_result + except GeoRestrictedError as e: + msg = e.msg + if e.countries: + msg += '\nThis video is available in %s.' % ', '.join( + map(ISO3166Utils.short2full, e.countries)) + msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.' 
+ self.report_error(msg) + break except ExtractorError as e: # An error we somewhat expected self.report_error(compat_str(e), e.format_traceback()) break diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index 5c5b8094b..94f461a78 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -414,6 +414,8 @@ def _real_main(argv=None): 'cn_verification_proxy': opts.cn_verification_proxy, 'geo_verification_proxy': opts.geo_verification_proxy, 'config_location': opts.config_location, + 'bypass_geo_restriction': opts.bypass_geo_restriction, + 'bypass_geo_restriction_as_country': opts.bypass_geo_restriction_as_country, } with YoutubeDL(ydl_opts) as ydl: diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index f6ff56eda..96815099d 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -6,6 +6,7 @@ import hashlib import json import netrc import os +import random import re import socket import sys @@ -39,6 +40,8 @@ from ..utils import ( ExtractorError, fix_xml_ampersands, float_or_none, + GeoRestrictedError, + GeoUtils, int_or_none, js_to_json, parse_iso8601, @@ -320,17 +323,25 @@ class InfoExtractor(object): _real_extract() methods and define a _VALID_URL regexp. Probably, they should also be added to the list of extractors. + _BYPASS_GEO attribute may be set to False in order to disable + geo restriction bypass mechanisms for a particular extractor. + Though it won't disable explicit geo restriction bypass based on + country code provided with bypass_geo_restriction_as_country. + Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. """ _ready = False _downloader = None + _x_forwarded_for_ip = None + _BYPASS_GEO = True _WORKING = True def __init__(self, downloader=None): """Constructor. Receives an optional downloader.""" self._ready = False + self._x_forwarded_for_ip = None self.set_downloader(downloader) @classmethod @@ -359,6 +370,10 @@ class InfoExtractor(object): def initialize(self): """Initializes an instance (authentication, etc).""" + if not self._x_forwarded_for_ip: + country_code = self._downloader.params.get('bypass_geo_restriction_as_country', None) + if country_code: + self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if not self._ready: self._real_initialize() self._ready = True @@ -366,8 +381,22 @@ class InfoExtractor(object): def extract(self, url): """Extracts URL information and returns it in list of dicts.""" try: - self.initialize() - return self._real_extract(url) + for _ in range(2): + try: + self.initialize() + return self._real_extract(url) + except GeoRestrictedError as e: + if (not self._downloader.params.get('bypass_geo_restriction_as_country', None) and + self._BYPASS_GEO and + self._downloader.params.get('bypass_geo_restriction', True) and + not self._x_forwarded_for_ip and + e.countries): + self._x_forwarded_for_ip = GeoUtils.random_ipv4(random.choice(e.countries)) + if self._x_forwarded_for_ip: + self.report_warning( + 'Video is geo restricted. Retrying extraction with fake %s IP as X-Forwarded-For.' % self._x_forwarded_for_ip) + continue + raise except ExtractorError: raise except compat_http_client.IncompleteRead as e: @@ -434,6 +463,15 @@ class InfoExtractor(object): if isinstance(url_or_request, (compat_str, str)): url_or_request = url_or_request.partition('#')[0] + # Some sites check X-Forwarded-For HTTP header in order to figure out + # the origin of the client behind proxy. 
This allows bypassing geo + # restriction by faking this header's value to IP that belongs to some + # geo unrestricted country. We will do so once we encounter any + # geo restriction error. + if self._x_forwarded_for_ip: + if 'X-Forwarded-For' not in headers: + headers['X-Forwarded-For'] = self._x_forwarded_for_ip + urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query) if urlh is False: assert not fatal @@ -609,10 +647,8 @@ class InfoExtractor(object): expected=True) @staticmethod - def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'): - raise ExtractorError( - '%s. You might want to use --proxy to workaround.' % msg, - expected=True) + def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None): + raise GeoRestrictedError(msg, countries=countries) # Methods for following #608 @staticmethod diff --git a/youtube_dl/options.py b/youtube_dl/options.py index deff54324..2e194f6dc 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -549,6 +549,18 @@ def parseOpts(overrideArguments=None): 'Upper bound of a range for randomized sleep before each download ' '(maximum possible number of seconds to sleep). Must only be used ' 'along with --min-sleep-interval.')) + workarounds.add_option( + '--bypass-geo', + action='store_true', dest='bypass_geo_restriction', default=True, + help='Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') + workarounds.add_option( + '--no-bypass-geo', + action='store_false', dest='bypass_geo_restriction', default=True, + help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') + workarounds.add_option( + '--bypass-geo-as-country', metavar='CODE', + dest='bypass_geo_restriction_as_country', default=None, + help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code (experimental)') verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options') verbosity.add_option( diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 3f9e592e3..4e76b6b7b 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -23,6 +23,7 @@ import operator import os import pipes import platform +import random import re import socket import ssl @@ -747,6 +748,18 @@ class RegexNotFoundError(ExtractorError): pass +class GeoRestrictedError(ExtractorError): + """Geographic restriction Error exception. + + This exception may be thrown when a video is not available from your + geographic location due to geographic restrictions imposed by a website. + """ + def __init__(self, msg, countries=None): + super(GeoRestrictedError, self).__init__(msg, expected=True) + self.msg = msg + self.countries = countries + + class DownloadError(YoutubeDLError): """Download Error exception. 
@@ -3027,6 +3040,260 @@ class ISO3166Utils(object): return cls._country_map.get(code.upper()) +class GeoUtils(object): + # Major IPv4 address blocks per country + _country_ip_map = { + 'AD': '85.94.160.0/19', + 'AE': '94.200.0.0/13', + 'AF': '149.54.0.0/17', + 'AG': '209.59.64.0/18', + 'AI': '204.14.248.0/21', + 'AL': '46.99.0.0/16', + 'AM': '46.70.0.0/15', + 'AO': '105.168.0.0/13', + 'AP': '159.117.192.0/21', + 'AR': '181.0.0.0/12', + 'AS': '202.70.112.0/20', + 'AT': '84.112.0.0/13', + 'AU': '1.128.0.0/11', + 'AW': '181.41.0.0/18', + 'AZ': '5.191.0.0/16', + 'BA': '31.176.128.0/17', + 'BB': '65.48.128.0/17', + 'BD': '114.130.0.0/16', + 'BE': '57.0.0.0/8', + 'BF': '129.45.128.0/17', + 'BG': '95.42.0.0/15', + 'BH': '37.131.0.0/17', + 'BI': '154.117.192.0/18', + 'BJ': '137.255.0.0/16', + 'BL': '192.131.134.0/24', + 'BM': '196.12.64.0/18', + 'BN': '156.31.0.0/16', + 'BO': '161.56.0.0/16', + 'BQ': '161.0.80.0/20', + 'BR': '152.240.0.0/12', + 'BS': '24.51.64.0/18', + 'BT': '119.2.96.0/19', + 'BW': '168.167.0.0/16', + 'BY': '178.120.0.0/13', + 'BZ': '179.42.192.0/18', + 'CA': '99.224.0.0/11', + 'CD': '41.243.0.0/16', + 'CF': '196.32.200.0/21', + 'CG': '197.214.128.0/17', + 'CH': '85.0.0.0/13', + 'CI': '154.232.0.0/14', + 'CK': '202.65.32.0/19', + 'CL': '152.172.0.0/14', + 'CM': '165.210.0.0/15', + 'CN': '36.128.0.0/10', + 'CO': '181.240.0.0/12', + 'CR': '201.192.0.0/12', + 'CU': '152.206.0.0/15', + 'CV': '165.90.96.0/19', + 'CW': '190.88.128.0/17', + 'CY': '46.198.0.0/15', + 'CZ': '88.100.0.0/14', + 'DE': '53.0.0.0/8', + 'DJ': '197.241.0.0/17', + 'DK': '87.48.0.0/12', + 'DM': '192.243.48.0/20', + 'DO': '152.166.0.0/15', + 'DZ': '41.96.0.0/12', + 'EC': '186.68.0.0/15', + 'EE': '90.190.0.0/15', + 'EG': '156.160.0.0/11', + 'ER': '196.200.96.0/20', + 'ES': '88.0.0.0/11', + 'ET': '196.188.0.0/14', + 'EU': '2.16.0.0/13', + 'FI': '91.152.0.0/13', + 'FJ': '144.120.0.0/16', + 'FM': '119.252.112.0/20', + 'FO': '88.85.32.0/19', + 'FR': '90.0.0.0/9', + 'GA': '41.158.0.0/15', + 'GB': '25.0.0.0/8', + 'GD': '74.122.88.0/21', + 'GE': '31.146.0.0/16', + 'GF': '161.22.64.0/18', + 'GG': '62.68.160.0/19', + 'GH': '45.208.0.0/14', + 'GI': '85.115.128.0/19', + 'GL': '88.83.0.0/19', + 'GM': '160.182.0.0/15', + 'GN': '197.149.192.0/18', + 'GP': '104.250.0.0/19', + 'GQ': '105.235.224.0/20', + 'GR': '94.64.0.0/13', + 'GT': '168.234.0.0/16', + 'GU': '168.123.0.0/16', + 'GW': '197.214.80.0/20', + 'GY': '181.41.64.0/18', + 'HK': '113.252.0.0/14', + 'HN': '181.210.0.0/16', + 'HR': '93.136.0.0/13', + 'HT': '148.102.128.0/17', + 'HU': '84.0.0.0/14', + 'ID': '39.192.0.0/10', + 'IE': '87.32.0.0/12', + 'IL': '79.176.0.0/13', + 'IM': '5.62.80.0/20', + 'IN': '117.192.0.0/10', + 'IO': '203.83.48.0/21', + 'IQ': '37.236.0.0/14', + 'IR': '2.176.0.0/12', + 'IS': '82.221.0.0/16', + 'IT': '79.0.0.0/10', + 'JE': '87.244.64.0/18', + 'JM': '72.27.0.0/17', + 'JO': '176.29.0.0/16', + 'JP': '126.0.0.0/8', + 'KE': '105.48.0.0/12', + 'KG': '158.181.128.0/17', + 'KH': '36.37.128.0/17', + 'KI': '103.25.140.0/22', + 'KM': '197.255.224.0/20', + 'KN': '198.32.32.0/19', + 'KP': '175.45.176.0/22', + 'KR': '175.192.0.0/10', + 'KW': '37.36.0.0/14', + 'KY': '64.96.0.0/15', + 'KZ': '2.72.0.0/13', + 'LA': '115.84.64.0/18', + 'LB': '178.135.0.0/16', + 'LC': '192.147.231.0/24', + 'LI': '82.117.0.0/19', + 'LK': '112.134.0.0/15', + 'LR': '41.86.0.0/19', + 'LS': '129.232.0.0/17', + 'LT': '78.56.0.0/13', + 'LU': '188.42.0.0/16', + 'LV': '46.109.0.0/16', + 'LY': '41.252.0.0/14', + 'MA': '105.128.0.0/11', + 'MC': '88.209.64.0/18', + 'MD': '37.246.0.0/16', + 'ME': 
'178.175.0.0/17', + 'MF': '74.112.232.0/21', + 'MG': '154.126.0.0/17', + 'MH': '117.103.88.0/21', + 'MK': '77.28.0.0/15', + 'ML': '154.118.128.0/18', + 'MM': '37.111.0.0/17', + 'MN': '49.0.128.0/17', + 'MO': '60.246.0.0/16', + 'MP': '202.88.64.0/20', + 'MQ': '109.203.224.0/19', + 'MR': '41.188.64.0/18', + 'MS': '208.90.112.0/22', + 'MT': '46.11.0.0/16', + 'MU': '105.16.0.0/12', + 'MV': '27.114.128.0/18', + 'MW': '105.234.0.0/16', + 'MX': '187.192.0.0/11', + 'MY': '175.136.0.0/13', + 'MZ': '197.218.0.0/15', + 'NA': '41.182.0.0/16', + 'NC': '101.101.0.0/18', + 'NE': '197.214.0.0/18', + 'NF': '203.17.240.0/22', + 'NG': '105.112.0.0/12', + 'NI': '186.76.0.0/15', + 'NL': '145.96.0.0/11', + 'NO': '84.208.0.0/13', + 'NP': '36.252.0.0/15', + 'NR': '203.98.224.0/19', + 'NU': '49.156.48.0/22', + 'NZ': '49.224.0.0/14', + 'OM': '5.36.0.0/15', + 'PA': '186.72.0.0/15', + 'PE': '186.160.0.0/14', + 'PF': '123.50.64.0/18', + 'PG': '124.240.192.0/19', + 'PH': '49.144.0.0/13', + 'PK': '39.32.0.0/11', + 'PL': '83.0.0.0/11', + 'PM': '70.36.0.0/20', + 'PR': '66.50.0.0/16', + 'PS': '188.161.0.0/16', + 'PT': '85.240.0.0/13', + 'PW': '202.124.224.0/20', + 'PY': '181.120.0.0/14', + 'QA': '37.210.0.0/15', + 'RE': '139.26.0.0/16', + 'RO': '79.112.0.0/13', + 'RS': '178.220.0.0/14', + 'RU': '5.136.0.0/13', + 'RW': '105.178.0.0/15', + 'SA': '188.48.0.0/13', + 'SB': '202.1.160.0/19', + 'SC': '154.192.0.0/11', + 'SD': '154.96.0.0/13', + 'SE': '78.64.0.0/12', + 'SG': '152.56.0.0/14', + 'SI': '188.196.0.0/14', + 'SK': '78.98.0.0/15', + 'SL': '197.215.0.0/17', + 'SM': '89.186.32.0/19', + 'SN': '41.82.0.0/15', + 'SO': '197.220.64.0/19', + 'SR': '186.179.128.0/17', + 'SS': '105.235.208.0/21', + 'ST': '197.159.160.0/19', + 'SV': '168.243.0.0/16', + 'SX': '190.102.0.0/20', + 'SY': '5.0.0.0/16', + 'SZ': '41.84.224.0/19', + 'TC': '65.255.48.0/20', + 'TD': '154.68.128.0/19', + 'TG': '196.168.0.0/14', + 'TH': '171.96.0.0/13', + 'TJ': '85.9.128.0/18', + 'TK': '27.96.24.0/21', + 'TL': '180.189.160.0/20', + 'TM': '95.85.96.0/19', + 'TN': '197.0.0.0/11', + 'TO': '175.176.144.0/21', + 'TR': '78.160.0.0/11', + 'TT': '186.44.0.0/15', + 'TV': '202.2.96.0/19', + 'TW': '120.96.0.0/11', + 'TZ': '156.156.0.0/14', + 'UA': '93.72.0.0/13', + 'UG': '154.224.0.0/13', + 'US': '3.0.0.0/8', + 'UY': '167.56.0.0/13', + 'UZ': '82.215.64.0/18', + 'VA': '212.77.0.0/19', + 'VC': '24.92.144.0/20', + 'VE': '186.88.0.0/13', + 'VG': '172.103.64.0/18', + 'VI': '146.226.0.0/16', + 'VN': '14.160.0.0/11', + 'VU': '202.80.32.0/20', + 'WF': '117.20.32.0/21', + 'WS': '202.4.32.0/19', + 'YE': '134.35.0.0/16', + 'YT': '41.242.116.0/22', + 'ZA': '41.0.0.0/11', + 'ZM': '165.56.0.0/13', + 'ZW': '41.85.192.0/19', + } + + @classmethod + def random_ipv4(cls, code): + block = cls._country_ip_map.get(code.upper()) + if not block: + return None + addr, preflen = block.split('/') + addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] + addr_max = addr_min | (0xffffffff >> int(preflen)) + return socket.inet_ntoa( + compat_struct_pack('!I', random.randint(addr_min, addr_max))) + + class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers From d392005a795a6cf85fda3c0f982254f8a2731e94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:51:16 +0700 Subject: [PATCH 053/225] [dramafever] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/dramafever.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py index bcd9fe2a0..755db806a 100644 --- a/youtube_dl/extractor/dramafever.py +++ b/youtube_dl/extractor/dramafever.py @@ -116,8 +116,9 @@ class DramaFeverIE(DramaFeverBaseIE): 'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError): - raise ExtractorError( - 'Currently unavailable in your country.', expected=True) + self.raise_geo_restricted( + msg='Currently unavailable in your country', + countries=['US', 'CA']) raise series_id, episode_number = video_id.split('.') From e633f21a96f37a96e8ef0fd4d6c1e4d3c0b41fbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:51:33 +0700 Subject: [PATCH 054/225] [go] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/go.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/youtube_dl/extractor/go.py b/youtube_dl/extractor/go.py index f28e6fbf5..ec902c670 100644 --- a/youtube_dl/extractor/go.py +++ b/youtube_dl/extractor/go.py @@ -101,6 +101,10 @@ class GoIE(AdobePassIE): video_id, data=urlencode_postdata(data), headers=self.geo_verification_headers()) errors = entitlement.get('errors', {}).get('errors', []) if errors: + for error in errors: + if error.get('code') == 1002: + self.raise_geo_restricted( + error['message'], countries=['US']) error_message = ', '.join([error['message'] for error in errors]) raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) asset_url += '?' + entitlement['uplynkData']['sessionKey'] From 28200e654b8051cadca12e51bd57f77e1ff0a4ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:51:51 +0700 Subject: [PATCH 055/225] [itv] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/itv.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/youtube_dl/extractor/itv.py b/youtube_dl/extractor/itv.py index b0d860452..aabde15f3 100644 --- a/youtube_dl/extractor/itv.py +++ b/youtube_dl/extractor/itv.py @@ -98,7 +98,10 @@ class ITVIE(InfoExtractor): headers=headers, data=etree.tostring(req_env)) playlist = xpath_element(resp_env, './/Playlist') if playlist is None: + fault_code = xpath_text(resp_env, './/faultcode') fault_string = xpath_text(resp_env, './/faultstring') + if fault_code == 'InvalidGeoRegion': + self.raise_geo_restricted(msg=fault_string, countries=['GB']) raise ExtractorError('%s said: %s' % (self.IE_NAME, fault_string)) title = xpath_text(playlist, 'EpisodeTitle', fatal=True) video_element = xpath_element(playlist, 'VideoEntries/Video', fatal=True) From ff4007891fde74212eb0898bb04c14b2de92ed03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:51:59 +0700 Subject: [PATCH 056/225] [nrk] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/nrk.py | 36 +++++------------------------------- 1 file changed, 5 insertions(+), 31 deletions(-) diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index fc3c0cd3c..78ece33e1 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -1,7 +1,6 @@ # coding: utf-8 from __future__ import unicode_literals -import random import re from .common import InfoExtractor @@ -15,25 +14,6 @@ from ..utils import ( class NRKBaseIE(InfoExtractor): - _faked_ip = None - - def _download_webpage_handle(self, *args, 
**kwargs): - # NRK checks X-Forwarded-For HTTP header in order to figure out the - # origin of the client behind proxy. This allows to bypass geo - # restriction by faking this header's value to some Norway IP. - # We will do so once we encounter any geo restriction error. - if self._faked_ip: - # NB: str is intentional - kwargs.setdefault(str('headers'), {})['X-Forwarded-For'] = self._faked_ip - return super(NRKBaseIE, self)._download_webpage_handle(*args, **kwargs) - - def _fake_ip(self): - # Use fake IP from 37.191.128.0/17 in order to workaround geo - # restriction - def octet(lb=0, ub=255): - return random.randint(lb, ub) - self._faked_ip = '37.191.%d.%d' % (octet(128), octet()) - def _real_extract(self, url): video_id = self._match_id(url) @@ -44,8 +24,6 @@ class NRKBaseIE(InfoExtractor): title = data.get('fullTitle') or data.get('mainTitle') or data['title'] video_id = data.get('id') or video_id - http_headers = {'X-Forwarded-For': self._faked_ip} if self._faked_ip else {} - entries = [] conviva = data.get('convivaStatistics') or {} @@ -90,7 +68,6 @@ class NRKBaseIE(InfoExtractor): 'duration': duration, 'subtitles': subtitles, 'formats': formats, - 'http_headers': http_headers, }) if not entries: @@ -107,19 +84,16 @@ class NRKBaseIE(InfoExtractor): }] if not entries: - message_type = data.get('messageType', '') - # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked* - if 'IsGeoBlocked' in message_type and not self._faked_ip: - self.report_warning( - 'Video is geo restricted, trying to fake IP') - self._fake_ip() - return self._real_extract(url) - MESSAGES = { 'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet', 'ProgramRightsHasExpired': 'Programmet har gått ut', 'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge', } + message_type = data.get('messageType', '') + # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked* + if 'IsGeoBlocked' in message_type: + self.raise_geo_restricted( + msg=MESSAGES.get('ProgramIsGeoBlocked'), countries=['NO']) raise ExtractorError( '%s said: %s' % (self.IE_NAME, MESSAGES.get( message_type, message_type)), From 01b1aa9ff408ce15b8bbea08dbc190f3282141a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:52:11 +0700 Subject: [PATCH 057/225] [ondemandkorea] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/ondemandkorea.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/ondemandkorea.py b/youtube_dl/extractor/ondemandkorea.py index dcd157777..0c85d549e 100644 --- a/youtube_dl/extractor/ondemandkorea.py +++ b/youtube_dl/extractor/ondemandkorea.py @@ -35,7 +35,8 @@ class OnDemandKoreaIE(InfoExtractor): if 'msg_block_01.png' in webpage: self.raise_geo_restricted( - 'This content is not available in your region') + msg='This content is not available in your region', + countries=['US', 'CA']) if 'This video is only available to ODK PLUS members.' 
in webpage: raise ExtractorError( From 8ab8066cf08352ad336c3ff594d0ac27f6c809c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:52:20 +0700 Subject: [PATCH 058/225] [pbs] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/pbs.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py index 6baed773f..64f47bae3 100644 --- a/youtube_dl/extractor/pbs.py +++ b/youtube_dl/extractor/pbs.py @@ -489,11 +489,12 @@ class PBSIE(InfoExtractor): headers=self.geo_verification_headers()) if redirect_info['status'] == 'error': + message = self._ERRORS.get( + redirect_info['http_code'], redirect_info['message']) + if redirect_info['http_code'] == 403: + self.raise_geo_restricted(msg=message, countries=['US']) raise ExtractorError( - '%s said: %s' % ( - self.IE_NAME, - self._ERRORS.get(redirect_info['http_code'], redirect_info['message'])), - expected=True) + '%s said: %s' % (self.IE_NAME, message), expected=True) format_url = redirect_info.get('url') if not format_url: From 04d906eae3071e37049cfcd2a02e9079b72a265c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:52:33 +0700 Subject: [PATCH 059/225] [svt] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/svt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py index 10cf80885..f2a2200bf 100644 --- a/youtube_dl/extractor/svt.py +++ b/youtube_dl/extractor/svt.py @@ -38,7 +38,8 @@ class SVTBaseIE(InfoExtractor): 'url': vurl, }) if not formats and video_info.get('rights', {}).get('geoBlockedSweden'): - self.raise_geo_restricted('This video is only available in Sweden') + self.raise_geo_restricted( + 'This video is only available in Sweden', countries=['SE']) self._sort_formats(formats) subtitles = {} From 89cc7fe7705b6534f434b514265a0507b70ef40f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:52:42 +0700 Subject: [PATCH 060/225] [vbox7] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/vbox7.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py index bef639462..f86d804c1 100644 --- a/youtube_dl/extractor/vbox7.py +++ b/youtube_dl/extractor/vbox7.py @@ -78,7 +78,7 @@ class Vbox7IE(InfoExtractor): video_url = video['src'] if '/na.mp4' in video_url: - self.raise_geo_restricted() + self.raise_geo_restricted(countries=['BG']) uploader = video.get('uploader') From 71631862f4de5a10223642ebdbd5e10db374d270 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:55:23 +0700 Subject: [PATCH 061/225] [srgssr] Improve geo restriction detection --- youtube_dl/extractor/srgssr.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/srgssr.py b/youtube_dl/extractor/srgssr.py index 319a48a7a..a35a0a538 100644 --- a/youtube_dl/extractor/srgssr.py +++ b/youtube_dl/extractor/srgssr.py @@ -14,6 +14,7 @@ from ..utils import ( class SRGSSRIE(InfoExtractor): _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)' + _BYPASS_GEO = False _ERRORS = { 'AGERATING12': 'To protect 
children under the age of 12, this video is only available between 8 p.m. and 6 a.m.', @@ -40,8 +41,11 @@ class SRGSSRIE(InfoExtractor): media_id)[media_type.capitalize()] if media_data.get('block') and media_data['block'] in self._ERRORS: - raise ExtractorError('%s said: %s' % ( - self.IE_NAME, self._ERRORS[media_data['block']]), expected=True) + message = self._ERRORS[media_data['block']] + if media_data['block'] == 'GEOBLOCK': + self.raise_geo_restricted(msg=message, countries=['CH']) + raise ExtractorError( + '%s said: %s' % (self.IE_NAME, message), expected=True) return media_data From 80b59020e02e9c61f74f8f8f8891f9745667edb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:55:31 +0700 Subject: [PATCH 062/225] [vgtv] Improve geo restriction detection --- youtube_dl/extractor/vgtv.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/vgtv.py b/youtube_dl/extractor/vgtv.py index 8a574bc26..1709fd6bb 100644 --- a/youtube_dl/extractor/vgtv.py +++ b/youtube_dl/extractor/vgtv.py @@ -14,6 +14,7 @@ from ..utils import ( class VGTVIE(XstreamIE): IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet' + _BYPASS_GEO = False _HOST_TO_APPNAME = { 'vgtv.no': 'vgtv', @@ -217,7 +218,7 @@ class VGTVIE(XstreamIE): properties = try_get( data, lambda x: x['streamConfiguration']['properties'], list) if properties and 'geoblocked' in properties: - raise self.raise_geo_restricted() + raise self.raise_geo_restricted(countries=['NO']) self._sort_formats(info['formats']) From 5d3fbf77d96ade64c645b6942979c0b99aa4d775 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 18:55:39 +0700 Subject: [PATCH 063/225] [viki] Improve geo restriction detection --- youtube_dl/extractor/viki.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py index 9c48701c1..68a74e246 100644 --- a/youtube_dl/extractor/viki.py +++ b/youtube_dl/extractor/viki.py @@ -27,6 +27,7 @@ class VikiBaseIE(InfoExtractor): _APP_VERSION = '2.2.5.1428709186' _APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)' + _BYPASS_GEO = False _NETRC_MACHINE = 'viki' _token = None @@ -77,8 +78,11 @@ class VikiBaseIE(InfoExtractor): def _check_errors(self, data): for reason, status in data.get('blocking', {}).items(): if status and reason in self._ERRORS: + message = self._ERRORS[reason] + if reason == 'geo': + self.raise_geo_restricted(msg=message) raise ExtractorError('%s said: %s' % ( - self.IE_NAME, self._ERRORS[reason]), expected=True) + self.IE_NAME, message), expected=True) def _real_initialize(self): self._login() From 18a0defab063523cd76a30be2dd5a80e9f9172d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 20:26:43 +0700 Subject: [PATCH 064/225] [utils] Make random_ipv4 return unicode string --- youtube_dl/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 4e76b6b7b..cbf7639c5 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -3290,8 +3290,8 @@ class GeoUtils(object): addr, preflen = block.split('/') addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) - return socket.inet_ntoa( - compat_struct_pack('!I', random.randint(addr_min, addr_max))) + return compat_str(socket.inet_ntoa( + 
compat_struct_pack('!I', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): From 0016b84e16965a07c52946c4672363153e8b18a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 4 Feb 2017 21:06:07 +0700 Subject: [PATCH 065/225] Add faked X-Forwarded-For to formats' HTTP headers --- youtube_dl/YoutubeDL.py | 14 ++++++++++++++ youtube_dl/extractor/common.py | 5 ++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index ebace6b57..1c04e46c1 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -864,8 +864,14 @@ class YoutubeDL(object): if self.params.get('playlistrandom', False): random.shuffle(entries) + x_forwarded_for = ie_result.get('__x_forwarded_for_ip') + for i, entry in enumerate(entries, 1): self.to_screen('[download] Downloading video %s of %s' % (i, n_entries)) + # This __x_forwarded_for_ip thing is a bit ugly but requires + # minimal changes + if x_forwarded_for: + entry['__x_forwarded_for_ip'] = x_forwarded_for extra = { 'n_entries': n_entries, 'playlist': playlist, @@ -1250,6 +1256,11 @@ class YoutubeDL(object): if cookies: res['Cookie'] = cookies + if 'X-Forwarded-For' not in res: + x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip') + if x_forwarded_for_ip: + res['X-Forwarded-For'] = x_forwarded_for_ip + return res def _calc_cookies(self, info_dict): @@ -1392,6 +1403,9 @@ class YoutubeDL(object): full_format_info = info_dict.copy() full_format_info.update(format) format['http_headers'] = self._calc_headers(full_format_info) + # Remove private housekeeping stuff + if '__x_forwarded_for_ip' in info_dict: + del info_dict['__x_forwarded_for_ip'] # TODO Central sorting goes here diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 96815099d..c1f7f28a0 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -384,7 +384,10 @@ class InfoExtractor(object): for _ in range(2): try: self.initialize() - return self._real_extract(url) + ie_result = self._real_extract(url) + if self._x_forwarded_for_ip: + ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip + return ie_result except GeoRestrictedError as e: if (not self._downloader.params.get('bypass_geo_restriction_as_country', None) and self._BYPASS_GEO and From 0a840f584c3f1fedb6957c05587dec697143f2d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 19 Feb 2017 01:53:41 +0700 Subject: [PATCH 066/225] Rename bypass geo restriction options --- youtube_dl/YoutubeDL.py | 5 ++--- youtube_dl/__init__.py | 4 ++-- youtube_dl/extractor/common.py | 8 ++++---- youtube_dl/options.py | 12 ++++++------ 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 1c04e46c1..68000dea2 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -274,10 +274,9 @@ class YoutubeDL(object): If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. 
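The hunks below simply rename the embedding parameters to geo_bypass and geo_bypass_country, matching the renamed --geo-bypass, --no-geo-bypass and --geo-bypass-country command line options further down. A small usage sketch with the renamed names (the URL is a placeholder; both options are marked experimental in this series):

    import youtube_dl

    ydl_opts = {
        # automatic X-Forwarded-For faking, enabled by default
        'geo_bypass': True,
        # optionally force a specific two-letter ISO 3166-2 country code
        'geo_bypass_country': 'NO',
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['https://example.com/geo-blocked-video'])  # placeholder URL

The command line equivalent is youtube-dl --geo-bypass-country NO <URL>.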
- bypass_geo_restriction: - Bypass geographic restriction via faking X-Forwarded-For + geo_bypass: Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental) - bypass_geo_restriction_as_country: + geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for explicit geographic restriction bypassing via faking X-Forwarded-For HTTP header (experimental) diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index 94f461a78..f91d29a7b 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -414,8 +414,8 @@ def _real_main(argv=None): 'cn_verification_proxy': opts.cn_verification_proxy, 'geo_verification_proxy': opts.geo_verification_proxy, 'config_location': opts.config_location, - 'bypass_geo_restriction': opts.bypass_geo_restriction, - 'bypass_geo_restriction_as_country': opts.bypass_geo_restriction_as_country, + 'geo_bypass': opts.geo_bypass, + 'geo_bypass_country': opts.geo_bypass_country, } with YoutubeDL(ydl_opts) as ydl: diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index c1f7f28a0..6eb6a25b8 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -326,7 +326,7 @@ class InfoExtractor(object): _BYPASS_GEO attribute may be set to False in order to disable geo restriction bypass mechanisms for a particular extractor. Though it won't disable explicit geo restriction bypass based on - country code provided with bypass_geo_restriction_as_country. + country code provided with geo_bypass_country. Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. @@ -371,7 +371,7 @@ class InfoExtractor(object): def initialize(self): """Initializes an instance (authentication, etc).""" if not self._x_forwarded_for_ip: - country_code = self._downloader.params.get('bypass_geo_restriction_as_country', None) + country_code = self._downloader.params.get('geo_bypass_country', None) if country_code: self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if not self._ready: @@ -389,9 +389,9 @@ class InfoExtractor(object): ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip return ie_result except GeoRestrictedError as e: - if (not self._downloader.params.get('bypass_geo_restriction_as_country', None) and + if (not self._downloader.params.get('geo_bypass_country', None) and self._BYPASS_GEO and - self._downloader.params.get('bypass_geo_restriction', True) and + self._downloader.params.get('geo_bypass', True) and not self._x_forwarded_for_ip and e.countries): self._x_forwarded_for_ip = GeoUtils.random_ipv4(random.choice(e.countries)) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 2e194f6dc..ae3f50754 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -550,16 +550,16 @@ def parseOpts(overrideArguments=None): '(maximum possible number of seconds to sleep). 
Must only be used ' 'along with --min-sleep-interval.')) workarounds.add_option( - '--bypass-geo', - action='store_true', dest='bypass_geo_restriction', default=True, + '--geo-bypass', + action='store_true', dest='geo_bypass', default=True, help='Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') workarounds.add_option( - '--no-bypass-geo', - action='store_false', dest='bypass_geo_restriction', default=True, + '--no-geo-bypass', + action='store_false', dest='geo_bypass', default=True, help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') workarounds.add_option( - '--bypass-geo-as-country', metavar='CODE', - dest='bypass_geo_restriction_as_country', default=None, + '--geo-bypass-country', metavar='CODE', + dest='geo_bypass_country', default=None, help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code (experimental)') verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options') From 4248dad92bd87650c791194276296b148f668e68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 19 Feb 2017 03:53:23 +0700 Subject: [PATCH 067/225] Improve geo bypass mechanism * Rename options to preffixly match with --geo-verification-proxy * Introduce _GEO_COUNTRIES for extractors * Implement faking IP right away for sites with known geo restriction --- youtube_dl/extractor/common.py | 59 +++++++++++++++++++-------- youtube_dl/extractor/dramafever.py | 3 +- youtube_dl/extractor/go.py | 3 +- youtube_dl/extractor/itv.py | 4 +- youtube_dl/extractor/nrk.py | 4 +- youtube_dl/extractor/ondemandkorea.py | 3 +- youtube_dl/extractor/pbs.py | 5 ++- youtube_dl/extractor/srgssr.py | 6 ++- youtube_dl/extractor/svt.py | 4 +- youtube_dl/extractor/vbox7.py | 3 +- youtube_dl/extractor/vgtv.py | 5 ++- youtube_dl/extractor/viki.py | 2 +- youtube_dl/utils.py | 2 +- 13 files changed, 72 insertions(+), 31 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 6eb6a25b8..272da74b6 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -323,10 +323,15 @@ class InfoExtractor(object): _real_extract() methods and define a _VALID_URL regexp. Probably, they should also be added to the list of extractors. - _BYPASS_GEO attribute may be set to False in order to disable + _GEO_BYPASS attribute may be set to False in order to disable geo restriction bypass mechanisms for a particular extractor. Though it won't disable explicit geo restriction bypass based on - country code provided with geo_bypass_country. + country code provided with geo_bypass_country. (experimental) + + _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted + countries for this extractor. One of these countries will be used by + geo restriction bypass mechanism right away in order to bypass + geo restriction, of course, if the mechanism is not disabled. (experimental) Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. 
@@ -335,7 +340,8 @@ class InfoExtractor(object): _ready = False _downloader = None _x_forwarded_for_ip = None - _BYPASS_GEO = True + _GEO_BYPASS = True + _GEO_COUNTRIES = None _WORKING = True def __init__(self, downloader=None): @@ -370,14 +376,28 @@ class InfoExtractor(object): def initialize(self): """Initializes an instance (authentication, etc).""" - if not self._x_forwarded_for_ip: - country_code = self._downloader.params.get('geo_bypass_country', None) - if country_code: - self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) + self.__initialize_geo_bypass() if not self._ready: self._real_initialize() self._ready = True + def __initialize_geo_bypass(self): + if not self._x_forwarded_for_ip: + country_code = self._downloader.params.get('geo_bypass_country', None) + # If there is no explicit country for geo bypass specified and + # the extractor is known to be geo restricted let's fake IP + # as X-Forwarded-For right away. + if (not country_code and + self._GEO_BYPASS and + self._downloader.params.get('geo_bypass', True) and + self._GEO_COUNTRIES): + country_code = random.choice(self._GEO_COUNTRIES) + if country_code: + self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) + if self._downloader.params.get('verbose', False): + self._downloader.to_stdout( + '[debug] Using fake %s IP as X-Forwarded-For.' % self._x_forwarded_for_ip) + def extract(self, url): """Extracts URL information and returns it in list of dicts.""" try: @@ -389,16 +409,8 @@ class InfoExtractor(object): ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip return ie_result except GeoRestrictedError as e: - if (not self._downloader.params.get('geo_bypass_country', None) and - self._BYPASS_GEO and - self._downloader.params.get('geo_bypass', True) and - not self._x_forwarded_for_ip and - e.countries): - self._x_forwarded_for_ip = GeoUtils.random_ipv4(random.choice(e.countries)) - if self._x_forwarded_for_ip: - self.report_warning( - 'Video is geo restricted. Retrying extraction with fake %s IP as X-Forwarded-For.' % self._x_forwarded_for_ip) - continue + if self.__maybe_fake_ip_and_retry(e.countries): + continue raise except ExtractorError: raise @@ -407,6 +419,19 @@ class InfoExtractor(object): except (KeyError, StopIteration) as e: raise ExtractorError('An extractor error has occurred.', cause=e) + def __maybe_fake_ip_and_retry(self, countries): + if (not self._downloader.params.get('geo_bypass_country', None) and + self._GEO_BYPASS and + self._downloader.params.get('geo_bypass', True) and + not self._x_forwarded_for_ip and + countries): + self._x_forwarded_for_ip = GeoUtils.random_ipv4(random.choice(countries)) + if self._x_forwarded_for_ip: + self.report_warning( + 'Video is geo restricted. Retrying extraction with fake %s IP as X-Forwarded-For.' 
% self._x_forwarded_for_ip) + return True + return False + def set_downloader(self, downloader): """Sets the downloader for this IE.""" self._downloader = downloader diff --git a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py index 755db806a..e7abc8889 100644 --- a/youtube_dl/extractor/dramafever.py +++ b/youtube_dl/extractor/dramafever.py @@ -20,6 +20,7 @@ from ..utils import ( class DramaFeverBaseIE(AMPIE): _LOGIN_URL = 'https://www.dramafever.com/accounts/login/' _NETRC_MACHINE = 'dramafever' + _GEO_COUNTRIES = ['US', 'CA'] _CONSUMER_SECRET = 'DA59dtVXYLxajktV' @@ -118,7 +119,7 @@ class DramaFeverIE(DramaFeverBaseIE): if isinstance(e.cause, compat_HTTPError): self.raise_geo_restricted( msg='Currently unavailable in your country', - countries=['US', 'CA']) + countries=self._GEO_COUNTRIES) raise series_id, episode_number = video_id.split('.') diff --git a/youtube_dl/extractor/go.py b/youtube_dl/extractor/go.py index ec902c670..b205bfc7c 100644 --- a/youtube_dl/extractor/go.py +++ b/youtube_dl/extractor/go.py @@ -37,6 +37,7 @@ class GoIE(AdobePassIE): } } _VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P<id>\w+)|season-\d+/\d+-(?P<display_id>[^/?#]+))' % '|'.join(_SITE_INFO.keys()) + _GEO_COUNTRIES = ['US'] _TESTS = [{ 'url': 'http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx', 'info_dict': { @@ -104,7 +105,7 @@ class GoIE(AdobePassIE): for error in errors: if error.get('code') == 1002: self.raise_geo_restricted( - error['message'], countries=['US']) + error['message'], countries=self._GEO_COUNTRIES) error_message = ', '.join([error['message'] for error in errors]) raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) asset_url += '?' + entitlement['uplynkData']['sessionKey'] diff --git a/youtube_dl/extractor/itv.py b/youtube_dl/extractor/itv.py index aabde15f3..021c6b278 100644 --- a/youtube_dl/extractor/itv.py +++ b/youtube_dl/extractor/itv.py @@ -24,6 +24,7 @@ from ..utils import ( class ITVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?itv\.com/hub/[^/]+/(?P<id>[0-9a-zA-Z]+)' + _GEO_COUNTRIES = ['GB'] _TEST = { 'url': 'http://www.itv.com/hub/mr-bean-animated-series/2a2936a0053', 'info_dict': { @@ -101,7 +102,8 @@ class ITVIE(InfoExtractor): fault_code = xpath_text(resp_env, './/faultcode') fault_string = xpath_text(resp_env, './/faultstring') if fault_code == 'InvalidGeoRegion': - self.raise_geo_restricted(msg=fault_string, countries=['GB']) + self.raise_geo_restricted( + msg=fault_string, countries=self._GEO_COUNTRIES) raise ExtractorError('%s said: %s' % (self.IE_NAME, fault_string)) title = xpath_text(playlist, 'EpisodeTitle', fatal=True) video_element = xpath_element(playlist, 'VideoEntries/Video', fatal=True) diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index 78ece33e1..13af9ed1f 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -14,6 +14,7 @@ from ..utils import ( class NRKBaseIE(InfoExtractor): + _GEO_COUNTRIES = ['NO'] def _real_extract(self, url): video_id = self._match_id(url) @@ -93,7 +94,8 @@ class NRKBaseIE(InfoExtractor): # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked* if 'IsGeoBlocked' in message_type: self.raise_geo_restricted( - msg=MESSAGES.get('ProgramIsGeoBlocked'), countries=['NO']) + msg=MESSAGES.get('ProgramIsGeoBlocked'), + countries=self._GEO_COUNTRIES) raise ExtractorError( '%s said: %s' % (self.IE_NAME, MESSAGES.get( message_type, message_type)), diff --git a/youtube_dl/extractor/ondemandkorea.py 
b/youtube_dl/extractor/ondemandkorea.py index 0c85d549e..df1ce3c1d 100644 --- a/youtube_dl/extractor/ondemandkorea.py +++ b/youtube_dl/extractor/ondemandkorea.py @@ -10,6 +10,7 @@ from ..utils import ( class OnDemandKoreaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ondemandkorea\.com/(?P<id>[^/]+)\.html' + _GEO_COUNTRIES = ['US', 'CA'] _TEST = { 'url': 'http://www.ondemandkorea.com/ask-us-anything-e43.html', 'info_dict': { @@ -36,7 +37,7 @@ class OnDemandKoreaIE(InfoExtractor): if 'msg_block_01.png' in webpage: self.raise_geo_restricted( msg='This content is not available in your region', - countries=['US', 'CA']) + countries=self._GEO_COUNTRIES) if 'This video is only available to ODK PLUS members.' in webpage: raise ExtractorError( diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py index 64f47bae3..3e51b4dd7 100644 --- a/youtube_dl/extractor/pbs.py +++ b/youtube_dl/extractor/pbs.py @@ -193,6 +193,8 @@ class PBSIE(InfoExtractor): ) ''' % '|'.join(list(zip(*_STATIONS))[0]) + _GEO_COUNTRIES = ['US'] + _TESTS = [ { 'url': 'http://www.pbs.org/tpt/constitution-usa-peter-sagal/watch/a-more-perfect-union/', @@ -492,7 +494,8 @@ class PBSIE(InfoExtractor): message = self._ERRORS.get( redirect_info['http_code'], redirect_info['message']) if redirect_info['http_code'] == 403: - self.raise_geo_restricted(msg=message, countries=['US']) + self.raise_geo_restricted( + msg=message, countries=self._GEO_COUNTRIES) raise ExtractorError( '%s said: %s' % (self.IE_NAME, message), expected=True) diff --git a/youtube_dl/extractor/srgssr.py b/youtube_dl/extractor/srgssr.py index a35a0a538..bb73eb1d5 100644 --- a/youtube_dl/extractor/srgssr.py +++ b/youtube_dl/extractor/srgssr.py @@ -14,7 +14,8 @@ from ..utils import ( class SRGSSRIE(InfoExtractor): _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)' - _BYPASS_GEO = False + _GEO_BYPASS = False + _GEO_COUNTRIES = ['CH'] _ERRORS = { 'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. 
and 6 a.m.', @@ -43,7 +44,8 @@ class SRGSSRIE(InfoExtractor): if media_data.get('block') and media_data['block'] in self._ERRORS: message = self._ERRORS[media_data['block']] if media_data['block'] == 'GEOBLOCK': - self.raise_geo_restricted(msg=message, countries=['CH']) + self.raise_geo_restricted( + msg=message, countries=self._GEO_COUNTRIES) raise ExtractorError( '%s said: %s' % (self.IE_NAME, message), expected=True) diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py index f2a2200bf..9e2c9fcc6 100644 --- a/youtube_dl/extractor/svt.py +++ b/youtube_dl/extractor/svt.py @@ -13,6 +13,7 @@ from ..utils import ( class SVTBaseIE(InfoExtractor): + _GEO_COUNTRIES = ['SE'] def _extract_video(self, video_info, video_id): formats = [] for vr in video_info['videoReferences']: @@ -39,7 +40,8 @@ class SVTBaseIE(InfoExtractor): }) if not formats and video_info.get('rights', {}).get('geoBlockedSweden'): self.raise_geo_restricted( - 'This video is only available in Sweden', countries=['SE']) + 'This video is only available in Sweden', + countries=self._GEO_COUNTRIES) self._sort_formats(formats) subtitles = {} diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py index f86d804c1..8152acefd 100644 --- a/youtube_dl/extractor/vbox7.py +++ b/youtube_dl/extractor/vbox7.py @@ -20,6 +20,7 @@ class Vbox7IE(InfoExtractor): ) (?P<id>[\da-fA-F]+) ''' + _GEO_COUNTRIES = ['BG'] _TESTS = [{ 'url': 'http://vbox7.com/play:0946fff23c', 'md5': 'a60f9ab3a3a2f013ef9a967d5f7be5bf', @@ -78,7 +79,7 @@ class Vbox7IE(InfoExtractor): video_url = video['src'] if '/na.mp4' in video_url: - self.raise_geo_restricted(countries=['BG']) + self.raise_geo_restricted(countries=self._GEO_COUNTRIES) uploader = video.get('uploader') diff --git a/youtube_dl/extractor/vgtv.py b/youtube_dl/extractor/vgtv.py index 1709fd6bb..0f8c156a7 100644 --- a/youtube_dl/extractor/vgtv.py +++ b/youtube_dl/extractor/vgtv.py @@ -14,7 +14,7 @@ from ..utils import ( class VGTVIE(XstreamIE): IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet' - _BYPASS_GEO = False + _GEO_BYPASS = False _HOST_TO_APPNAME = { 'vgtv.no': 'vgtv', @@ -218,7 +218,8 @@ class VGTVIE(XstreamIE): properties = try_get( data, lambda x: x['streamConfiguration']['properties'], list) if properties and 'geoblocked' in properties: - raise self.raise_geo_restricted(countries=['NO']) + raise self.raise_geo_restricted( + countries=[host.rpartition('.')[-1].partition('/')[0].upper()]) self._sort_formats(info['formats']) diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py index 68a74e246..e9c8bf824 100644 --- a/youtube_dl/extractor/viki.py +++ b/youtube_dl/extractor/viki.py @@ -27,7 +27,7 @@ class VikiBaseIE(InfoExtractor): _APP_VERSION = '2.2.5.1428709186' _APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)' - _BYPASS_GEO = False + _GEO_BYPASS = False _NETRC_MACHINE = 'viki' _token = None diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index cbf7639c5..17b83794a 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -3291,7 +3291,7 @@ class GeoUtils(object): addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) return compat_str(socket.inet_ntoa( - compat_struct_pack('!I', random.randint(addr_min, addr_max)))) + compat_struct_pack('!L', random.randint(addr_min, addr_max)))) class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): From 0aa10994f452b4ca978baf124df0cb2239d49305 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 19 Feb 2017 03:58:17 +0700 Subject: [PATCH 068/225] [options] Move geo restriction related options to separate section --- youtube_dl/options.py | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index ae3f50754..2c880d06a 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -228,17 +228,29 @@ def parseOpts(overrideArguments=None): action='store_const', const='::', dest='source_address', help='Make all connections via IPv6', ) - network.add_option( + + geo = optparse.OptionGroup(parser, 'Geo Restriction') + geo.add_option( '--geo-verification-proxy', dest='geo_verification_proxy', default=None, metavar='URL', help='Use this proxy to verify the IP address for some geo-restricted sites. ' - 'The default proxy specified by --proxy (or none, if the options is not present) is used for the actual downloading.' - ) - network.add_option( + 'The default proxy specified by --proxy (or none, if the options is not present) is used for the actual downloading.') + geo.add_option( '--cn-verification-proxy', dest='cn_verification_proxy', default=None, metavar='URL', - help=optparse.SUPPRESS_HELP, - ) + help=optparse.SUPPRESS_HELP) + geo.add_option( + '--geo-bypass', + action='store_true', dest='geo_bypass', default=True, + help='Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') + geo.add_option( + '--no-geo-bypass', + action='store_false', dest='geo_bypass', default=True, + help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') + geo.add_option( + '--geo-bypass-country', metavar='CODE', + dest='geo_bypass_country', default=None, + help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code (experimental)') selection = optparse.OptionGroup(parser, 'Video Selection') selection.add_option( @@ -549,18 +561,6 @@ def parseOpts(overrideArguments=None): 'Upper bound of a range for randomized sleep before each download ' '(maximum possible number of seconds to sleep). 
Must only be used ' 'along with --min-sleep-interval.')) - workarounds.add_option( - '--geo-bypass', - action='store_true', dest='geo_bypass', default=True, - help='Bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') - workarounds.add_option( - '--no-geo-bypass', - action='store_false', dest='geo_bypass', default=True, - help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header (experimental)') - workarounds.add_option( - '--geo-bypass-country', metavar='CODE', - dest='geo_bypass_country', default=None, - help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code (experimental)') verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options') verbosity.add_option( @@ -848,6 +848,7 @@ def parseOpts(overrideArguments=None): parser.add_option_group(general) parser.add_option_group(network) + parser.add_option_group(geo) parser.add_option_group(selection) parser.add_option_group(downloader) parser.add_option_group(filesystem) From 553f6dbac7afac84994eae18f551799f807d1503 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 19 Feb 2017 04:18:22 +0700 Subject: [PATCH 069/225] [downloader/dash] Honor HTTP headers when downloading fragments For example, https://www.oppetarkiv.se/video/1196142/natten-ar-dagens-mor --- youtube_dl/downloader/dash.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/youtube_dl/downloader/dash.py b/youtube_dl/downloader/dash.py index 8437dde30..e2ddc369e 100644 --- a/youtube_dl/downloader/dash.py +++ b/youtube_dl/downloader/dash.py @@ -43,7 +43,10 @@ class DashSegmentsFD(FragmentFD): count = 0 while count <= fragment_retries: try: - success = ctx['dl'].download(target_filename, {'url': segment_url}) + success = ctx['dl'].download(target_filename, { + 'url': segment_url, + 'http_headers': info_dict.get('http_headers'), + }) if not success: return False down, target_sanitized = sanitize_open(target_filename, 'rb') From de64e23c5663ceb4f62264077a7993d13ace0d6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 19 Feb 2017 04:18:36 +0700 Subject: [PATCH 070/225] [downloader/ism] Honor HTTP headers when downloading fragments --- youtube_dl/downloader/ism.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/youtube_dl/downloader/ism.py b/youtube_dl/downloader/ism.py index 93cac5e98..63a636cb7 100644 --- a/youtube_dl/downloader/ism.py +++ b/youtube_dl/downloader/ism.py @@ -238,7 +238,10 @@ class IsmFD(FragmentFD): count = 0 while count <= fragment_retries: try: - success = ctx['dl'].download(target_filename, {'url': segment_url}) + success = ctx['dl'].download(target_filename, { + 'url': segment_url, + 'http_headers': info_dict.get('http_headers'), + }) if not success: return False down, target_sanitized = sanitize_open(target_filename, 'rb') From f1a78ee4ef3bfd8e7ff06a3014d96c3cf11b4d9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 19 Feb 2017 06:16:00 +0700 Subject: [PATCH 071/225] [tv4] Switch to hls3 protocol (closes #12177) --- youtube_dl/extractor/tv4.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/tv4.py b/youtube_dl/extractor/tv4.py index ad79db92b..7918e3d86 100644 --- a/youtube_dl/extractor/tv4.py +++ b/youtube_dl/extractor/tv4.py @@ -80,7 +80,7 @@ class TV4IE(InfoExtractor): subtitles = {} formats = [] # http formats are linked with unresolvable 
host - for kind in ('hls', ''): + for kind in ('hls3', ''): data = self._download_json( 'https://prima.tv4play.se/api/web/asset/%s/play.json' % video_id, video_id, 'Downloading sources JSON', query={ From c58b7ffef43f60fa6a183c849cfdca42e36eae0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 19 Feb 2017 06:24:38 +0700 Subject: [PATCH 072/225] [tv4] Bypass geo restriction and improve detection --- youtube_dl/extractor/tv4.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/tv4.py b/youtube_dl/extractor/tv4.py index 7918e3d86..7aeb2c620 100644 --- a/youtube_dl/extractor/tv4.py +++ b/youtube_dl/extractor/tv4.py @@ -24,6 +24,7 @@ class TV4IE(InfoExtractor): sport/| ) )(?P<id>[0-9]+)''' + _GEO_COUNTRIES = ['SE'] _TESTS = [ { 'url': 'http://www.tv4.se/kalla-fakta/klipp/kalla-fakta-5-english-subtitles-2491650', @@ -71,10 +72,6 @@ class TV4IE(InfoExtractor): 'http://www.tv4play.se/player/assets/%s.json' % video_id, video_id, 'Downloading video info JSON') - # If is_geo_restricted is true, it doesn't necessarily mean we can't download it - if info.get('is_geo_restricted'): - self.report_warning('This content might not be available in your country due to licensing restrictions.') - title = info['title'] subtitles = {} @@ -113,6 +110,10 @@ class TV4IE(InfoExtractor): 'url': manifest_url, 'ext': 'vtt', }]}) + + if not formats and info.get('is_geo_restricted'): + self.raise_geo_restricted(countries=self._GEO_COUNTRIES) + self._sort_formats(formats) return { From 8936f68a0ba3284c88ec619fb4cc22eb0499e7f3 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Wed, 21 Oct 2015 00:37:28 +0800 Subject: [PATCH 073/225] [travis] Run tests in parallel [test_download] Print test names in case of network errors [test_download] Add comments for nose parameters [test_download] Modify outtmpl to prevent info JSON filename conflicts Thanks @jaimeMF for the idea. [travis] Only download tests should be run in parallel --- devscripts/run_tests.sh | 4 +++- test/test_download.py | 11 ++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/devscripts/run_tests.sh b/devscripts/run_tests.sh index 7f4c1e083..c60807215 100755 --- a/devscripts/run_tests.sh +++ b/devscripts/run_tests.sh @@ -3,6 +3,7 @@ DOWNLOAD_TESTS="age_restriction|download|subtitles|write_annotations|iqiyi_sdk_interpreter" test_set="" +multiprocess_args="" case "$YTDL_TEST_SET" in core) @@ -10,10 +11,11 @@ case "$YTDL_TEST_SET" in ;; download) test_set="-I test_(?!$DOWNLOAD_TESTS).+\.py" + multiprocess_args="--processes=4 --process-timeout=540" ;; *) break ;; esac -nosetests test --verbose $test_set +nosetests test --verbose $test_set $multiprocess_args diff --git a/test/test_download.py b/test/test_download.py index 463952989..30034f978 100644 --- a/test/test_download.py +++ b/test/test_download.py @@ -65,6 +65,10 @@ defs = gettestcases() class TestDownload(unittest.TestCase): + # Parallel testing in nosetests. 
See + # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html + _multiprocess_shared_ = True + maxDiff = None def setUp(self): @@ -73,7 +77,7 @@ class TestDownload(unittest.TestCase): # Dynamically generate tests -def generator(test_case): +def generator(test_case, tname): def test_template(self): ie = youtube_dl.extractor.get_info_extractor(test_case['name']) @@ -102,6 +106,7 @@ def generator(test_case): return params = get_params(test_case.get('params', {})) + params['outtmpl'] = tname + '_' + params['outtmpl'] if is_playlist and 'playlist' not in test_case: params.setdefault('extract_flat', 'in_playlist') params.setdefault('skip_download', True) @@ -146,7 +151,7 @@ def generator(test_case): raise if try_num == RETRIES: - report_warning('Failed due to network errors, skipping...') + report_warning('%s failed due to network errors, skipping...' % tname) return print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num)) @@ -221,12 +226,12 @@ def generator(test_case): # And add them to TestDownload for n, test_case in enumerate(defs): - test_method = generator(test_case) tname = 'test_' + str(test_case['name']) i = 1 while hasattr(TestDownload, tname): tname = 'test_%s_%d' % (test_case['name'], i) i += 1 + test_method = generator(test_case, tname) test_method.__name__ = str(tname) setattr(TestDownload, test_method.__name__, test_method) del test_method From 983e9b774643fc588fbfb51d314381025ffac248 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 2017 00:59:31 +0700 Subject: [PATCH 074/225] [nrk] Update _API_HOST and relax _VALID_URL --- youtube_dl/extractor/nrk.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index 13af9ed1f..7b98626f2 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -164,12 +164,12 @@ class NRKIE(NRKBaseIE): https?:// (?: (?:www\.)?nrk\.no/video/PS\*| - v8-psapi\.nrk\.no/mediaelement/ + v8[-.]psapi\.nrk\.no/mediaelement/ ) ) - (?P<id>[^/?#&]+) + (?P<id>[^?#&]+) ''' - _API_HOST = 'v8.psapi.nrk.no' + _API_HOST = 'v8-psapi.nrk.no' _TESTS = [{ # video 'url': 'http://www.nrk.no/video/PS*150533', @@ -195,6 +195,9 @@ class NRKIE(NRKBaseIE): }, { 'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9', 'only_matching': True, + }, { + 'url': 'nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70', + 'only_matching': True, }, { 'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9', 'only_matching': True, From 8ffb8e63fe2853f9e51420ba224db428f1241c35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 2017 01:00:53 +0700 Subject: [PATCH 075/225] [prosiebensat1] Throw ExtractionError on unsupported page type (closes #12180) --- youtube_dl/extractor/prosiebensat1.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py index 5091d8456..1245309a7 100644 --- a/youtube_dl/extractor/prosiebensat1.py +++ b/youtube_dl/extractor/prosiebensat1.py @@ -424,3 +424,6 @@ class ProSiebenSat1IE(ProSiebenSat1BaseIE): return self._extract_clip(url, webpage) elif page_type == 'playlist': return self._extract_playlist(url, webpage) + else: + raise ExtractorError( + 'Unsupported page type %s' % page_type, expected=True) From c78dd3549155d4cb8f70707c1b4085f9f974db2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 
2017 02:25:39 +0700 Subject: [PATCH 076/225] [nrk] PEP 8 --- youtube_dl/extractor/nrk.py | 1 + 1 file changed, 1 insertion(+) diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index 7b98626f2..7fe79cb53 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -15,6 +15,7 @@ from ..utils import ( class NRKBaseIE(InfoExtractor): _GEO_COUNTRIES = ['NO'] + def _real_extract(self, url): video_id = self._match_id(url) From 6d4c259765de86bdb8a10e71bfbc7b6e196f6967 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 2017 02:25:55 +0700 Subject: [PATCH 077/225] [svt] PEP 8 --- youtube_dl/extractor/svt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py index 9e2c9fcc6..1b5afb73e 100644 --- a/youtube_dl/extractor/svt.py +++ b/youtube_dl/extractor/svt.py @@ -14,6 +14,7 @@ from ..utils import ( class SVTBaseIE(InfoExtractor): _GEO_COUNTRIES = ['SE'] + def _extract_video(self, video_info, video_id): formats = [] for vr in video_info['videoReferences']: From 2cc7fcd338e8690a5c211b95fb9e0dcdc5d98ef5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 2017 03:06:52 +0700 Subject: [PATCH 078/225] [commonmistakes] Disable UnicodeBOM extractor test for python 3.2 --- youtube_dl/extractor/commonmistakes.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/commonmistakes.py b/youtube_dl/extractor/commonmistakes.py index d3ed4a9a4..79f7a9cd1 100644 --- a/youtube_dl/extractor/commonmistakes.py +++ b/youtube_dl/extractor/commonmistakes.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +import sys + from .common import InfoExtractor from ..utils import ExtractorError @@ -33,7 +35,9 @@ class UnicodeBOMIE(InfoExtractor): IE_DESC = False _VALID_URL = r'(?P<bom>\ufeff)(?P<id>.*)$' - _TESTS = [{ + # Disable test for python 3.2 since BOM is broken in re in this version + # (see https://github.com/rg3/youtube-dl/issues/9751) + _TESTS = [] if (3, 0) < sys.version_info <= (3, 3) else [{ 'url': '\ufeffhttp://www.youtube.com/watch?v=BaW_jenozKc', 'only_matching': True, }] From 82f662182b9ade630b37af81ebf8ae7ae6468898 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 2017 23:16:14 +0700 Subject: [PATCH 079/225] [iprima] Modernize --- youtube_dl/extractor/iprima.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py index 0fe576883..7afa2def0 100644 --- a/youtube_dl/extractor/iprima.py +++ b/youtube_dl/extractor/iprima.py @@ -8,7 +8,6 @@ from .common import InfoExtractor from ..utils import ( determine_ext, js_to_json, - sanitized_Request, ) @@ -38,11 +37,13 @@ class IPrimaIE(InfoExtractor): video_id = self._search_regex(r'data-product="([^"]+)">', webpage, 'real id') - req = sanitized_Request( - 'http://play.iprima.cz/prehravac/init?_infuse=1' - '&_ts=%s&productId=%s' % (round(time.time()), video_id)) - req.add_header('Referer', url) - playerpage = self._download_webpage(req, video_id, note='Downloading player') + playerpage = self._download_webpage( + 'http://play.iprima.cz/prehravac/init', + video_id, note='Downloading player', query={ + '_infuse': 1, + '_ts': round(time.time()), + 'productId': video_id, + }, headers={'Referer': url}) formats = [] From da42ff066811490064e0c3039b9db5c0e9a69f58 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 2017 23:17:19 +0700 Subject: [PATCH 080/225] [iprima] Improve geo restriction detection and disable geo bypass --- youtube_dl/extractor/iprima.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py index 7afa2def0..a29e6a5ba 100644 --- a/youtube_dl/extractor/iprima.py +++ b/youtube_dl/extractor/iprima.py @@ -13,6 +13,7 @@ from ..utils import ( class IPrimaIE(InfoExtractor): _VALID_URL = r'https?://play\.iprima\.cz/(?:.+/)?(?P<id>[^?#]+)' + _GEO_BYPASS = False _TESTS = [{ 'url': 'http://play.iprima.cz/gondici-s-r-o-33', @@ -28,6 +29,10 @@ class IPrimaIE(InfoExtractor): }, { 'url': 'http://play.iprima.cz/particka/particka-92', 'only_matching': True, + }, { + # geo restricted + 'url': 'http://play.iprima.cz/closer-nove-pripady/closer-nove-pripady-iv-1', + 'only_matching': True, }] def _real_extract(self, url): @@ -83,7 +88,7 @@ class IPrimaIE(InfoExtractor): extract_formats(src) if not formats and '>GEO_IP_NOT_ALLOWED<' in playerpage: - self.raise_geo_restricted() + self.raise_geo_restricted(countries=['CZ']) self._sort_formats(formats) From 3ccdde8cb76cacb7b2b64469ca51d3b1877da1f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Mon, 20 Feb 2017 23:21:15 +0700 Subject: [PATCH 081/225] [extractor/common] Emphasize geo bypass APIs are experimental --- youtube_dl/extractor/common.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 272da74b6..1ae264722 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -333,6 +333,9 @@ class InfoExtractor(object): geo restriction bypass mechanism right away in order to bypass geo restriction, of course, if the mechanism is not disabled. (experimental) + NB: both these geo attributes are experimental and may change in future + or be completely removed. + Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. 
""" From 6926304472d4598f095abc7115ca0f36068271d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 21 Feb 2017 00:54:43 +0700 Subject: [PATCH 082/225] [spankbang] Make uploader optional (closes #12193) --- youtube_dl/extractor/spankbang.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/spankbang.py b/youtube_dl/extractor/spankbang.py index 123c33ac3..3394c7e6b 100644 --- a/youtube_dl/extractor/spankbang.py +++ b/youtube_dl/extractor/spankbang.py @@ -23,6 +23,10 @@ class SpankBangIE(InfoExtractor): # 480p only 'url': 'http://spankbang.com/1vt0/video/solvane+gangbang', 'only_matching': True, + }, { + # no uploader + 'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2', + 'only_matching': True, }] def _real_extract(self, url): @@ -48,7 +52,7 @@ class SpankBangIE(InfoExtractor): thumbnail = self._og_search_thumbnail(webpage) uploader = self._search_regex( r'class="user"[^>]*><img[^>]+>([^<]+)', - webpage, 'uploader', fatal=False) + webpage, 'uploader', default=None) age_limit = self._rta_search(webpage) From 890d44b005c3073442064a847f2e0204619a8b47 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Mon, 20 Feb 2017 19:00:06 +0100 Subject: [PATCH 083/225] [adobepass] add support for Time Warner Cable(closes #12191) --- youtube_dl/extractor/adobepass.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/youtube_dl/extractor/adobepass.py b/youtube_dl/extractor/adobepass.py index 12eeab271..4d655bd5e 100644 --- a/youtube_dl/extractor/adobepass.py +++ b/youtube_dl/extractor/adobepass.py @@ -31,6 +31,11 @@ MSO_INFO = { 'username_field': 'user', 'password_field': 'passwd', }, + 'TWC': { + 'name': 'Time Warner Cable | Spectrum', + 'username_field': 'Ecom_User_ID', + 'password_field': 'Ecom_Password', + }, 'thr030': { 'name': '3 Rivers Communications' }, From e469ab25280433781881d0c3ea6fd423ac5fea71 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 14:38:00 +0100 Subject: [PATCH 084/225] [ninecninemedia] use geo bypass mechanism --- youtube_dl/extractor/ninecninemedia.py | 1 + 1 file changed, 1 insertion(+) diff --git a/youtube_dl/extractor/ninecninemedia.py b/youtube_dl/extractor/ninecninemedia.py index ec4d675e2..d9943fc2c 100644 --- a/youtube_dl/extractor/ninecninemedia.py +++ b/youtube_dl/extractor/ninecninemedia.py @@ -19,6 +19,7 @@ class NineCNineMediaBaseIE(InfoExtractor): class NineCNineMediaStackIE(NineCNineMediaBaseIE): IE_NAME = '9c9media:stack' + _GEO_COUNTRIES = ['CA'] _VALID_URL = r'9c9media:stack:(?P<destination_code>[^:]+):(?P<content_id>\d+):(?P<content_package>\d+):(?P<id>\d+)' def _real_extract(self, url): From e39b5d4ab83de7a466c6d4c9528d385758566b22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 21 Feb 2017 23:00:43 +0700 Subject: [PATCH 085/225] [extractor/common] Allow calling _initialize_geo_bypass from extractors (#11970) --- youtube_dl/extractor/common.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 1ae264722..86aff3312 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -379,12 +379,31 @@ class InfoExtractor(object): def initialize(self): """Initializes an instance (authentication, etc).""" - self.__initialize_geo_bypass() + self._initialize_geo_bypass(self._GEO_COUNTRIES) if not self._ready: 
self._real_initialize() self._ready = True - def __initialize_geo_bypass(self): + def _initialize_geo_bypass(self, countries): + """ + Initialize geo restriction bypass mechanism. + + This method is used to initialize geo bypass mechanism based on faking + X-Forwarded-For HTTP header. A random country from provided country list + is selected and a random IP brlonging to this country is generated. This + IP will be passed as X-Forwarded-For HTTP header in all subsequent + HTTP requests. + Method does nothing if no countries are specified. + + This method will be used for initial geo bypass mechanism initialization + during the instance initialization with _GEO_COUNTRIES. + + You may also manually call it from extractor's code if geo countries + information is not available beforehand (e.g. obtained during + extraction) or due to some another reason. + """ + if not countries: + return if not self._x_forwarded_for_ip: country_code = self._downloader.params.get('geo_bypass_country', None) # If there is no explicit country for geo bypass specified and @@ -393,8 +412,8 @@ class InfoExtractor(object): if (not country_code and self._GEO_BYPASS and self._downloader.params.get('geo_bypass', True) and - self._GEO_COUNTRIES): - country_code = random.choice(self._GEO_COUNTRIES) + countries): + country_code = random.choice(countries) if country_code: self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if self._downloader.params.get('verbose', False): From dc0a869e5ee7a75218a759706bb11f17c4de6b72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 21 Feb 2017 23:05:31 +0700 Subject: [PATCH 086/225] [extractor/common] Fix typo --- youtube_dl/extractor/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 86aff3312..6d4789d96 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -390,7 +390,7 @@ class InfoExtractor(object): This method is used to initialize geo bypass mechanism based on faking X-Forwarded-For HTTP header. A random country from provided country list - is selected and a random IP brlonging to this country is generated. This + is selected and a random IP belonging to this country is generated. This IP will be passed as X-Forwarded-For HTTP header in all subsequent HTTP requests. Method does nothing if no countries are specified. From 336a76551b92db1c040cbf3c4a9b1857e125ad45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 21 Feb 2017 23:09:41 +0700 Subject: [PATCH 087/225] [extractor/common] Do not quit _initialize_geo_bypass on empty countries --- youtube_dl/extractor/common.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 6d4789d96..a34fbbc9b 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -393,7 +393,6 @@ class InfoExtractor(object): is selected and a random IP belonging to this country is generated. This IP will be passed as X-Forwarded-For HTTP header in all subsequent HTTP requests. - Method does nothing if no countries are specified. This method will be used for initial geo bypass mechanism initialization during the instance initialization with _GEO_COUNTRIES. @@ -402,8 +401,6 @@ class InfoExtractor(object): information is not available beforehand (e.g. obtained during extraction) or due to some another reason. 
""" - if not countries: - return if not self._x_forwarded_for_ip: country_code = self._downloader.params.get('geo_bypass_country', None) # If there is no explicit country for geo bypass specified and From eea0716cae1290fe08faea89e24a58ec91098638 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 21 Feb 2017 23:14:33 +0700 Subject: [PATCH 088/225] [extractor/common] Print origin country for fake IP --- youtube_dl/extractor/common.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index a34fbbc9b..4252d6825 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -415,7 +415,8 @@ class InfoExtractor(object): self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if self._downloader.params.get('verbose', False): self._downloader.to_stdout( - '[debug] Using fake %s IP as X-Forwarded-For.' % self._x_forwarded_for_ip) + '[debug] Using fake IP %s (%s) as X-Forwarded-For.' + % (self._x_forwarded_for_ip, country_code.upper())) def extract(self, url): """Extracts URL information and returns it in list of dicts.""" @@ -444,10 +445,12 @@ class InfoExtractor(object): self._downloader.params.get('geo_bypass', True) and not self._x_forwarded_for_ip and countries): - self._x_forwarded_for_ip = GeoUtils.random_ipv4(random.choice(countries)) + country_code = random.choice(countries) + self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code) if self._x_forwarded_for_ip: self.report_warning( - 'Video is geo restricted. Retrying extraction with fake %s IP as X-Forwarded-For.' % self._x_forwarded_for_ip) + 'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.' + % (self._x_forwarded_for_ip, country_code.upper())) return True return False From 159aaaa9d09ce5843ec843d6e10030e229709e17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 21 Feb 2017 23:46:58 +0700 Subject: [PATCH 089/225] [ChangeLog] Actualize --- ChangeLog | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/ChangeLog b/ChangeLog index 2c90f791d..2b02994e0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,49 @@ +version <unreleased> + +Core +* [extractor/common] Allow calling _initialize_geo_bypass from extractors + (#11970) ++ [adobepass] Add support for Time Warner Cable (#12191) ++ [travis] Run tests in parallel ++ [downloader/ism] Honor HTTP headers when downloading fragments ++ [downloader/dash] Honor HTTP headers when downloading fragments ++ [utils] Add GeoUtils class for working with geo tools and GeoUtils.random_ipv4 ++ Add option --geo-bypass-country for explicit geo bypass on behalf of + specified country ++ Add options to control geo bypass mechanism --geo-bypass and --no-geo-bypass ++ Add experimental geo restriction bypass mechanism based on faking + X-Forwarded-For HTTP header ++ [utils] Introduce GeoRestrictedError for geo restricted videos ++ [utils] Introduce YoutubeDLError base class for all youtube-dl exceptions + +Extractors ++ [ninecninemedia] Use geo bypass mechanism +* [spankbang] Make uploader optional (#12193) ++ [iprima] Improve geo restriction detection and disable geo bypass +* [iprima] Modernize +* [commonmistakes] Disable UnicodeBOM extractor test for python 3.2 ++ [prosiebensat1] Throw ExtractionError on unsupported page type (#12180) +* [nrk] Update _API_HOST and relax _VALID_URL ++ [tv4] Bypass geo restriction and improve 
detection +* [tv4] Switch to hls3 protocol (#12177) ++ [viki] Improve geo restriction detection ++ [vgtv] Improve geo restriction detection ++ [srgssr] Improve geo restriction detection ++ [vbox7] Improve geo restriction detection and use geo bypass mechanism ++ [svt] Improve geo restriction detection and use geo bypass mechanism ++ [pbs] Improve geo restriction detection and use geo bypass mechanism ++ [ondemandkorea] Improve geo restriction detection and use geo bypass mechanism ++ [nrk] Improve geo restriction detection and use geo bypass mechanism ++ [itv] Improve geo restriction detection and use geo bypass mechanism ++ [go] Improve geo restriction detection and use geo bypass mechanism ++ [dramafever] Improve geo restriction detection and use geo bypass mechanism +* [brightcove:legacy] Restrict videoPlayer value (#12040) ++ [tvn24] Add support for tvn24.pl and tvn24bis.pl (#11679) ++ [thisav] Add support for HTML5 media (#11771) +* [metacafe] Bypass family filter (#10371) +* [viceland] Improve info extraction + + version 2017.02.17 Extractors From 8c6c88c7dae595d5cb7d5926eb00fbaf40103f8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Tue, 21 Feb 2017 23:48:24 +0700 Subject: [PATCH 090/225] release 2017.02.21 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- README.md | 34 +++++++++++++++++++++++----------- docs/supportedsites.md | 1 + youtube_dl/version.py | 2 +- 5 files changed, 29 insertions(+), 16 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 6f1361b32..8b6d14fa2 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.17*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.17** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.21*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.21** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v <your command line> [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.17 +[debug] youtube-dl version 2017.02.21 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 2b02994e0..a479d274f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version <unreleased> +version 2017.02.21 Core * [extractor/common] Allow calling _initialize_geo_bypass from extractors diff --git a/README.md b/README.md index c2a1a6b02..1eccfd287 100644 --- a/README.md +++ b/README.md @@ -99,11 +99,21 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo --source-address IP Client-side IP address to bind to -4, --force-ipv4 Make all connections via IPv4 -6, --force-ipv6 Make all connections via IPv6 + +## Geo Restriction: --geo-verification-proxy URL Use this proxy to verify the IP address for some geo-restricted sites. The default proxy specified by --proxy (or none, if the options is not present) is used for the actual downloading. + --geo-bypass Bypass geographic restriction via faking + X-Forwarded-For HTTP header (experimental) + --no-geo-bypass Do not bypass geographic restriction via + faking X-Forwarded-For HTTP header + (experimental) + --geo-bypass-country CODE Force bypass geographic restriction with + explicitly provided two-letter ISO 3166-2 + country code (experimental) ## Video Selection: --playlist-start NUMBER Playlist video to start at (default is 1) @@ -140,17 +150,19 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo check if the key is not present, key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare - against a number, and & to require multiple - matches. Values which are not known are - excluded unless you put a question mark (?) - after the operator. For example, to only - match videos that have been liked more than - 100 times and disliked less than 50 times - (or the dislike functionality is not - available at the given service), but who - also have a description, use --match-filter - "like_count > 100 & dislike_count <? 50 & - description" . + against a number, key = 'LITERAL' (like + "uploader = 'Mike Smith'", also works with + !=) to match against a string literal and & + to require multiple matches. Values which + are not known are excluded unless you put a + question mark (?) after the operator. For + example, to only match videos that have + been liked more than 100 times and disliked + less than 50 times (or the dislike + functionality is not available at the given + service), but who also have a description, + use --match-filter "like_count > 100 & + dislike_count <? 50 & description" . --no-playlist Download only the video, if the URL refers to a video and a playlist. 
--yes-playlist Download the playlist, if the URL refers to diff --git a/docs/supportedsites.md b/docs/supportedsites.md index 5a436e8f7..1eb9c2cdd 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -804,6 +804,7 @@ - **TVCArticle** - **tvigle**: Интернет-телевидение Tvigle.ru - **tvland.com** + - **TVN24** - **TVNoe** - **tvp**: Telewizja Polska - **tvp:embed**: Telewizja Polska diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 530e1856b..a85aebaa3 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.17' +__version__ = '2017.02.21' From 3444844b04ae482edc5a353d9125b45ba47cd8d8 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 17:47:14 +0100 Subject: [PATCH 091/225] [limelight] extract PlaylistService errors --- youtube_dl/extractor/limelight.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/limelight.py b/youtube_dl/extractor/limelight.py index a3712665b..422be2528 100644 --- a/youtube_dl/extractor/limelight.py +++ b/youtube_dl/extractor/limelight.py @@ -4,11 +4,13 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..compat import compat_HTTPError from ..utils import ( determine_ext, float_or_none, int_or_none, unsmuggle_url, + ExtractorError, ) @@ -20,9 +22,17 @@ class LimelightBaseIE(InfoExtractor): headers = {} if referer: headers['Referer'] = referer - return self._download_json( - self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), - item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal, headers=headers) + try: + return self._download_json( + self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), + item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal, headers=headers) + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: + error = self._parse_json(e.cause.read().decode(), item_id)['detail']['contentAccessPermission'] + if error == 'CountryDisabled': + self.raise_geo_restricted() + raise ExtractorError(error, expected=True) + raise def _call_api(self, organization_id, item_id, method): return self._download_json( @@ -213,6 +223,7 @@ class LimelightMediaIE(LimelightBaseIE): def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) + self._initialize_geo_bypass(smuggled_data.get('geo_countries')) pc, mobile, metadata = self._extract( video_id, 'getPlaylistByMediaId', From 33dc173cdc84efbc1f794033480af0e4af459891 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 17:50:36 +0100 Subject: [PATCH 092/225] [telequebec] use geo bypass mechanism --- youtube_dl/extractor/telequebec.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/telequebec.py b/youtube_dl/extractor/telequebec.py index 4043fcb92..82d73c31d 100644 --- a/youtube_dl/extractor/telequebec.py +++ b/youtube_dl/extractor/telequebec.py @@ -2,7 +2,10 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import int_or_none +from ..utils import ( + int_or_none, + smuggle_url, +) class TeleQuebecIE(InfoExtractor): @@ -28,7 +31,7 @@ class TeleQuebecIE(InfoExtractor): return { '_type': 'url_transparent', 'id': media_id, - 'url': 'limelight:media:' + media_data['streamInfo']['sourceId'], + 'url': 
smuggle_url('limelight:media:' + media_data['streamInfo']['sourceId'], {'geo_countries': ['CA']}), 'title': media_data['title'], 'description': media_data.get('descriptions', [{'text': None}])[0].get('text'), 'duration': int_or_none(media_data.get('durationInMilliseconds'), 1000), From 86466a8b6f313d0d8c80823e8e61215ac16046a5 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 17:50:53 +0100 Subject: [PATCH 093/225] [cbc] use geo bypass mechanism --- youtube_dl/extractor/cbc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/youtube_dl/extractor/cbc.py b/youtube_dl/extractor/cbc.py index cf678e7f8..4d93c7744 100644 --- a/youtube_dl/extractor/cbc.py +++ b/youtube_dl/extractor/cbc.py @@ -195,6 +195,7 @@ class CBCPlayerIE(InfoExtractor): class CBCWatchBaseIE(InfoExtractor): _device_id = None _device_token = None + _GEO_COUNTRIES = ['CA'] _API_BASE_URL = 'https://api-cbc.cloud.clearleap.com/cloffice/client/' _NS_MAP = { 'media': 'http://search.yahoo.com/mrss/', From 7345d6d465c4889ae06672a11f8b6e491b0b7fe4 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 17:51:40 +0100 Subject: [PATCH 094/225] [tfo] Improve geo restriction detection and use geo bypass mechanism --- youtube_dl/extractor/tfo.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/tfo.py b/youtube_dl/extractor/tfo.py index 6f1eeac57..0e2370cd8 100644 --- a/youtube_dl/extractor/tfo.py +++ b/youtube_dl/extractor/tfo.py @@ -8,10 +8,12 @@ from ..utils import ( HEADRequest, ExtractorError, int_or_none, + clean_html, ) class TFOIE(InfoExtractor): + _GEO_COUNTRIES = ['CA'] _VALID_URL = r'https?://(?:www\.)?tfo\.org/(?:en|fr)/(?:[^/]+/){2}(?P<id>\d+)' _TEST = { 'url': 'http://www.tfo.org/en/universe/tfo-247/100463871/video-game-hackathon', @@ -36,7 +38,9 @@ class TFOIE(InfoExtractor): 'X-tfo-session': self._get_cookies('http://www.tfo.org/')['tfo-session'].value, }) if infos.get('success') == 0: - raise ExtractorError('%s said: %s' % (self.IE_NAME, infos['msg']), expected=True) + if infos.get('code') == 'ErrGeoBlocked': + self.raise_geo_restricted(countries=self._GEO_COUNTRIES) + raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(infos['msg'])), expected=True) video_data = infos['data'] return { From fc320a40d97ed0c439a8d2ace9cfad94a74ff635 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 18:14:55 +0100 Subject: [PATCH 095/225] Revert "[cbc] use geo bypass mechanism" This reverts commit 86466a8b6f313d0d8c80823e8e61215ac16046a5. 
--- youtube_dl/extractor/cbc.py | 1 - 1 file changed, 1 deletion(-) diff --git a/youtube_dl/extractor/cbc.py b/youtube_dl/extractor/cbc.py index 4d93c7744..cf678e7f8 100644 --- a/youtube_dl/extractor/cbc.py +++ b/youtube_dl/extractor/cbc.py @@ -195,7 +195,6 @@ class CBCPlayerIE(InfoExtractor): class CBCWatchBaseIE(InfoExtractor): _device_id = None _device_token = None - _GEO_COUNTRIES = ['CA'] _API_BASE_URL = 'https://api-cbc.cloud.clearleap.com/cloffice/client/' _NS_MAP = { 'media': 'http://search.yahoo.com/mrss/', From 31615ac279ac60fbd3925995de2eed69a4b3976a Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 19:36:39 +0100 Subject: [PATCH 096/225] [viewster] use geo verifcation headers --- youtube_dl/extractor/viewster.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/viewster.py b/youtube_dl/extractor/viewster.py index 52dd95e2f..fcf0cb100 100644 --- a/youtube_dl/extractor/viewster.py +++ b/youtube_dl/extractor/viewster.py @@ -86,7 +86,9 @@ class ViewsterIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) # Get 'api_token' cookie - self._request_webpage(HEADRequest('http://www.viewster.com/'), video_id) + self._request_webpage( + HEADRequest('http://www.viewster.com/'), + video_id, headers=self.geo_verification_headers()) cookies = self._get_cookies('http://www.viewster.com/') self._AUTH_TOKEN = compat_urllib_parse_unquote(cookies['api_token'].value) From abd29a2cedaab096e920ed5be9c480921cfacf0a Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Tue, 21 Feb 2017 19:37:26 +0100 Subject: [PATCH 097/225] [crackle] use geo bypass mechanism --- youtube_dl/extractor/crackle.py | 1 + 1 file changed, 1 insertion(+) diff --git a/youtube_dl/extractor/crackle.py b/youtube_dl/extractor/crackle.py index 377fb45e9..f919ed208 100644 --- a/youtube_dl/extractor/crackle.py +++ b/youtube_dl/extractor/crackle.py @@ -6,6 +6,7 @@ from ..utils import int_or_none class CrackleIE(InfoExtractor): + _GEO_COUNTRIES = ['US'] _VALID_URL = r'(?:crackle:|https?://(?:(?:www|m)\.)?crackle\.com/(?:playlist/\d+/|(?:[^/]+/)+))(?P<id>\d+)' _TEST = { 'url': 'http://www.crackle.com/comedians-in-cars-getting-coffee/2498934', From 139d8ac106dc173d27cf20361b649c0dbc5f9b67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 22 Feb 2017 01:50:34 +0700 Subject: [PATCH 098/225] [setup] Add python 3.6 classifier --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index ce6dd1870..b8c3e0925 100644 --- a/setup.py +++ b/setup.py @@ -130,6 +130,7 @@ setup( 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', ], cmdclass={'build_lazy_extractors': build_lazy_extractors}, From 0d427c83047778d2984df5594b96f119ec7f8771 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 22 Feb 2017 01:51:27 +0700 Subject: [PATCH 099/225] [setup] Actualize maintainer info --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index b8c3e0925..67d6633ed 100644 --- a/setup.py +++ b/setup.py @@ -107,8 +107,8 @@ setup( url='https://github.com/rg3/youtube-dl', author='Ricardo Garcia', author_email='ytdl@yt-dl.org', - maintainer='Philipp Hagemeister', - maintainer_email='phihag@phihag.de', + maintainer='Sergey M.', + maintainer_email='dstftw@gmail.com', packages=[ 
'youtube_dl', 'youtube_dl.extractor', 'youtube_dl.downloader', From 71e9577b94a4792a330e9bdab4674c6893ea5bac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 22 Feb 2017 21:19:52 +0700 Subject: [PATCH 100/225] [24video] Add support for 24video.tube (closes #12217) --- youtube_dl/extractor/twentyfourvideo.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/twentyfourvideo.py b/youtube_dl/extractor/twentyfourvideo.py index a983ebf05..f3541b654 100644 --- a/youtube_dl/extractor/twentyfourvideo.py +++ b/youtube_dl/extractor/twentyfourvideo.py @@ -12,7 +12,7 @@ from ..utils import ( class TwentyFourVideoIE(InfoExtractor): IE_NAME = '24video' - _VALID_URL = r'https?://(?:www\.)?24video\.(?:net|me|xxx|sex)/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P<id>\d+)' + _VALID_URL = r'https?://(?:www\.)?24video\.(?:net|me|xxx|sex|tube)/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.24video.net/video/view/1044982', @@ -37,6 +37,9 @@ class TwentyFourVideoIE(InfoExtractor): }, { 'url': 'http://www.24video.me/video/view/1044982', 'only_matching': True, + }, { + 'url': 'http://www.24video.tube/video/view/2363750', + 'only_matching': True, }] def _real_extract(self, url): From a86e4160880e0747c5a0b774fe8d3b0cbb2990ae Mon Sep 17 00:00:00 2001 From: Tobias Florek <me@ibotty.net> Date: Wed, 22 Feb 2017 15:28:09 +0100 Subject: [PATCH 101/225] [vidzi] Add support for vidzi.cc --- youtube_dl/extractor/vidzi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py index 1f1828fce..b642caf22 100644 --- a/youtube_dl/extractor/vidzi.py +++ b/youtube_dl/extractor/vidzi.py @@ -13,7 +13,7 @@ from ..utils import ( class VidziIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?vidzi\.tv/(?:embed-)?(?P<id>[0-9a-zA-Z]+)' + _VALID_URL = r'https?://(?:www\.)?vidzi\.(?:tv|cc)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'http://vidzi.tv/cghql9yq6emu.html', 'md5': '4f16c71ca0c8c8635ab6932b5f3f1660', From 58ad6995cd3974eb9cff64f3fa8d34ec68cb6a03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 22 Feb 2017 21:29:53 +0700 Subject: [PATCH 102/225] [vidzi] Add test for #12213 --- youtube_dl/extractor/vidzi.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py index b642caf22..d0556297e 100644 --- a/youtube_dl/extractor/vidzi.py +++ b/youtube_dl/extractor/vidzi.py @@ -29,6 +29,9 @@ class VidziIE(InfoExtractor): }, { 'url': 'http://vidzi.tv/embed-4z2yb0rzphe9-600x338.html', 'skip_download': True, + }, { + 'url': 'http://vidzi.cc/cghql9yq6emu.html', + 'skip_download': True, }] def _real_extract(self, url): From 527ef85fe9bc65ed676ab855ee386c7cce8716ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 22 Feb 2017 21:49:30 +0700 Subject: [PATCH 103/225] [dailymotion] Make comment count optional (closes #12209) Not served anymore --- youtube_dl/extractor/dailymotion.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 31bf5faf6..b312401dc 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -66,7 +66,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'uploader_id': 'xijv66', 'age_limit': 0, 'view_count': int, - 'comment_count': 
int, } }, # Vevo video @@ -140,7 +139,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor): view_count = str_to_int(view_count_str) comment_count = int_or_none(self._search_regex( r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"', - webpage, 'comment count', fatal=False)) + webpage, 'comment count', default=None)) player_v5 = self._search_regex( [r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826 From b5869560a44caaa06b317302425fb472169c2d28 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Thu, 23 Feb 2017 00:08:45 +0800 Subject: [PATCH 104/225] [crunchyroll] Fix descriptions with double quotes (closes #12124) --- ChangeLog | 6 ++++++ youtube_dl/extractor/crunchyroll.py | 23 +++++++++++++++++++---- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index a479d274f..e57e7fece 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version <unreleased> + +Extractors +* [crunchyroll] Fix descriptions with double quotes (#12124) + + version 2017.02.21 Core diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index 109d1c5a8..d2b87442d 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -123,7 +123,7 @@ class CrunchyrollIE(CrunchyrollBaseIE): 'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513', 'info_dict': { 'id': '645513', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!', 'description': 'md5:2d17137920c64f2f49981a7797d275ef', 'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg', @@ -192,6 +192,21 @@ class CrunchyrollIE(CrunchyrollBaseIE): # geo-restricted (US), 18+ maturity wall, non-premium available 'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617', 'only_matching': True, + }, { + # A description with double quotes + 'url': 'http://www.crunchyroll.com/11eyes/episode-1-piros-jszaka-red-night-535080', + 'info_dict': { + 'id': '535080', + 'ext': 'mp4', + 'title': '11eyes Episode 1 – Piros éjszaka - Red Night', + 'description': 'Kakeru and Yuka are thrown into an alternate nightmarish world they call "Red Night".', + 'uploader': 'Marvelous AQL Inc.', + 'upload_date': '20091021', + }, + 'params': { + # Just test metadata extraction + 'skip_download': True, + }, }] _FORMAT_IDS = { @@ -362,9 +377,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>', webpage, 'video_title') video_title = re.sub(r' {2,}', ' ', video_title) - video_description = self._html_search_regex( - r'<script[^>]*>\s*.+?\[media_id=%s\].+?"description"\s*:\s*"([^"]+)' % video_id, - webpage, 'description', default=None) + video_description = self._parse_json(self._html_search_regex( + r'<script[^>]*>\s*.+?\[media_id=%s\].+?({.+?"description"\s*:.+?})\);' % video_id, + webpage, 'description', default='{}'), video_id).get('description') if video_description: video_description = lowercase_escape(video_description.replace(r'\r\n', '\n')) video_upload_date = self._html_search_regex( From 63a29b6118d147404b5fff63d82f098c4bfa3ffc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 22 Feb 2017 23:45:01 +0700 Subject: [PATCH 105/225] [ChangeLog] 
Actualize --- ChangeLog | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ChangeLog b/ChangeLog index e57e7fece..14db5ac13 100644 --- a/ChangeLog +++ b/ChangeLog @@ -2,6 +2,15 @@ version <unreleased> Extractors * [crunchyroll] Fix descriptions with double quotes (#12124) +* [dailymotion] Make comment count optional (#12209) ++ [vidzi] Add support for vidzi.cc (#12213) ++ [24video] Add support for 24video.tube (#12217) ++ [crackle] Use geo bypass mechanism ++ [viewster] Use geo verification headers ++ [tfo] Improve geo restriction detection and use geo bypass mechanism ++ [telequebec] Use geo bypass mechanism ++ [limelight] Extract PlaylistService errors and improve geo restriction + detection version 2017.02.21 From 345b24538b24772c6c5917439e62c510437fce04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Wed, 22 Feb 2017 23:50:42 +0700 Subject: [PATCH 106/225] release 2017.02.22 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- youtube_dl/version.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 8b6d14fa2..923f28276 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.21*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.21** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.22*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.22** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v <your command line> [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.21 +[debug] youtube-dl version 2017.02.22 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 14db5ac13..cff065171 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version <unreleased> +version 2017.02.22 Extractors * [crunchyroll] Fix descriptions with double quotes (#12124) diff --git a/youtube_dl/version.py b/youtube_dl/version.py index a85aebaa3..fce1b8558 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.21' +__version__ = '2017.02.22' From bc61c80c143d8faed3e264c2487204924acd1eb6 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 23 Feb 2017 11:47:06 +0100 Subject: [PATCH 107/225] [leeco] raise GeoRestrictedError and use geo bypass mechanism --- youtube_dl/extractor/leeco.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/leeco.py b/youtube_dl/extractor/leeco.py index 4321f90c8..9eda956d2 100644 --- a/youtube_dl/extractor/leeco.py +++ b/youtube_dl/extractor/leeco.py @@ -30,7 +30,7 @@ from ..utils import ( class LeIE(InfoExtractor): IE_DESC = '乐视网' _VALID_URL = r'https?://(?:www\.le\.com/ptv/vplay|(?:sports\.le|(?:www\.)?lesports)\.com/(?:match|video))/(?P<id>\d+)\.html' - + _GEO_COUNTRIES = ['CN'] _URL_TEMPLATE = 'http://www.le.com/ptv/vplay/%s.html' _TESTS = [{ @@ -126,10 +126,9 @@ class LeIE(InfoExtractor): if playstatus['status'] == 0: flag = playstatus['flag'] if flag == 1: - msg = 'Country %s auth error' % playstatus['country'] + self.raise_geo_restricted() else: - msg = 'Generic error. flag = %d' % flag - raise ExtractorError(msg, expected=True) + raise ExtractorError('Generic error. flag = %d' % flag, expected=True) def _real_extract(self, url): media_id = self._match_id(url) From c59f7036101b5349b3b02a8bd700eff507012e3f Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 23 Feb 2017 11:49:35 +0100 Subject: [PATCH 108/225] [sohu] raise GeoRestrictedError --- youtube_dl/extractor/sohu.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py index 30760ca06..7da12cef8 100644 --- a/youtube_dl/extractor/sohu.py +++ b/youtube_dl/extractor/sohu.py @@ -108,12 +108,11 @@ class SohuIE(InfoExtractor): if vid_data['play'] != 1: if vid_data.get('status') == 12: raise ExtractorError( - 'Sohu said: There\'s something wrong in the video.', + '%s said: There\'s something wrong in the video.' % self.IE_NAME, expected=True) else: - raise ExtractorError( - 'Sohu said: The video is only licensed to users in Mainland China.', - expected=True) + self.raise_geo_restricted( + '%s said: The video is only licensed to users in Mainland China.' 
% self.IE_NAME) formats_json = {} for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'): From 30eaa3a7023a3c03c62eb481f6415cb0599e0da5 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 23 Feb 2017 11:50:04 +0100 Subject: [PATCH 109/225] [mgtv] fix extraction --- youtube_dl/extractor/mgtv.py | 50 +++++++++++++++++------------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/youtube_dl/extractor/mgtv.py b/youtube_dl/extractor/mgtv.py index 659ede8c2..d53d96aae 100644 --- a/youtube_dl/extractor/mgtv.py +++ b/youtube_dl/extractor/mgtv.py @@ -2,16 +2,17 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..compat import compat_str from ..utils import int_or_none class MGTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?mgtv\.com/v/(?:[^/]+/)*(?P<id>\d+)\.html' + _VALID_URL = r'https?://(?:www\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html' IE_DESC = '芒果TV' _TESTS = [{ 'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html', - 'md5': '1bdadcf760a0b90946ca68ee9a2db41a', + 'md5': 'b1ffc0fc163152acf6beaa81832c9ee7', 'info_dict': { 'id': '3116640', 'ext': 'mp4', @@ -21,48 +22,45 @@ class MGTVIE(InfoExtractor): 'thumbnail': r're:^https?://.*\.jpg$', }, }, { - # no tbr extracted from stream_url - 'url': 'http://www.mgtv.com/v/1/1/f/3324755.html', + 'url': 'http://www.mgtv.com/b/301817/3826653.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) api_data = self._download_json( - 'http://v.api.mgtv.com/player/video', video_id, + 'http://pcweb.api.mgtv.com/player/video', video_id, query={'video_id': video_id}, headers=self.geo_verification_headers())['data'] info = api_data['info'] + title = info['title'].strip() + stream_domain = api_data['stream_domain'][0] formats = [] for idx, stream in enumerate(api_data['stream']): - stream_url = stream.get('url') - if not stream_url: + stream_path = stream.get('url') + if not stream_path: + continue + format_data = self._download_json( + stream_domain + stream_path, video_id, + note='Download video info for format #%d' % idx) + format_url = format_data.get('info') + if not format_url: continue tbr = int_or_none(self._search_regex( - r'(\d+)\.mp4', stream_url, 'tbr', default=None)) - - def extract_format(stream_url, format_id, idx, query={}): - format_info = self._download_json( - stream_url, video_id, - note='Download video info for format %s' % (format_id or '#%d' % idx), - query=query) - return { - 'format_id': format_id, - 'url': format_info['info'], - 'ext': 'mp4', - 'tbr': tbr, - } - - formats.append(extract_format( - stream_url, 'hls-%d' % tbr if tbr else None, idx * 2)) - formats.append(extract_format(stream_url.replace( - '/playlist.m3u8', ''), 'http-%d' % tbr if tbr else None, idx * 2 + 1, {'pno': 1031})) + r'_(\d+)_mp4/', format_url, 'tbr', default=None)) + formats.append({ + 'format_id': compat_str(tbr or idx), + 'url': format_url, + 'ext': 'mp4', + 'tbr': tbr, + 'protocol': 'm3u8_native', + }) self._sort_formats(formats) return { 'id': video_id, - 'title': info['title'].strip(), + 'title': title, 'formats': formats, 'description': info.get('desc'), 'duration': int_or_none(info.get('duration')), From 9e03aa75c779b79da79353ef1ecc4520ad06d6d0 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 23 Feb 2017 11:54:43 +0100 Subject: [PATCH 110/225] [crunchyroll] extract playlist entries ids --- youtube_dl/extractor/crunchyroll.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff 
--git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index d2b87442d..a1fc6a756 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -534,11 +534,11 @@ class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE): r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>', webpage, 'title') episode_paths = re.findall( - r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"', + r'(?s)<li id="showview_videos_media_(\d+)"[^>]+>.*?<a href="([^"]+)"', webpage) entries = [ - self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll') - for ep in episode_paths + self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll', ep_id) + for ep_id, ep in episode_paths ] entries.reverse() From ada77fa544e185a8cd7c3e5d6374e0b6995557a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 23 Feb 2017 18:02:04 +0700 Subject: [PATCH 111/225] [instagram] Add support for multi video posts (closes #12226) --- youtube_dl/extractor/instagram.py | 54 +++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py index 98f408c18..c1921cbcf 100644 --- a/youtube_dl/extractor/instagram.py +++ b/youtube_dl/extractor/instagram.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..compat import compat_str from ..utils import ( get_element_by_attribute, int_or_none, @@ -50,6 +51,33 @@ class InstagramIE(InfoExtractor): 'params': { 'skip_download': True, }, + }, { + # multi video post + 'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/', + 'playlist': [{ + 'info_dict': { + 'id': 'BQ0dSaohpPW', + 'ext': 'mp4', + 'title': 'Video 1', + }, + }, { + 'info_dict': { + 'id': 'BQ0dTpOhuHT', + 'ext': 'mp4', + 'title': 'Video 2', + }, + }, { + 'info_dict': { + 'id': 'BQ0dT7RBFeF', + 'ext': 'mp4', + 'title': 'Video 3', + }, + }], + 'info_dict': { + 'id': 'BQ0eAlwhDrw', + 'title': 'Post by instagram', + 'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957', + }, }, { 'url': 'https://instagram.com/p/-Cmh1cukG2/', 'only_matching': True, @@ -113,6 +141,32 @@ class InstagramIE(InfoExtractor): 'timestamp': int_or_none(comment.get('created_at')), } for comment in media.get( 'comments', {}).get('nodes', []) if comment.get('text')] + if not video_url: + edges = try_get( + media, lambda x: x['edge_sidecar_to_children']['edges'], + list) or [] + if edges: + entries = [] + for edge_num, edge in enumerate(edges, start=1): + node = try_get(edge, lambda x: x['node'], dict) + if not node: + continue + node_video_url = try_get(node, lambda x: x['video_url'], compat_str) + if not node_video_url: + continue + entries.append({ + 'id': node.get('shortcode') or node['id'], + 'title': 'Video %d' % edge_num, + 'url': node_video_url, + 'thumbnail': node.get('display_url'), + 'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])), + 'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])), + 'view_count': int_or_none(node.get('video_view_count')), + }) + return self.playlist_result( + entries, video_id, + 'Post by %s' % uploader_id if uploader_id else None, + description) if not video_url: video_url = self._og_search_video_url(webpage, secure=False) From d5fd9a3be305aa8fead8fb70aae64703afe49e43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 23 Feb 2017 18:44:04 +0700 Subject: [PATCH 112/225] [skylinewebcams] Add extractor 
(closes #12221) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/skylinewebcams.py | 42 ++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 youtube_dl/extractor/skylinewebcams.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 55b4782d3..83a170fa7 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -852,6 +852,7 @@ from .shared import ( from .showroomlive import ShowRoomLiveIE from .sina import SinaIE from .sixplay import SixPlayIE +from .skylinewebcams import SkylineWebcamsIE from .skynewsarabia import ( SkyNewsArabiaIE, SkyNewsArabiaArticleIE, diff --git a/youtube_dl/extractor/skylinewebcams.py b/youtube_dl/extractor/skylinewebcams.py new file mode 100644 index 000000000..5b4aaac6f --- /dev/null +++ b/youtube_dl/extractor/skylinewebcams.py @@ -0,0 +1,42 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class SkylineWebcamsIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?skylinewebcams\.com/[^/]+/webcam/(?:[^/]+/)+(?P<id>[^/]+)\.html' + _TEST = { + 'url': 'https://www.skylinewebcams.com/it/webcam/italia/lazio/roma/scalinata-piazza-di-spagna-barcaccia.html', + 'info_dict': { + 'id': 'scalinata-piazza-di-spagna-barcaccia', + 'ext': 'mp4', + 'title': 're:^Live Webcam Scalinata di Piazza di Spagna - La Barcaccia [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'description': 'Roma, veduta sulla Scalinata di Piazza di Spagna e sulla Barcaccia', + 'is_live': True, + }, + 'params': { + 'skip_download': True, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + stream_url = self._search_regex( + r'url\s*:\s*(["\'])(?P<url>(?:https?:)?//.+?\.m3u8.*?)\1', webpage, + 'stream url', group='url') + + title = self._og_search_title(webpage) + description = self._og_search_description(webpage) + + return { + 'id': video_id, + 'url': stream_url, + 'ext': 'mp4', + 'title': self._live_title(title), + 'description': description, + 'is_live': True, + } From 0f3d41b44d84869e7f4e809692ce71567b3f7130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 23 Feb 2017 19:48:54 +0700 Subject: [PATCH 113/225] [devscripts/run_tests] Exclude youtube lists tests from core build --- devscripts/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/devscripts/run_tests.sh b/devscripts/run_tests.sh index c60807215..6ba26720d 100755 --- a/devscripts/run_tests.sh +++ b/devscripts/run_tests.sh @@ -1,6 +1,6 @@ #!/bin/bash -DOWNLOAD_TESTS="age_restriction|download|subtitles|write_annotations|iqiyi_sdk_interpreter" +DOWNLOAD_TESTS="age_restriction|download|subtitles|write_annotations|iqiyi_sdk_interpreter|youtube_lists" test_set="" multiprocess_args="" From 28572a1a0b27ba3ccedac5d8d093f925dfb7485f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 5 Mar 2016 23:07:25 +0600 Subject: [PATCH 114/225] [compat] Add compat_numeric_types --- youtube_dl/compat.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py index 718902019..b257e2e81 100644 --- a/youtube_dl/compat.py +++ b/youtube_dl/compat.py @@ -2760,6 +2760,10 @@ else: compat_kwargs = lambda kwargs: kwargs +compat_numeric_types = ((int, float, long, complex) if sys.version_info[0] < 3 + else (int, float, complex)) + + if sys.version_info < (2, 7): def 
compat_socket_create_connection(address, timeout, source_address=None): host, port = address @@ -2895,6 +2899,7 @@ __all__ = [ 'compat_input', 'compat_itertools_count', 'compat_kwargs', + 'compat_numeric_types', 'compat_ord', 'compat_os_name', 'compat_parse_qs', From d0d9ade4860fd44a07f5513d13b66233fdca0e89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 6 Mar 2016 03:52:42 +0600 Subject: [PATCH 115/225] [YoutubeDL] Add support for string formatting operations in output template --- test/test_YoutubeDL.py | 14 ++++++++++++++ youtube_dl/YoutubeDL.py | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index 2cfcf743a..8491a88bd 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -526,6 +526,7 @@ class TestYoutubeDL(unittest.TestCase): 'id': '1234', 'ext': 'mp4', 'width': None, + 'height': 1080, } def fname(templ): @@ -535,6 +536,19 @@ class TestYoutubeDL(unittest.TestCase): self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4') # Replace missing fields with 'NA' self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4') + self.assertEqual(fname('%(height)d.%(ext)s'), '1080.mp4') + self.assertEqual(fname('%(height)6d.%(ext)s'), ' 1080.mp4') + self.assertEqual(fname('%(height)-6d.%(ext)s'), '1080 .mp4') + self.assertEqual(fname('%(height)06d.%(ext)s'), '001080.mp4') + self.assertEqual(fname('%(height) 06d.%(ext)s'), ' 01080.mp4') + self.assertEqual(fname('%(height) 06d.%(ext)s'), ' 01080.mp4') + self.assertEqual(fname('%(height)0 6d.%(ext)s'), ' 01080.mp4') + self.assertEqual(fname('%(height)0 6d.%(ext)s'), ' 01080.mp4') + self.assertEqual(fname('%(height) 0 6d.%(ext)s'), ' 01080.mp4') + self.assertEqual(fname('%%(height)06d.%(ext)s'), '%(height)06d.mp4') + self.assertEqual(fname('%(width)06d.%(ext)s'), 'NA.mp4') + self.assertEqual(fname('%(width)06d.%%(ext)s'), 'NA.%(ext)s') + self.assertEqual(fname('%%(width)06d.%(ext)s'), '%(width)06d.mp4') def test_format_note(self): ydl = YoutubeDL() diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 68000dea2..bdaf06e62 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -33,6 +33,7 @@ from .compat import ( compat_get_terminal_size, compat_http_client, compat_kwargs, + compat_numeric_types, compat_os_name, compat_str, compat_tokenize_tokenize, @@ -609,12 +610,45 @@ class YoutubeDL(object): compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id')) - template_dict = dict((k, sanitize(k, v)) + template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v)) for k, v in template_dict.items() if v is not None and not isinstance(v, (list, tuple, dict))) template_dict = collections.defaultdict(lambda: 'NA', template_dict) outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) + + NUMERIC_FIELDS = set(( + 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx', + 'upload_year', 'upload_month', 'upload_day', + 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', + 'average_rating', 'comment_count', 'age_limit', + 'start_time', 'end_time', + 'chapter_number', 'season_number', 'episode_number', + )) + + # Missing numeric fields used together with integer presentation types + # in format specification will break the argument substitution since + # string 'NA' is returned for missing fields. 
We will patch output + # template for missing fields to meet string presentation type. + for numeric_field in NUMERIC_FIELDS: + if numeric_field not in template_dict: + # As of [1] format syntax is: + # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type + # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting + FORMAT_RE = r'''(?x) + (?<!%) + % + \({0}\) # mapping key + (?:[#0\-+ ]+)? # conversion flags (optional) + (?:\d+)? # minimum field width (optional) + (?:\.\d+)? # precision (optional) + [hlL]? # length modifier (optional) + [diouxXeEfFgGcrs%] # conversion type + ''' + outtmpl = re.sub( + FORMAT_RE.format(numeric_field), + r'%({0})s'.format(numeric_field), outtmpl) + tmpl = compat_expanduser(outtmpl) filename = tmpl % template_dict # Temporary fix for #4787 From 89db639dfe02b291bbf901973ca00d6e60fc1dce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 23 Feb 2017 22:01:09 +0700 Subject: [PATCH 116/225] [YoutubeDL] Rewrite outtmpl for playlist_index and autonumber for backward compatibility --- youtube_dl/YoutubeDL.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index bdaf06e62..fc5e67828 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -594,10 +594,7 @@ class YoutubeDL(object): autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 - autonumber_templ = '%0' + str(autonumber_size) + 'd' - template_dict['autonumber'] = autonumber_templ % (self.params.get('autonumber_start', 1) - 1 + self._num_downloads) - if template_dict.get('playlist_index') is not None: - template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index']) + template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) @@ -617,6 +614,20 @@ class YoutubeDL(object): outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) + # For fields playlist_index and autonumber convert all occurrences + # of %(field)s to %(field)0Nd for backward compatibility + field_size_compat_map = { + 'playlist_index': len(str(template_dict['n_entries'])), + 'autonumber': autonumber_size, + } + FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s' + mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl) + if mobj: + outtmpl = re.sub( + FIELD_SIZE_COMPAT_RE, + r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')], + outtmpl) + NUMERIC_FIELDS = set(( 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx', 'upload_year', 'upload_month', 'upload_day', @@ -624,6 +635,7 @@ class YoutubeDL(object): 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', + 'playlist_index', )) # Missing numeric fields used together with integer presentation types From b3175982c31a61ff4184d666b0bdb6dd34213365 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 23 Feb 2017 22:01:57 +0700 Subject: [PATCH 117/225] [YoutubeDL] Add more numeric fields for NA substitution in outtmpl --- youtube_dl/YoutubeDL.py | 1 + 1 file changed, 1 insertion(+) diff --git a/youtube_dl/YoutubeDL.py 
b/youtube_dl/YoutubeDL.py index fc5e67828..56a8691eb 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -635,6 +635,7 @@ class YoutubeDL(object): 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', + 'track_number', 'disc_number', 'release_year', 'playlist_index', )) From fafc2bf5a92b8397148e47e0c9b46fb4d9212075 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 23 Feb 2017 22:11:16 +0700 Subject: [PATCH 118/225] [options] Deprecate --autonumber-size --- youtube_dl/options.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 2c880d06a..5a11dddf9 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -680,7 +680,9 @@ def parseOpts(overrideArguments=None): filesystem.add_option( '--autonumber-size', dest='autonumber_size', metavar='NUMBER', default=5, type=int, - help='Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given (default is %default)') + help='[deprecated; use output template with %(autonumber)0Nd, where N in the number of digits] ' + 'Specify the number of digits in %(autonumber)s when it is present ' + 'in output filename template or --auto-number option is given (default is %default)') filesystem.add_option( '--autonumber-start', dest='autonumber_start', metavar='NUMBER', default=1, type=int, From 050f143c1286ddafcb6966a0f679c5bbaceecca5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Thu, 23 Feb 2017 23:00:13 +0700 Subject: [PATCH 119/225] [README.md] Clarify sequence types in output template and document numeric string formatting operations --- README.md | 138 +++++++++++++++++++++++++++--------------------------- 1 file changed, 70 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 1eccfd287..b4364ef61 100644 --- a/README.md +++ b/README.md @@ -486,87 +486,89 @@ The `-o` option allows users to indicate a template for the output file names. **tl;dr:** [navigate me to examples](#output-template-examples). -The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are: +The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences may be formatted according to [python string formatting operations](https://docs.python.org/2/library/stdtypes.html#string-formatting). For example, `%(NAME)s` or `%(NAME)05d`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a formatting operations. 
Allowed names along with sequence type are: - - `id`: Video identifier - - `title`: Video title - - `url`: Video URL - - `ext`: Video filename extension - - `alt_title`: A secondary title of the video - - `display_id`: An alternative identifier for the video - - `uploader`: Full name of the video uploader - - `license`: License name the video is licensed under - - `creator`: The creator of the video - - `release_date`: The date (YYYYMMDD) when the video was released - - `timestamp`: UNIX timestamp of the moment the video became available - - `upload_date`: Video upload date (YYYYMMDD) - - `uploader_id`: Nickname or id of the video uploader - - `location`: Physical location where the video was filmed - - `duration`: Length of the video in seconds - - `view_count`: How many users have watched the video on the platform - - `like_count`: Number of positive ratings of the video - - `dislike_count`: Number of negative ratings of the video - - `repost_count`: Number of reposts of the video - - `average_rating`: Average rating give by users, the scale used depends on the webpage - - `comment_count`: Number of comments on the video - - `age_limit`: Age restriction for the video (years) - - `format`: A human-readable description of the format - - `format_id`: Format code specified by `--format` - - `format_note`: Additional info about the format - - `width`: Width of the video - - `height`: Height of the video - - `resolution`: Textual description of width and height - - `tbr`: Average bitrate of audio and video in KBit/s - - `abr`: Average audio bitrate in KBit/s - - `acodec`: Name of the audio codec in use - - `asr`: Audio sampling rate in Hertz - - `vbr`: Average video bitrate in KBit/s - - `fps`: Frame rate - - `vcodec`: Name of the video codec in use - - `container`: Name of the container format - - `filesize`: The number of bytes, if known in advance - - `filesize_approx`: An estimate for the number of bytes - - `protocol`: The protocol that will be used for the actual download - - `extractor`: Name of the extractor - - `extractor_key`: Key name of the extractor - - `epoch`: Unix epoch when creating the file - - `autonumber`: Five-digit number that will be increased with each download, starting at zero - - `playlist`: Name or id of the playlist that contains the video - - `playlist_index`: Index of the video in the playlist padded with leading zeros according to the total length of the playlist - - `playlist_id`: Playlist identifier - - `playlist_title`: Playlist title + - `id` (string): Video identifier + - `title` (string): Video title + - `url` (string): Video URL + - `ext` (string): Video filename extension + - `alt_title` (string): A secondary title of the video + - `display_id` (string): An alternative identifier for the video + - `uploader` (string): Full name of the video uploader + - `license` (string): License name the video is licensed under + - `creator` (string): The creator of the video + - `release_date` (string): The date (YYYYMMDD) when the video was released + - `timestamp` (numeric): UNIX timestamp of the moment the video became available + - `upload_date` (string): Video upload date (YYYYMMDD) + - `uploader_id` (string): Nickname or id of the video uploader + - `location` (string): Physical location where the video was filmed + - `duration` (numeric): Length of the video in seconds + - `view_count` (numeric): How many users have watched the video on the platform + - `like_count` (numeric): Number of positive ratings of the video + - `dislike_count` (numeric): Number of 
negative ratings of the video + - `repost_count` (numeric): Number of reposts of the video + - `average_rating` (numeric): Average rating give by users, the scale used depends on the webpage + - `comment_count` (numeric): Number of comments on the video + - `age_limit` (numeric): Age restriction for the video (years) + - `format` (string): A human-readable description of the format + - `format_id` (string): Format code specified by `--format` + - `format_note` (string): Additional info about the format + - `width` (numeric): Width of the video + - `height` (numeric): Height of the video + - `resolution` (string): Textual description of width and height + - `tbr` (numeric): Average bitrate of audio and video in KBit/s + - `abr` (numeric): Average audio bitrate in KBit/s + - `acodec` (string): Name of the audio codec in use + - `asr` (numeric): Audio sampling rate in Hertz + - `vbr` (numeric): Average video bitrate in KBit/s + - `fps` (numeric): Frame rate + - `vcodec` (string): Name of the video codec in use + - `container` (string): Name of the container format + - `filesize` (numeric): The number of bytes, if known in advance + - `filesize_approx` (numeric): An estimate for the number of bytes + - `protocol` (string): The protocol that will be used for the actual download + - `extractor` (string): Name of the extractor + - `extractor_key` (string): Key name of the extractor + - `epoch` (numeric): Unix epoch when creating the file + - `autonumber` (numeric): Five-digit number that will be increased with each download, starting at zero + - `playlist` (string): Name or id of the playlist that contains the video + - `playlist_index` (numeric): Index of the video in the playlist padded with leading zeros according to the total length of the playlist + - `playlist_id` (string): Playlist identifier + - `playlist_title` (string): Playlist title Available for the video that belongs to some logical chapter or section: - - `chapter`: Name or title of the chapter the video belongs to - - `chapter_number`: Number of the chapter the video belongs to - - `chapter_id`: Id of the chapter the video belongs to + - `chapter` (string): Name or title of the chapter the video belongs to + - `chapter_number` (numeric): Number of the chapter the video belongs to + - `chapter_id` (string): Id of the chapter the video belongs to Available for the video that is an episode of some series or programme: - - `series`: Title of the series or programme the video episode belongs to - - `season`: Title of the season the video episode belongs to - - `season_number`: Number of the season the video episode belongs to - - `season_id`: Id of the season the video episode belongs to - - `episode`: Title of the video episode - - `episode_number`: Number of the video episode within a season - - `episode_id`: Id of the video episode + - `series` (string): Title of the series or programme the video episode belongs to + - `season` (string): Title of the season the video episode belongs to + - `season_number` (numeric): Number of the season the video episode belongs to + - `season_id` (string): Id of the season the video episode belongs to + - `episode` (string): Title of the video episode + - `episode_number` (numeric): Number of the video episode within a season + - `episode_id` (string): Id of the video episode Available for the media that is a track or a part of a music album: - - `track`: Title of the track - - `track_number`: Number of the track within an album or a disc - - `track_id`: Id of the track - - `artist`: Artist(s) of 
the track - - `genre`: Genre(s) of the track - - `album`: Title of the album the track belongs to - - `album_type`: Type of the album - - `album_artist`: List of all artists appeared on the album - - `disc_number`: Number of the disc or other physical medium the track belongs to - - `release_year`: Year (YYYY) when the album was released + - `track` (string): Title of the track + - `track_number` (numeric): Number of the track within an album or a disc + - `track_id` (string): Id of the track + - `artist` (string): Artist(s) of the track + - `genre` (string): Genre(s) of the track + - `album` (string): Title of the album the track belongs to + - `album_type` (string): Type of the album + - `album_artist` (string): List of all artists appeared on the album + - `disc_number` (numeric): Number of the disc or other physical medium the track belongs to + - `release_year` (numeric): Year (YYYY) when the album was released Each aforementioned sequence when referenced in an output template will be replaced by the actual value corresponding to the sequence name. Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with `NA`. For example for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `youtube-dl test video` and id `BaW_jenozKcj`, this will result in a `youtube-dl test video-BaW_jenozKcj.mp4` file created in the current directory. +For numeric sequences you can use numeric related formatting, for example, `%(view_count)05d` will result in a string with view count padded with zeros up to 5 characters, like in `00042`. + Output templates can also contain arbitrary hierarchical path, e.g. `-o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s'` which will result in downloading each video in a directory corresponding to this path template. Any missing directory will be automatically created for you. To use percent literals in an output template use `%%`. To output to stdout use `-o -`. 
From 8e1409fd805b3b5e3731da66a2101494643a06ea Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 23 Feb 2017 18:42:06 +0100 Subject: [PATCH 120/225] [go] sign all uplynk urls and use geo bypass only for free videos(closes #12087)(closes #12210) --- youtube_dl/extractor/go.py | 81 ++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 33 deletions(-) diff --git a/youtube_dl/extractor/go.py b/youtube_dl/extractor/go.py index b205bfc7c..21ed846b2 100644 --- a/youtube_dl/extractor/go.py +++ b/youtube_dl/extractor/go.py @@ -37,7 +37,6 @@ class GoIE(AdobePassIE): } } _VALID_URL = r'https?://(?:(?P<sub_domain>%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P<id>\w+)|season-\d+/\d+-(?P<display_id>[^/?#]+))' % '|'.join(_SITE_INFO.keys()) - _GEO_COUNTRIES = ['US'] _TESTS = [{ 'url': 'http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx', 'info_dict': { @@ -79,44 +78,60 @@ class GoIE(AdobePassIE): ext = determine_ext(asset_url) if ext == 'm3u8': video_type = video_data.get('type') - if video_type == 'lf': - data = { - 'video_id': video_data['id'], - 'video_type': video_type, - 'brand': brand, - 'device': '001', - } - if video_data.get('accesslevel') == '1': - requestor_id = site_info['requestor_id'] - resource = self._get_mvpd_resource( - requestor_id, title, video_id, None) - auth = self._extract_mvpd_auth( - url, video_id, requestor_id, resource) - data.update({ - 'token': auth, - 'token_type': 'ap', - 'adobe_requestor_id': requestor_id, - }) - entitlement = self._download_json( - 'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json', - video_id, data=urlencode_postdata(data), headers=self.geo_verification_headers()) - errors = entitlement.get('errors', {}).get('errors', []) - if errors: - for error in errors: - if error.get('code') == 1002: - self.raise_geo_restricted( - error['message'], countries=self._GEO_COUNTRIES) - error_message = ', '.join([error['message'] for error in errors]) - raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) - asset_url += '?' + entitlement['uplynkData']['sessionKey'] + data = { + 'video_id': video_data['id'], + 'video_type': video_type, + 'brand': brand, + 'device': '001', + } + if video_data.get('accesslevel') == '1': + requestor_id = site_info['requestor_id'] + resource = self._get_mvpd_resource( + requestor_id, title, video_id, None) + auth = self._extract_mvpd_auth( + url, video_id, requestor_id, resource) + data.update({ + 'token': auth, + 'token_type': 'ap', + 'adobe_requestor_id': requestor_id, + }) + else: + self._initialize_geo_bypass(['US']) + entitlement = self._download_json( + 'https://api.entitlement.watchabc.go.com/vp2/ws-secure/entitlement/2020/authorize.json', + video_id, data=urlencode_postdata(data), headers=self.geo_verification_headers()) + errors = entitlement.get('errors', {}).get('errors', []) + if errors: + for error in errors: + if error.get('code') == 1002: + self.raise_geo_restricted( + error['message'], countries=['US']) + error_message = ', '.join([error['message'] for error in errors]) + raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) + asset_url += '?' 
+ entitlement['uplynkData']['sessionKey'] formats.extend(self._extract_m3u8_formats( asset_url, video_id, 'mp4', m3u8_id=format_id or 'hls', fatal=False)) else: - formats.append({ + f = { 'format_id': format_id, 'url': asset_url, 'ext': ext, - }) + } + if re.search(r'(?:/mp4/source/|_source\.mp4)', asset_url): + f.update({ + 'format_id': ('%s-' % format_id if format_id else '') + 'SOURCE', + 'preference': 1, + }) + else: + mobj = re.search(r'/(\d+)x(\d+)/', asset_url) + if mobj: + height = int(mobj.group(2)) + f.update({ + 'format_id': ('%s-' % format_id if format_id else '') + '%dP' % height, + 'width': int(mobj.group(1)), + 'height': height, + }) + formats.append(f) self._sort_formats(formats) subtitles = {} From 19f3821821ada01fbf9b466402bc1d0366b3edb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 02:09:13 +0700 Subject: [PATCH 121/225] [devscripts/make_lazy_extractors] Fix making lazy extractors on python 3 under Windows --- devscripts/make_lazy_extractors.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/devscripts/make_lazy_extractors.py b/devscripts/make_lazy_extractors.py index 19114d30d..0a1762dbc 100644 --- a/devscripts/make_lazy_extractors.py +++ b/devscripts/make_lazy_extractors.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals, print_function from inspect import getsource +import io import os from os.path import dirname as dirn import sys @@ -95,5 +96,5 @@ module_contents.append( module_src = '\n'.join(module_contents) + '\n' -with open(lazy_extractors_filename, 'wt') as f: +with io.open(lazy_extractors_filename, 'wt', encoding='utf-8') as f: f.write(module_src) From 0f57447de790fb2434cdd80d819876859ac4fcc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 04:56:58 +0700 Subject: [PATCH 122/225] [postprocessor/ffmpeg] Add mising space (closes #12232) --- youtube_dl/postprocessor/ffmpeg.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py index 1881f4849..96ddb3b36 100644 --- a/youtube_dl/postprocessor/ffmpeg.py +++ b/youtube_dl/postprocessor/ffmpeg.py @@ -536,8 +536,7 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor): ext = sub['ext'] if ext == new_ext: self._downloader.to_screen( - '[ffmpeg] Subtitle file for %s is already in the requested' - 'format' % new_ext) + '[ffmpeg] Subtitle file for %s is already in the requested format' % new_ext) continue old_file = subtitles_filename(filename, lang, ext) sub_filenames.append(old_file) From f2980fddeb587f113afe15cc3ecf4bfc3911ca67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 05:01:31 +0700 Subject: [PATCH 123/225] [lynda:course] Add webpage extraction fallback (closes #12238) --- youtube_dl/extractor/lynda.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py index da94eab56..d2f75296a 100644 --- a/youtube_dl/extractor/lynda.py +++ b/youtube_dl/extractor/lynda.py @@ -260,9 +260,24 @@ class LyndaCourseIE(LyndaBaseIE): course_path = mobj.group('coursepath') course_id = mobj.group('courseid') + item_template = 'https://www.lynda.com/%s/%%s-4.html' % course_path + course = self._download_json( 'https://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id, - course_id, 'Downloading course JSON') + course_id, 'Downloading course JSON', 
fatal=False) + + if not course: + webpage = self._download_webpage(url, course_id) + entries = [ + self.url_result( + item_template % video_id, ie=LyndaIE.ie_key(), + video_id=video_id) + for video_id in re.findall( + r'data-video-id=["\'](\d+)', webpage)] + return self.playlist_result( + entries, course_id, + self._og_search_title(webpage, fatal=False), + self._og_search_description(webpage)) if course.get('Status') == 'NotFound': raise ExtractorError( @@ -283,7 +298,7 @@ class LyndaCourseIE(LyndaBaseIE): if video_id: entries.append({ '_type': 'url_transparent', - 'url': 'https://www.lynda.com/%s/%s-4.html' % (course_path, video_id), + 'url': item_template % video_id, 'ie_key': LyndaIE.ie_key(), 'chapter': chapter.get('Title'), 'chapter_number': int_or_none(chapter.get('ChapterIndex')), From be5df5ee311b3ad027f5d59fe077969babd0aa53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 06:04:27 +0700 Subject: [PATCH 124/225] Suppress help for all deprecated options and print warning when used --- youtube_dl/YoutubeDL.py | 14 ++++++++++++-- youtube_dl/__init__.py | 3 +++ youtube_dl/options.py | 12 +++++------- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 56a8691eb..f7254560c 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -328,11 +328,21 @@ class YoutubeDL(object): self.params.update(params) self.cache = Cache(self) - if self.params.get('cn_verification_proxy') is not None: - self.report_warning('--cn-verification-proxy is deprecated. Use --geo-verification-proxy instead.') + def check_deprecated(param, option, suggestion): + if self.params.get(param) is not None: + self.report_warning( + '%s is deprecated. Use %s instead.' 
% (option, suggestion)) + return True + return False + + if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'): if self.params.get('geo_verification_proxy') is None: self.params['geo_verification_proxy'] = self.params['cn_verification_proxy'] + check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits') + check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"') + check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"') + if params.get('bidi_workaround', False): try: import pty diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index f91d29a7b..0c401baa6 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -416,6 +416,9 @@ def _real_main(argv=None): 'config_location': opts.config_location, 'geo_bypass': opts.geo_bypass, 'geo_bypass_country': opts.geo_bypass_country, + # just for deprecation check + 'autonumber': opts.autonumber if opts.autonumber is True else None, + 'usetitle': opts.usetitle if opts.usetitle is True else None, } with YoutubeDL(ydl_opts) as ydl: diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 5a11dddf9..8b51d3c6f 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -679,10 +679,8 @@ def parseOpts(overrideArguments=None): help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info')) filesystem.add_option( '--autonumber-size', - dest='autonumber_size', metavar='NUMBER', default=5, type=int, - help='[deprecated; use output template with %(autonumber)0Nd, where N in the number of digits] ' - 'Specify the number of digits in %(autonumber)s when it is present ' - 'in output filename template or --auto-number option is given (default is %default)') + dest='autonumber_size', metavar='NUMBER', type=int, + help=optparse.SUPPRESS_HELP) filesystem.add_option( '--autonumber-start', dest='autonumber_start', metavar='NUMBER', default=1, type=int, @@ -694,15 +692,15 @@ def parseOpts(overrideArguments=None): filesystem.add_option( '-A', '--auto-number', action='store_true', dest='autonumber', default=False, - help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000') + help=optparse.SUPPRESS_HELP) filesystem.add_option( '-t', '--title', action='store_true', dest='usetitle', default=False, - help='[deprecated] Use title in file name (default)') + help=optparse.SUPPRESS_HELP) filesystem.add_option( '-l', '--literal', default=False, action='store_true', dest='usetitle', - help='[deprecated] Alias of --title') + help=optparse.SUPPRESS_HELP) filesystem.add_option( '-w', '--no-overwrites', action='store_true', dest='nooverwrites', default=False, From f2f7961820155b6db4a70f083f011cd014c9b51c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 06:07:41 +0700 Subject: [PATCH 125/225] [ChangeLog] Actualize --- ChangeLog | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/ChangeLog b/ChangeLog index cff065171..6f63723fc 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,23 @@ +version <unreleased> + +Core +* [options] Hide deprecated options from --help +* [options] Deprecate --autonumber-size ++ [YoutubeDL] Add support for string formatting operations in output template + (#5185, #5748, #6841, #9929, #9966 #9978, #12189) + +Extractors ++ [lynda:course] Add webpage extraction fallback (#12238) +* [go] Sign all uplynk URLs and use geo bypass only for 
free videos + (#12087, #12210) ++ [skylinewebcams] Add support for skylinewebcams.com (#12221) ++ [instagram] Add support for multi video posts (#12226) ++ [crunchyroll] Extract playlist entries ids +* [mgtv] Fix extraction ++ [sohu] Raise GeoRestrictedError ++ [leeco] Raise GeoRestrictedError and use geo bypass mechanism + + version 2017.02.22 Extractors From 6b097cff278c93de0665bf681729d75121a98eed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 06:09:15 +0700 Subject: [PATCH 126/225] release 2017.02.24 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- README.md | 10 ---------- docs/supportedsites.md | 1 + youtube_dl/version.py | 2 +- 5 files changed, 6 insertions(+), 15 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 923f28276..743b796ac 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.22*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.22** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.24*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. +- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.24** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v <your command line> [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.22 +[debug] youtube-dl version 2017.02.24 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 6f63723fc..6034c9ea4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version <unreleased> +version 2017.02.24 Core * [options] Hide deprecated options from --help diff --git a/README.md b/README.md index b4364ef61..0fc5984dc 100644 --- a/README.md +++ b/README.md @@ -217,21 +217,11 @@ Alternatively, refer to the [developer instructions](#developer-instructions) fo --id Use only video ID in file name -o, --output TEMPLATE Output filename template, see the "OUTPUT TEMPLATE" for all the info - --autonumber-size NUMBER Specify the number of digits in - %(autonumber)s when it is present in output - filename template or --auto-number option - is given (default is 5) --autonumber-start NUMBER Specify the start value for %(autonumber)s (default is 1) --restrict-filenames Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames - -A, --auto-number [deprecated; use -o - "%(autonumber)s-%(title)s.%(ext)s" ] Number - downloaded files starting from 00000 - -t, --title [deprecated] Use title in file name - (default) - 
-l, --literal [deprecated] Alias of --title -w, --no-overwrites Do not overwrite files -c, --continue Force resume of partially downloaded files. By default, youtube-dl will resume diff --git a/docs/supportedsites.md b/docs/supportedsites.md index 1eb9c2cdd..f97397331 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -680,6 +680,7 @@ - **Shared**: shared.sx - **ShowRoomLive** - **Sina** + - **SkylineWebcams** - **skynewsarabia:article** - **skynewsarabia:video** - **SkySports** diff --git a/youtube_dl/version.py b/youtube_dl/version.py index fce1b8558..129447e10 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.22' +__version__ = '2017.02.24' From 42dcdbe11cd738e6b196bc9c14b746a71d61de5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 10:52:41 +0700 Subject: [PATCH 127/225] [ivi] Raise GeoRestrictedError --- youtube_dl/extractor/ivi.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/ivi.py b/youtube_dl/extractor/ivi.py index 3d3c15024..cb51cef2d 100644 --- a/youtube_dl/extractor/ivi.py +++ b/youtube_dl/extractor/ivi.py @@ -16,6 +16,8 @@ class IviIE(InfoExtractor): IE_DESC = 'ivi.ru' IE_NAME = 'ivi' _VALID_URL = r'https?://(?:www\.)?ivi\.ru/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)' + _GEO_BYPASS = False + _GEO_COUNTRIES = ['RU'] _TESTS = [ # Single movie @@ -91,7 +93,11 @@ class IviIE(InfoExtractor): if 'error' in video_json: error = video_json['error'] - if error['origin'] == 'NoRedisValidData': + origin = error['origin'] + if origin == 'NotAllowedForLocation': + self.raise_geo_restricted( + msg=error['message'], countries=self._GEO_COUNTRIES) + elif origin == 'NoRedisValidData': raise ExtractorError('Video %s does not exist' % video_id, expected=True) raise ExtractorError( 'Unable to download video %s: %s' % (video_id, error['message']), From 39e7277ed16c1647d636c766d57870121f5f2d68 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Fri, 24 Feb 2017 11:21:13 +0100 Subject: [PATCH 128/225] [openload] fix extraction(closes #10408) --- youtube_dl/extractor/openload.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py index 10896c442..fc7ff43a6 100644 --- a/youtube_dl/extractor/openload.py +++ b/youtube_dl/extractor/openload.py @@ -72,16 +72,21 @@ class OpenloadIE(InfoExtractor): raise ExtractorError('File not found', expected=True) ol_id = self._search_regex( - '<span[^>]+id="[^"]+"[^>]*>([0-9]+)</span>', + '<span[^>]+id="[^"]+"[^>]*>([0-9A-Za-z]+)</span>', webpage, 'openload ID') - first_two_chars = int(float(ol_id[0:][:2])) + first_char = int(ol_id[0]) urlcode = [] - num = 2 + num = 1 while num < len(ol_id): - key = int(float(ol_id[num + 3:][:2])) - urlcode.append((key, compat_chr(int(float(ol_id[num:][:3])) - first_two_chars))) + i = ord(ol_id[num]) + key = 0 + if i <= 90: + key = i - 65 + elif i >= 97: + key = 25 + i - 97 + urlcode.append((key, compat_chr(int(ol_id[num + 2:num + 5]) // int(ol_id[num + 1]) - first_char))) num += 5 video_url = 'https://openload.co/stream/' + ''.join( From 68f17a9c2df07828d364421e59ede26981aa7756 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Fri, 24 Feb 2017 12:27:56 +0100 Subject: [PATCH 129/225] [tubitv] use geo bypass mechanism --- youtube_dl/extractor/tubitv.py | 1 + 1 file 
changed, 1 insertion(+) diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py index 3a37df2e8..c44018aec 100644 --- a/youtube_dl/extractor/tubitv.py +++ b/youtube_dl/extractor/tubitv.py @@ -16,6 +16,7 @@ class TubiTvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tubitv\.com/video/(?P<id>[0-9]+)' _LOGIN_URL = 'http://tubitv.com/login' _NETRC_MACHINE = 'tubitv' + _GEO_COUNTRIES = ['US'] _TEST = { 'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday', 'md5': '43ac06be9326f41912dc64ccf7a80320', From 51ed496307a8a98134f25e8128c317663ccd4355 Mon Sep 17 00:00:00 2001 From: Thomas Christlieb <thomaschristlieb@hotmail.com> Date: Fri, 24 Feb 2017 15:08:45 +0100 Subject: [PATCH 130/225] [thescene] Fix extraction (closes #12235) --- youtube_dl/extractor/thescene.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/thescene.py b/youtube_dl/extractor/thescene.py index ce1326c03..6c16d66ed 100644 --- a/youtube_dl/extractor/thescene.py +++ b/youtube_dl/extractor/thescene.py @@ -32,7 +32,7 @@ class TheSceneIE(InfoExtractor): player = self._download_webpage(player_url, display_id) info = self._parse_json( self._search_regex( - r'(?m)var\s+video\s+=\s+({.+?});$', player, 'info json'), + r'(?m)video\s*:\s*({.+?}),$', player, 'info json'), display_id) qualities_order = qualities(('low', 'high')) @@ -40,7 +40,7 @@ class TheSceneIE(InfoExtractor): 'format_id': '{0}-{1}'.format(f['type'].split('/')[0], f['quality']), 'url': f['src'], 'quality': qualities_order(f['quality']), - } for f in info['sources'][0]] + } for f in info['sources']] self._sort_formats(formats) return { From 441d7a32e563b3985c58af5ab80d76ad943f0c07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 21:22:29 +0700 Subject: [PATCH 131/225] [thescene] Extract more metadata --- youtube_dl/extractor/thescene.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/thescene.py b/youtube_dl/extractor/thescene.py index 6c16d66ed..b8504f0eb 100644 --- a/youtube_dl/extractor/thescene.py +++ b/youtube_dl/extractor/thescene.py @@ -3,7 +3,10 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse -from ..utils import qualities +from ..utils import ( + int_or_none, + qualities, +) class TheSceneIE(InfoExtractor): @@ -16,6 +19,11 @@ class TheSceneIE(InfoExtractor): 'ext': 'mp4', 'title': 'Narciso Rodriguez: Spring 2013 Ready-to-Wear', 'display_id': 'narciso-rodriguez-spring-2013-ready-to-wear', + 'duration': 127, + 'series': 'Style.com Fashion Shows', + 'season': 'Ready To Wear Spring 2013', + 'tags': list, + 'categories': list, }, } @@ -35,6 +43,9 @@ class TheSceneIE(InfoExtractor): r'(?m)video\s*:\s*({.+?}),$', player, 'info json'), display_id) + video_id = info['id'] + title = info['title'] + qualities_order = qualities(('low', 'high')) formats = [{ 'format_id': '{0}-{1}'.format(f['type'].split('/')[0], f['quality']), @@ -44,9 +55,14 @@ class TheSceneIE(InfoExtractor): self._sort_formats(formats) return { - 'id': info['id'], + 'id': video_id, 'display_id': display_id, - 'title': info['title'], + 'title': title, 'formats': formats, 'thumbnail': info.get('poster_frame'), + 'duration': int_or_none(info.get('duration')), + 'series': info.get('series_title'), + 'season': info.get('season_title'), + 'tags': info.get('tags'), + 'categories': info.get('categories'), } From f3bc281239bafa971195eefd30773d152bfdb10c 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 21:48:34 +0700 Subject: [PATCH 132/225] [noco] Swtich login URL to https (closes #12246) --- youtube_dl/extractor/noco.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py index 70ff2ab36..fc0624dd0 100644 --- a/youtube_dl/extractor/noco.py +++ b/youtube_dl/extractor/noco.py @@ -23,7 +23,7 @@ from ..utils import ( class NocoIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)' - _LOGIN_URL = 'http://noco.tv/do.php' + _LOGIN_URL = 'https://noco.tv/do.php' _API_URL_TEMPLATE = 'https://api.noco.tv/1.1/%s?ts=%s&tk=%s' _SUB_LANG_TEMPLATE = '&sub_lang=%s' _NETRC_MACHINE = 'noco' From ad3033037ce6b6809c3d06f2074d36691ba10cbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 21:50:13 +0700 Subject: [PATCH 133/225] [noco] Modernize --- youtube_dl/extractor/noco.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py index fc0624dd0..8b83e1f76 100644 --- a/youtube_dl/extractor/noco.py +++ b/youtube_dl/extractor/noco.py @@ -69,16 +69,17 @@ class NocoIE(InfoExtractor): if username is None: return - login_form = { - 'a': 'login', - 'cookie': '1', - 'username': username, - 'password': password, - } - request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form)) - request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8') - - login = self._download_json(request, None, 'Logging in as %s' % username) + login = self._download_json( + self._LOGIN_URL, None, 'Logging in as %s' % username, + data=urlencode_postdata({ + 'a': 'login', + 'cookie': '1', + 'username': username, + 'password': password, + }), + headers={ + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + }) if 'erreur' in login: raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True) From 5d3a51e1b939ada71b3ec6ae7e004ccb6e0861c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 21:57:39 +0700 Subject: [PATCH 134/225] [ChangeLog] Actualize --- ChangeLog | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ChangeLog b/ChangeLog index 6034c9ea4..0225af42a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,15 @@ +version <unreleased> + +Extractors +* [noco] Modernize +* [noco] Switch login URL to https (#12246) ++ [thescene] Extract more metadata +* [thescene] Fix extraction (#12235) ++ [tubitv] Use geo bypass mechanism +* [openload] Fix extraction (#10408) ++ [ivi] Raise GeoRestrictedError + + version 2017.02.24 Core From 204efc850905a5b78c86f884b94210644784b9c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Fri, 24 Feb 2017 21:59:39 +0700 Subject: [PATCH 135/225] release 2017.02.24.1 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- youtube_dl/version.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 743b796ac..564108122 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.24*. 
If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.24** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.24.1*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. +- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.24.1** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v <your command line> [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.24 +[debug] youtube-dl version 2017.02.24.1 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 0225af42a..add8a6758 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version <unreleased> +version 2017.02.24.1 Extractors * [noco] Modernize diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 129447e10..fe7462eac 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.24' +__version__ = '2017.02.24.1' From 231bcd0b6b7737e6b0484ce8aaa4a14de442a1ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 25 Feb 2017 02:51:53 +0700 Subject: [PATCH 136/225] [amcnetworks] Relax _VALID_URL (#12127) --- youtube_dl/extractor/amcnetworks.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/amcnetworks.py b/youtube_dl/extractor/amcnetworks.py index b71d1a093..3a0ec6776 100644 --- a/youtube_dl/extractor/amcnetworks.py +++ b/youtube_dl/extractor/amcnetworks.py @@ -10,7 +10,7 @@ from ..utils import ( class AMCNetworksIE(ThePlatformIE): - _VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|wetv)\.com/(?:movies/|shows/[^/]+/(?:full-episodes/)?[^/]+/episode-\d+(?:-(?:[^/]+/)?|/))(?P<id>[^/?#]+)' + _VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|wetv)\.com/(?:movies|shows(?:/[^/]+)+)/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'http://www.ifc.com/shows/maron/season-04/episode-01/step-1', 'md5': '', @@ -44,6 +44,12 @@ class AMCNetworksIE(ThePlatformIE): }, { 'url': 'http://www.bbcamerica.com/shows/doctor-who/full-episodes/the-power-of-the-daleks/episode-01-episode-1-color-version', 'only_matching': True, + }, { + 'url': 'http://www.wetv.com/shows/mama-june-from-not-to-hot/full-episode/season-01/thin-tervention', + 'only_matching': True, + }, { + 'url': 'http://www.wetv.com/shows/la-hair/videos/season-05/episode-09-episode-9-2/episode-9-sneak-peek-3', + 'only_matching': True, }] def _real_extract(self, url): From eeb0a9568442a4dbbf3478579abe2696fbe890e2 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Sat, 25 Feb 2017 18:40:05 +0800 Subject: [PATCH 137/225] [extractor/common] Add 
'preference' to _parse_html5_media_entries Some websites, like NJPWorld, put different qualities on different player pages. --- youtube_dl/extractor/common.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 4252d6825..eb3c091aa 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -2010,7 +2010,7 @@ class InfoExtractor(object): }) return formats - def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None): + def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None): def absolute_url(video_url): return compat_urlparse.urljoin(base_url, video_url) @@ -2032,7 +2032,8 @@ class InfoExtractor(object): is_plain_url = False formats = self._extract_m3u8_formats( full_url, video_id, ext='mp4', - entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id) + entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id, + preference=preference) elif ext == 'mpd': is_plain_url = False formats = self._extract_mpd_formats( From db182c63fb4a58974e425a56d235131fd9efc531 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Sat, 25 Feb 2017 18:44:39 +0800 Subject: [PATCH 138/225] [njpwworld] Add new extractor (closes #11561) --- ChangeLog | 6 +++ youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/njpwworld.py | 83 ++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+) create mode 100644 youtube_dl/extractor/njpwworld.py diff --git a/ChangeLog b/ChangeLog index add8a6758..e0e1f52d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version <unreleased> + +Extractors ++ [njpwworld] Add new extractor (#11561) + + version 2017.02.24.1 Extractors diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 83a170fa7..703bc5d34 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -637,6 +637,7 @@ from .ninecninemedia import ( from .ninegag import NineGagIE from .ninenow import NineNowIE from .nintendo import NintendoIE +from .njpwworld import NJPWWorldIE from .nobelprize import NobelPrizeIE from .noco import NocoIE from .normalboots import NormalbootsIE diff --git a/youtube_dl/extractor/njpwworld.py b/youtube_dl/extractor/njpwworld.py new file mode 100644 index 000000000..f5e3f6815 --- /dev/null +++ b/youtube_dl/extractor/njpwworld.py @@ -0,0 +1,83 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + get_element_by_class, + urlencode_postdata, +) + + +class NJPWWorldIE(InfoExtractor): + _VALID_URL = r'https?://njpwworld\.com/p/(?P<id>[a-z0-9_]+)' + IE_DESC = '新日本プロレスワールド' + _NETRC_MACHINE = 'njpwworld' + + _TEST = { + 'url': 'http://njpwworld.com/p/s_series_00155_1_9/', + 'info_dict': { + 'id': 's_series_00155_1_9', + 'ext': 'mp4', + 'title': '第9試合 ランディ・サベージ vs リック・スタイナー', + 'tags': list, + }, + 'params': { + 'skip_download': True, # AES-encrypted m3u8 + }, + 'skip': 'Requires login', + } + + def _real_initialize(self): + self._login() + + def _login(self): + username, password = self._get_login_info() + # No authentication to be performed + if not username: + return True + + webpage, urlh = self._download_webpage_handle( + 'https://njpwworld.com/auth/login', None, + note='Logging in', errnote='Unable to login', + 
data=urlencode_postdata({'login_id': username, 'pw': password})) + # /auth/login will return 302 for successful logins + if urlh.geturl() == 'https://njpwworld.com/auth/login': + self.report_warning('unable to login') + return False + + return True + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + formats = [] + for player_url, kind in re.findall(r'<a[^>]+href="(/player[^"]+)".+?<img[^>]+src="[^"]+qf_btn_([^".]+)', webpage): + player_url = compat_urlparse.urljoin(url, player_url) + + player_page = self._download_webpage( + player_url, video_id, note='Downloading player page') + + entries = self._parse_html5_media_entries( + player_url, player_page, video_id, m3u8_id='hls-%s' % kind, + m3u8_entry_protocol='m3u8_native', + preference=2 if 'hq' in kind else 1) + formats.extend(entries[0]['formats']) + + self._sort_formats(formats) + + post_content = get_element_by_class('post-content', webpage) + tags = re.findall( + r'<li[^>]+class="tag-[^"]+"><a[^>]*>([^<]+)</a></li>', post_content + ) if post_content else None + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'formats': formats, + 'tags': tags, + } From 831217291ac05ad75ef16fd6d9985e255489c1e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 25 Feb 2017 19:44:31 +0700 Subject: [PATCH 139/225] [compat] Use try except for compat_numeric_types --- youtube_dl/compat.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py index b257e2e81..0c119e417 100644 --- a/youtube_dl/compat.py +++ b/youtube_dl/compat.py @@ -2760,8 +2760,10 @@ else: compat_kwargs = lambda kwargs: kwargs -compat_numeric_types = ((int, float, long, complex) if sys.version_info[0] < 3 - else (int, float, complex)) +try: + compat_numeric_types = (int, float, long, complex) +except NameError: # Python 3 + compat_numeric_types = (int, float, complex) if sys.version_info < (2, 7): From 922ab7840b01bd108887849063572cffa855cdc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 25 Feb 2017 20:16:40 +0700 Subject: [PATCH 140/225] [etonline] Add extractor (closes #12236) --- youtube_dl/extractor/etonline.py | 39 ++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 40 insertions(+) create mode 100644 youtube_dl/extractor/etonline.py diff --git a/youtube_dl/extractor/etonline.py b/youtube_dl/extractor/etonline.py new file mode 100644 index 000000000..17d7cfec6 --- /dev/null +++ b/youtube_dl/extractor/etonline.py @@ -0,0 +1,39 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class ETOnlineIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?etonline\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)' + _TESTS = [{ + 'url': 'http://www.etonline.com/tv/211130_dove_cameron_liv_and_maddie_emotional_episode_series_finale/', + 'info_dict': { + 'id': '211130_dove_cameron_liv_and_maddie_emotional_episode_series_finale', + 'title': 'md5:a21ec7d3872ed98335cbd2a046f34ee6', + 'description': 'md5:8b94484063f463cca709617c79618ccd', + }, + 'playlist_count': 2, + }, { + 'url': 'http://www.etonline.com/media/video/here_are_the_stars_who_love_bringing_their_moms_as_dates_to_the_oscars-211359/', + 'only_matching': True, + }] + BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1242911076001/default_default/index.html?videoId=ref:%s' + + def _real_extract(self, url): + 
playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + entries = [ + self.url_result( + self.BRIGHTCOVE_URL_TEMPLATE % video_id, 'BrightcoveNew', video_id) + for video_id in re.findall( + r'site\.brightcove\s*\([^,]+,\s*["\'](title_\d+)', webpage)] + + return self.playlist_result( + entries, playlist_id, + self._og_search_title(webpage, fatal=False), + self._og_search_description(webpage)) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 703bc5d34..9f5aaf1d8 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -288,6 +288,7 @@ from .espn import ( ESPNArticleIE, ) from .esri import EsriVideoIE +from .etonline import ETOnlineIE from .europa import EuropaIE from .everyonesmixtape import EveryonesMixtapeIE from .expotv import ExpoTVIE From 103f8c8d36170d5cb489420db0e8fe383b1f93dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 25 Feb 2017 20:38:21 +0700 Subject: [PATCH 141/225] [xhamster] Capture and output videoClosed error (#12263) --- youtube_dl/extractor/xhamster.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py index 36a8c9840..7b6703714 100644 --- a/youtube_dl/extractor/xhamster.py +++ b/youtube_dl/extractor/xhamster.py @@ -5,6 +5,7 @@ import re from .common import InfoExtractor from ..utils import ( dict_get, + ExtractorError, int_or_none, parse_duration, unified_strdate, @@ -57,6 +58,10 @@ class XHamsterIE(InfoExtractor): }, { 'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html', 'only_matching': True, + }, { + # This video is visible for marcoalfa123456's friends only + 'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html', + 'only_matching': True, }] def _real_extract(self, url): @@ -78,6 +83,12 @@ class XHamsterIE(InfoExtractor): mrss_url = '%s://xhamster.com/movies/%s/%s.html' % (proto, video_id, seo) webpage = self._download_webpage(mrss_url, video_id) + error = self._html_search_regex( + r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>', + webpage, 'error', default=None) + if error: + raise ExtractorError(error, expected=True) + title = self._html_search_regex( [r'<h1[^>]*>([^<]+)</h1>', r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"', From d374d943f3d9eca4b2052cfc830905d299a0688a Mon Sep 17 00:00:00 2001 From: Pratyush Singh <singh.pratyush96@gmail.com> Date: Sat, 24 Dec 2016 21:05:41 +0530 Subject: [PATCH 142/225] [downloader/common] Limit displaying 2 digits after decimal point in sleep interval message --- youtube_dl/downloader/common.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py index 3dc144b4e..2c4470a95 100644 --- a/youtube_dl/downloader/common.py +++ b/youtube_dl/downloader/common.py @@ -347,7 +347,10 @@ class FileDownloader(object): if min_sleep_interval: max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval) sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval) - self.to_screen('[download] Sleeping %s seconds...' % sleep_interval) + self.to_screen( + '[download] Sleeping %s seconds...' 
% ( + int(sleep_interval) if sleep_interval.is_integer() + else '%.2f' % sleep_interval)) time.sleep(sleep_interval) return self.real_download(filename, info_dict) From 5fc8d893612285ddba082f381006337573b36afa Mon Sep 17 00:00:00 2001 From: Ricardo Constantino <wiiaboo@gmail.com> Date: Sat, 18 Feb 2017 17:42:31 +0000 Subject: [PATCH 143/225] [freshlive] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/freshlive.py | 64 ++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 youtube_dl/extractor/freshlive.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 9f5aaf1d8..860b8f422 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -339,6 +339,7 @@ from .francetv import ( ) from .freesound import FreesoundIE from .freespeech import FreespeechIE +from .freshlive import FreshliveIE from .funimation import FunimationIE from .funnyordie import FunnyOrDieIE from .fusion import FusionIE diff --git a/youtube_dl/extractor/freshlive.py b/youtube_dl/extractor/freshlive.py new file mode 100644 index 000000000..113f8f4fa --- /dev/null +++ b/youtube_dl/extractor/freshlive.py @@ -0,0 +1,64 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + +from ..utils import ( + int_or_none, + parse_iso8601 +) + +class FreshliveIE(InfoExtractor): + _VALID_URL = r'https?://freshlive\.tv/(?P<streamer>[^/]+)/(?P<id>[0-9]+)' + _TEST = { + 'url': 'https://freshlive.tv/satotv/74712', + 'md5': '224f50d268b6b9f94e4198deccd55d6d', + 'info_dict': { + 'description': 'テスト', + 'duration': 1511, + 'id': '74712', + 'ext': 'mp4', + 'timestamp': 1483621764, + 'title': 'テスト', + 'thumbnail': r're:^https?://.*\.jpg$', + 'upload_date': '20170105', + 'uploader': 'サトTV', + 'uploader_id': 'satotv', + 'view_count': int, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + options = self._parse_json( + self._search_regex( + r'window\.__CONTEXT__\s*=\s*({.+?});\s*</script>', + webpage, 'initial context'), + video_id) + + programs = options['context']['dispatcher']['stores']['ProgramStore']['programs'] + info = programs.get(video_id, {}) + + video_url = info.get('liveStreamUrl') or info.get('archiveStreamUrl') + if not video_url: + raise ExtractorError('%s not a valid broadcast ID' % video_id, expected=True) + + formats = self._extract_m3u8_formats( + video_url, video_id, ext='mp4', m3u8_id='hls') + + return { + 'id': video_id, + 'formats': formats, + 'title': info.get('title'), + 'description': info.get('description'), + 'duration': int_or_none(info.get('airTime')), + 'is_live': int_or_none(info.get('airTime')) == None, + 'thumbnail': info.get('thumbnailUrl'), + 'uploader': info.get('channel', {}).get('title'), + 'uploader_id': info.get('channel', {}).get('code'), + 'uploader_url': info.get('channel', {}).get('permalink'), + 'timestamp': parse_iso8601(info.get('startAt')), + 'view_count': int_or_none(info.get('viewCount')), + } \ No newline at end of file From e498758b9cfa983d05b10a185ecdef480a93cf30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 25 Feb 2017 22:56:42 +0700 Subject: [PATCH 144/225] [freshlive] Fix issues and improve (closes #12175) --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/freshlive.py | 64 ++++++++++++++++++++---------- 2 files changed, 43 insertions(+), 23 deletions(-) diff --git 
a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 860b8f422..58139ee4e 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -339,7 +339,7 @@ from .francetv import ( ) from .freesound import FreesoundIE from .freespeech import FreespeechIE -from .freshlive import FreshliveIE +from .freshlive import FreshLiveIE from .funimation import FunimationIE from .funnyordie import FunnyOrDieIE from .fusion import FusionIE diff --git a/youtube_dl/extractor/freshlive.py b/youtube_dl/extractor/freshlive.py index 113f8f4fa..a90f9156c 100644 --- a/youtube_dl/extractor/freshlive.py +++ b/youtube_dl/extractor/freshlive.py @@ -2,34 +2,40 @@ from __future__ import unicode_literals from .common import InfoExtractor - +from ..compat import compat_str from ..utils import ( + ExtractorError, int_or_none, - parse_iso8601 + try_get, + unified_timestamp, ) -class FreshliveIE(InfoExtractor): - _VALID_URL = r'https?://freshlive\.tv/(?P<streamer>[^/]+)/(?P<id>[0-9]+)' + +class FreshLiveIE(InfoExtractor): + _VALID_URL = r'https?://freshlive\.tv/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'https://freshlive.tv/satotv/74712', - 'md5': '224f50d268b6b9f94e4198deccd55d6d', + 'md5': '9f0cf5516979c4454ce982df3d97f352', 'info_dict': { - 'description': 'テスト', - 'duration': 1511, 'id': '74712', 'ext': 'mp4', - 'timestamp': 1483621764, 'title': 'テスト', + 'description': 'テスト', 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 1511, + 'timestamp': 1483619655, 'upload_date': '20170105', 'uploader': 'サトTV', 'uploader_id': 'satotv', 'view_count': int, + 'comment_count': int, + 'is_live': False, } } def _real_extract(self, url): video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) options = self._parse_json( @@ -38,27 +44,41 @@ class FreshliveIE(InfoExtractor): webpage, 'initial context'), video_id) - programs = options['context']['dispatcher']['stores']['ProgramStore']['programs'] - info = programs.get(video_id, {}) + info = options['context']['dispatcher']['stores']['ProgramStore']['programs'][video_id] - video_url = info.get('liveStreamUrl') or info.get('archiveStreamUrl') - if not video_url: - raise ExtractorError('%s not a valid broadcast ID' % video_id, expected=True) + title = info['title'] + + if info.get('status') == 'upcoming': + raise ExtractorError('Stream %s is upcoming' % video_id, expected=True) + + stream_url = info.get('liveStreamUrl') or info['archiveStreamUrl'] + + is_live = info.get('liveStreamUrl') is not None formats = self._extract_m3u8_formats( - video_url, video_id, ext='mp4', m3u8_id='hls') + stream_url, video_id, ext='mp4', + entry_protocol='m3u8' if is_live else 'm3u8_native', + m3u8_id='hls') + + if is_live: + title = self._live_title(title) return { 'id': video_id, 'formats': formats, - 'title': info.get('title'), + 'title': title, 'description': info.get('description'), - 'duration': int_or_none(info.get('airTime')), - 'is_live': int_or_none(info.get('airTime')) == None, 'thumbnail': info.get('thumbnailUrl'), - 'uploader': info.get('channel', {}).get('title'), - 'uploader_id': info.get('channel', {}).get('code'), - 'uploader_url': info.get('channel', {}).get('permalink'), - 'timestamp': parse_iso8601(info.get('startAt')), + 'duration': int_or_none(info.get('airTime')), + 'timestamp': unified_timestamp(info.get('createdAt')), + 'uploader': try_get( + info, lambda x: x['channel']['title'], compat_str), + 'uploader_id': try_get( + info, lambda x: x['channel']['code'], compat_str), + 'uploader_url': try_get( + info, 
lambda x: x['channel']['permalink'], compat_str), 'view_count': int_or_none(info.get('viewCount')), - } \ No newline at end of file + 'comment_count': int_or_none(info.get('commentCount')), + 'tags': info.get('tags', []), + 'is_live': is_live, + } From 9d0c08a02c55136221621e8b8dda5860211af8dd Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Sat, 25 Feb 2017 01:40:12 +0800 Subject: [PATCH 145/225] [vevo] Fix videos with the new streams/streamsV3 format (closes #11719) --- ChangeLog | 2 ++ youtube_dl/extractor/vevo.py | 22 +++++++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/ChangeLog b/ChangeLog index e0e1f52d0..701afd57a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,8 @@ version <unreleased> Extractors +* [vevo] Fix extraction for videos with the new streams/streamsV3 format + (#11719) + [njpwworld] Add new extractor (#11561) diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py index c4e37f694..9aa38bc5a 100644 --- a/youtube_dl/extractor/vevo.py +++ b/youtube_dl/extractor/vevo.py @@ -17,12 +17,12 @@ from ..utils import ( class VevoBaseIE(InfoExtractor): - def _extract_json(self, webpage, video_id, item): + def _extract_json(self, webpage, video_id): return self._parse_json( self._search_regex( r'window\.__INITIAL_STORE__\s*=\s*({.+?});\s*</script>', webpage, 'initial store'), - video_id)['default'][item] + video_id) class VevoIE(VevoBaseIE): @@ -139,6 +139,11 @@ class VevoIE(VevoBaseIE): # no genres available 'url': 'http://www.vevo.com/watch/INS171400764', 'only_matching': True, + }, { + # Another case available only via the webpage; using streams/streamsV3 formats + # Geo-restricted to Netherlands/Germany + 'url': 'http://www.vevo.com/watch/boostee/pop-corn-clip-officiel/FR1A91600909', + 'only_matching': True, }] _VERSIONS = { 0: 'youtube', # only in AuthenticateVideo videoVersions @@ -193,7 +198,14 @@ class VevoIE(VevoBaseIE): # https://github.com/rg3/youtube-dl/issues/9366) if not video_versions: webpage = self._download_webpage(url, video_id) - video_versions = self._extract_json(webpage, video_id, 'streams')[video_id][0] + json_data = self._extract_json(webpage, video_id) + if 'streams' in json_data.get('default', {}): + video_versions = json_data['default']['streams'][video_id][0] + else: + video_versions = [ + value + for key, value in json_data['apollo']['data'].items() + if key.startswith('%s.streams' % video_id)] uploader = None artist = None @@ -207,7 +219,7 @@ class VevoIE(VevoBaseIE): formats = [] for video_version in video_versions: - version = self._VERSIONS.get(video_version['version']) + version = self._VERSIONS.get(video_version.get('version'), 'generic') version_url = video_version.get('url') if not version_url: continue @@ -339,7 +351,7 @@ class VevoPlaylistIE(VevoBaseIE): if video_id: return self.url_result('vevo:%s' % video_id, VevoIE.ie_key()) - playlists = self._extract_json(webpage, playlist_id, '%ss' % playlist_kind) + playlists = self._extract_json(webpage, playlist_id)['default']['%ss' % playlist_kind] playlist = (list(playlists.values())[0] if playlist_kind == 'playlist' else playlists[playlist_id]) From b3aec47665104223578181c71cc90112f5b17fce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 25 Feb 2017 23:27:14 +0700 Subject: [PATCH 146/225] [tvigle] Raise GeoRestrictedError --- youtube_dl/extractor/tvigle.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/tvigle.py 
b/youtube_dl/extractor/tvigle.py index f3817ab28..3475ef4c3 100644 --- a/youtube_dl/extractor/tvigle.py +++ b/youtube_dl/extractor/tvigle.py @@ -17,6 +17,9 @@ class TvigleIE(InfoExtractor): IE_DESC = 'Интернет-телевидение Tvigle.ru' _VALID_URL = r'https?://(?:www\.)?(?:tvigle\.ru/(?:[^/]+/)+(?P<display_id>[^/]+)/$|cloud\.tvigle\.ru/video/(?P<id>\d+))' + _GEO_BYPASS = False + _GEO_COUNTRIES = ['RU'] + _TESTS = [ { 'url': 'http://www.tvigle.ru/video/sokrat/', @@ -72,8 +75,13 @@ class TvigleIE(InfoExtractor): error_message = item.get('errorMessage') if not videos and error_message: - raise ExtractorError( - '%s returned error: %s' % (self.IE_NAME, error_message), expected=True) + if item.get('isGeoBlocked') is True: + self.raise_geo_restricted( + msg=error_message, countries=self._GEO_COUNTRIES) + else: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, error_message), + expected=True) title = item['title'] description = item.get('description') From a5cf17989b04e559fda9a2731a3b33e881c5cc3c Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Sun, 26 Feb 2017 17:24:54 +0800 Subject: [PATCH 147/225] [MDR] Relax _VALID_URL and playerURL matching and update _TESTS Ref: #12169 --- ChangeLog | 1 + youtube_dl/extractor/mdr.py | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/ChangeLog b/ChangeLog index 701afd57a..4009acf37 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version <unreleased> Extractors ++ [MDR] Recognize more URL patterns (#12169) * [vevo] Fix extraction for videos with the new streams/streamsV3 format (#11719) + [njpwworld] Add new extractor (#11561) diff --git a/youtube_dl/extractor/mdr.py b/youtube_dl/extractor/mdr.py index 6e4290aad..322e5b45a 100644 --- a/youtube_dl/extractor/mdr.py +++ b/youtube_dl/extractor/mdr.py @@ -14,7 +14,7 @@ from ..utils import ( class MDRIE(InfoExtractor): IE_DESC = 'MDR.DE and KiKA' - _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+-?(?P<id>\d+)(?:_.+?)?\.html' + _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)(?:_.+?)?\.html' _TESTS = [{ # MDR regularly deletes its videos @@ -31,6 +31,7 @@ class MDRIE(InfoExtractor): 'duration': 250, 'uploader': 'MITTELDEUTSCHER RUNDFUNK', }, + 'skip': '404 not found', }, { 'url': 'http://www.kika.de/baumhaus/videos/video19636.html', 'md5': '4930515e36b06c111213e80d1e4aad0e', @@ -41,6 +42,7 @@ class MDRIE(InfoExtractor): 'duration': 134, 'uploader': 'KIKA', }, + 'skip': '404 not found', }, { 'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html', 'md5': '5fe9c4dd7d71e3b238f04b8fdd588357', @@ -49,11 +51,21 @@ class MDRIE(InfoExtractor): 'ext': 'mp4', 'title': 'Beutolomäus und der geheime Weihnachtswunsch', 'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd', - 'timestamp': 1450950000, - 'upload_date': '20151224', + 'timestamp': 1482541200, + 'upload_date': '20161224', 'duration': 4628, 'uploader': 'KIKA', }, + }, { + # audio with alternative playerURL pattern + 'url': 'http://www.mdr.de/kultur/videos-und-audios/audio-radio/operation-mindfuck-robert-wilson100.html', + 'info_dict': { + 'id': '100', + 'ext': 'mp4', + 'title': 'Feature: Operation Mindfuck - Robert Anton Wilson', + 'duration': 3239, + 'uploader': 'MITTELDEUTSCHER RUNDFUNK', + }, }, { 'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html', 'only_matching': True, @@ -71,7 +83,7 @@ class MDRIE(InfoExtractor): webpage = self._download_webpage(url, video_id) 
data_url = self._search_regex( - r'(?:dataURL|playerXml(?:["\'])?)\s*:\s*(["\'])(?P<url>.+/(?:video|audio)-?[0-9]+-avCustom\.xml)\1', + r'(?:dataURL|playerXml(?:["\'])?)\s*:\s*(["\'])(?P<url>.+?-avCustom\.xml)\1', webpage, 'data url', group='url').replace(r'\/', '/') doc = self._download_xml( From 8878789f1117b59186ecc6bf82f462201166a26a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 26 Feb 2017 16:50:57 +0700 Subject: [PATCH 148/225] [dailymotion] Raise GeoRestrictedError --- youtube_dl/extractor/dailymotion.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index b312401dc..246efde43 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -282,9 +282,14 @@ class DailymotionIE(DailymotionBaseInfoExtractor): } def _check_error(self, info): + error = info.get('error') if info.get('error') is not None: + title = error['title'] + # See https://developer.dailymotion.com/api#access-error + if error.get('code') == 'DM007': + self.raise_geo_restricted(msg=title) raise ExtractorError( - '%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True) + '%s said: %s' % (self.IE_NAME, title), expected=True) def _get_subtitles(self, video_id, webpage): try: From fd5c4aab5958a2a086072488913cc190ff028bc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 26 Feb 2017 16:51:21 +0700 Subject: [PATCH 149/225] [youtube] Raise GeoRestrictedError --- youtube_dl/extractor/youtube.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index dec02804b..b3c3cd5b2 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -47,7 +47,6 @@ from ..utils import ( unsmuggle_url, uppercase_escape, urlencode_postdata, - ISO3166Utils, ) @@ -371,6 +370,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): } _SUBTITLE_FORMATS = ('ttml', 'vtt') + _GEO_BYPASS = False + IE_NAME = 'youtube' _TESTS = [ { @@ -917,7 +918,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor): # itag 212 'url': '1t24XAntNCY', 'only_matching': True, - } + }, + { + # geo restricted to JP + 'url': 'sJL6WA-aGkQ', + 'only_matching': True, + }, ] def __init__(self, *args, **kwargs): @@ -1376,11 +1382,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): if 'token' not in video_info: if 'reason' in video_info: if 'The uploader has not made this video available in your country.' 
in video_info['reason']: - regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None) - if regions_allowed: - raise ExtractorError('YouTube said: This video is available in %s only' % ( - ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))), - expected=True) + regions_allowed = self._html_search_meta( + 'regionsAllowed', video_webpage, default=None) + countries = regions_allowed.split(',') if regions_allowed else None + self.raise_geo_restricted( + msg=video_info['reason'][0], countries=countries) raise ExtractorError( 'YouTube said: %s' % video_info['reason'][0], expected=True, video_id=video_id) @@ -2126,6 +2132,10 @@ class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor): 'id': 'UUs0ifCMCm1icqRbqhUINa0w', 'title': 'Uploads from Deus Ex', }, + }, { + # geo restricted to JP + 'url': 'https://www.youtube.com/user/kananishinoSMEJ', + 'only_matching': True, }] @classmethod From 7fd465525695bb589fa8932e1e36f38ad511735b Mon Sep 17 00:00:00 2001 From: xbe <xbe@users.noreply.github.com> Date: Sun, 26 Feb 2017 03:08:10 -0800 Subject: [PATCH 150/225] [crunchyroll] Extract uploader name that's not a link Provide the Crunchyroll extractor with the ability to extract uploader names that aren't links. Add a test for this new functionality. This fixes #12267. --- youtube_dl/extractor/crunchyroll.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index a1fc6a756..9c6cf00ca 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -207,6 +207,21 @@ class CrunchyrollIE(CrunchyrollBaseIE): # Just test metadata extraction 'skip_download': True, }, + }, { + # make sure we can extract an uploader name that's not a link + 'url': 'http://www.crunchyroll.com/hakuoki-reimeiroku/episode-1-dawn-of-the-divine-warriors-606899', + 'info_dict': { + 'id': '606899', + 'ext': 'mp4', + 'title': 'Hakuoki Reimeiroku Episode 1 – Dawn of the Divine Warriors', + 'description': 'Ryunosuke was left to die, but Serizawa-san asked him a simple question "Do you want to live?"', + 'uploader': 'Geneon Entertainment', + 'upload_date': '20120717', + }, + 'params': { + # just test metadata extraction + 'skip_download': True, + }, }] _FORMAT_IDS = { @@ -388,8 +403,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text if video_upload_date: video_upload_date = unified_strdate(video_upload_date) video_uploader = self._html_search_regex( - r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage, - 'video_uploader', fatal=False) + # try looking for both an uploader that's a link and one that's not + [r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', r'<div>\s*Publisher:\s*<span>\s*(.+?)\s*</span>\s*</div>'], + webpage, 'video_uploader', fatal=False) available_fmts = [] for a, fmt in re.findall(r'(<a[^>]+token=["\']showmedia\.([0-9]{3,4})p["\'][^>]+>)', webpage): From fdeea72611b2c2c29a9a34e91ae4bd9b8dfc1f64 Mon Sep 17 00:00:00 2001 From: Alex Seiler <seileralex@gmail.com> Date: Sun, 26 Feb 2017 15:05:52 +0100 Subject: [PATCH 151/225] [cda] Decode URL (fixes #12255) --- youtube_dl/extractor/cda.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/youtube_dl/extractor/cda.py b/youtube_dl/extractor/cda.py index ae7af2f0e..1ee35b501 100755 --- a/youtube_dl/extractor/cda.py +++ b/youtube_dl/extractor/cda.py @@ -1,6 +1,7 @@ # coding: utf-8 from __future__ import unicode_literals +import codecs import re from 
.common import InfoExtractor @@ -96,6 +97,10 @@ class CDAIE(InfoExtractor): if not video or 'file' not in video: self.report_warning('Unable to extract %s version information' % version) return + if video['file'].startswith('uggc'): + video['file'] = codecs.decode(video['file'], 'rot_13') + if video['file'].endswith('adc.mp4'): + video['file'] = video['file'].replace('adc.mp4', '.mp4') f = { 'url': video['file'], } From 892b47ab6c9147e785c562229e6dab305fffba61 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Mon, 27 Feb 2017 21:34:33 +0800 Subject: [PATCH 152/225] [scivee] Remove extractor (#9315) The Wikipedia page is changed from active to down: https://en.wikipedia.org/w/index.php?title=SciVee&diff=prev&oldid=723161154 Some other interesting bits: $ nslookup www.scivee.tv Server: 8.8.8.8 Address: 8.8.8.8#53 Non-authoritative answer: www.scivee.tv canonical name = scivee.rcsb.org. Name: scivee.rcsb.org Address: 132.249.231.211 $ nslookup rcsb.org Server: 8.8.8.8 Address: 8.8.8.8#53 Non-authoritative answer: Name: rcsb.org Address: 132.249.231.77 Both IPs are from UCSD. I guess it's maintained by a lab and they don't maintain it anymore. --- ChangeLog | 1 + youtube_dl/extractor/extractors.py | 1 - youtube_dl/extractor/scivee.py | 57 ------------------------------ 3 files changed, 1 insertion(+), 58 deletions(-) delete mode 100644 youtube_dl/extractor/scivee.py diff --git a/ChangeLog b/ChangeLog index 4009acf37..4ed9cb4e0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version <unreleased> Extractors +- [scivee] Remove extractor (#9315) + [MDR] Recognize more URL patterns (#12169) * [vevo] Fix extraction for videos with the new streams/streamsV3 format (#11719) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 58139ee4e..d09104096 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -838,7 +838,6 @@ from .safari import ( from .sapo import SapoIE from .savefrom import SaveFromIE from .sbs import SBSIE -from .scivee import SciVeeIE from .screencast import ScreencastIE from .screencastomatic import ScreencastOMaticIE from .scrippsnetworks import ScrippsNetworksWatchIE diff --git a/youtube_dl/extractor/scivee.py b/youtube_dl/extractor/scivee.py deleted file mode 100644 index b1ca12fde..000000000 --- a/youtube_dl/extractor/scivee.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..utils import int_or_none - - -class SciVeeIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?scivee\.tv/node/(?P<id>\d+)' - - _TEST = { - 'url': 'http://www.scivee.tv/node/62352', - 'md5': 'b16699b74c9e6a120f6772a44960304f', - 'info_dict': { - 'id': '62352', - 'ext': 'mp4', - 'title': 'Adam Arkin at the 2014 DOE JGI Genomics of Energy & Environment Meeting', - 'description': 'md5:81f1710638e11a481358fab1b11059d7', - }, - 'skip': 'Not accessible from Travis CI server', - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - # annotations XML is malformed - annotations = self._download_webpage( - 'http://www.scivee.tv/assets/annotations/%s' % video_id, video_id, 'Downloading annotations') - - title = self._html_search_regex(r'<title>([^<]+)', annotations, 'title') - description = self._html_search_regex(r'([^<]+)', annotations, 'abstract', fatal=False) - filesize = int_or_none(self._html_search_regex( - r'([^<]+)', annotations, 'filesize', fatal=False)) - - formats = [ - { 
- 'url': 'http://www.scivee.tv/assets/audio/%s' % video_id, - 'ext': 'mp3', - 'format_id': 'audio', - }, - { - 'url': 'http://www.scivee.tv/assets/video/%s' % video_id, - 'ext': 'mp4', - 'format_id': 'video', - 'filesize': filesize, - }, - ] - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id, - 'formats': formats, - } From 0e879f432afe5b9a04a06cbc697cf28d08ac5518 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 27 Feb 2017 22:22:43 +0700 Subject: [PATCH 153/225] [youtube:channel] Remove duplicate test --- youtube_dl/extractor/youtube.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index b3c3cd5b2..7053e5512 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -2132,10 +2132,6 @@ class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor): 'id': 'UUs0ifCMCm1icqRbqhUINa0w', 'title': 'Uploads from Deus Ex', }, - }, { - # geo restricted to JP - 'url': 'https://www.youtube.com/user/kananishinoSMEJ', - 'only_matching': True, }] @classmethod @@ -2236,7 +2232,7 @@ class YoutubeUserIE(YoutubeChannelIE): 'url': 'https://www.youtube.com/gametrailers', 'only_matching': True, }, { - # This channel is not available. + # This channel is not available, geo restricted to JP 'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos', 'only_matching': True, }] From 0dc5a86a329314f551f86c2ef3202342b7506667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 27 Feb 2017 22:43:19 +0700 Subject: [PATCH 154/225] [npo] Add support for hetklokhuis.nl (closes #12293) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/npo.py | 44 ++++++++++++++++++++++-------- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d09104096..0910b7b05 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -669,6 +669,7 @@ from .npo import ( NPORadioIE, NPORadioFragmentIE, SchoolTVIE, + HetKlokhuisIE, VPROIE, WNLIE, ) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 962437145..09e8d9987 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -416,7 +416,21 @@ class NPORadioFragmentIE(InfoExtractor): } -class SchoolTVIE(InfoExtractor): +class NPODataMidEmbedIE(InfoExtractor): + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + video_id = self._search_regex( + r'data-mid=(["\'])(?P(?:(?!\1).)+)\1', webpage, 'video_id', group='id') + return { + '_type': 'url_transparent', + 'ie_key': 'NPO', + 'url': 'npo:%s' % video_id, + 'display_id': display_id + } + + +class SchoolTVIE(NPODataMidEmbedIE): IE_NAME = 'schooltv' _VALID_URL = r'https?://(?:www\.)?schooltv\.nl/video/(?P[^/?#&]+)' @@ -435,17 +449,25 @@ class SchoolTVIE(InfoExtractor): } } - def _real_extract(self, url): - display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) - video_id = self._search_regex( - r'data-mid=(["\'])(?P(?:(?!\1).)+)\1', webpage, 'video_id', group='id') - return { - '_type': 'url_transparent', - 'ie_key': 'NPO', - 'url': 'npo:%s' % video_id, - 'display_id': display_id + +class HetKlokhuisIE(NPODataMidEmbedIE): + IE_NAME = 'schooltv' + _VALID_URL = r'https?://(?:www\.)?hetklokhuis.nl/[^/]+/\d+/(?P[^/?#&]+)' + + _TEST 
= { + 'url': 'http://hetklokhuis.nl/tv-uitzending/3471/Zwaartekrachtsgolven', + 'info_dict': { + 'id': 'VPWON_1260528', + 'display_id': 'Zwaartekrachtsgolven', + 'ext': 'm4v', + 'title': 'Het Klokhuis: Zwaartekrachtsgolven', + 'description': 'md5:c94f31fb930d76c2efa4a4a71651dd48', + 'upload_date': '20170223', + }, + 'params': { + 'skip_download': True } + } class NPOPlaylistBaseIE(NPOIE): From f264c62334fdd31a7620b4fdefb822e1bae6bd77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 27 Feb 2017 23:10:00 +0700 Subject: [PATCH 155/225] [npo] Add support for zapp.nl --- youtube_dl/extractor/npo.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 09e8d9987..7c2c93f27 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -51,7 +51,8 @@ class NPOIE(NPOBaseIE): (?: npo\.nl/(?!live|radio)(?:[^/]+/){2}| ntr\.nl/(?:[^/]+/){2,}| - omroepwnl\.nl/video/fragment/[^/]+__ + omroepwnl\.nl/video/fragment/[^/]+__| + zapp\.nl/[^/]+/(?:gemist|filmpjes)/ ) ) (?P[^/?#]+) @@ -140,6 +141,14 @@ class NPOIE(NPOBaseIE): 'upload_date': '20150508', 'duration': 462, }, + }, + { + 'url': 'http://www.zapp.nl/de-bzt-show/gemist/KN_1687547', + 'only_matching': True, + }, + { + 'url': 'http://www.zapp.nl/de-bzt-show/filmpjes/POMS_KN_7315118', + 'only_matching': True, } ] From dbc01fdb6f4b4c58469ffb75d00a179f5af5cdcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 27 Feb 2017 23:10:29 +0700 Subject: [PATCH 156/225] [hetklokhuis] Fix IE_NAME --- youtube_dl/extractor/npo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 7c2c93f27..b53c29993 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -460,7 +460,7 @@ class SchoolTVIE(NPODataMidEmbedIE): class HetKlokhuisIE(NPODataMidEmbedIE): - IE_NAME = 'schooltv' + IE_NAME = 'hetklokhuis' _VALID_URL = r'https?://(?:www\.)?hetklokhuis.nl/[^/]+/\d+/(?P[^/?#&]+)' _TEST = { From 18abb743762ce5b9b2ffd4d9d5e01b62621cc62e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 27 Feb 2017 23:13:51 +0700 Subject: [PATCH 157/225] [npo] Relax _VALID_URL for zapp.nl --- youtube_dl/extractor/npo.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index b53c29993..50473d777 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -52,7 +52,7 @@ class NPOIE(NPOBaseIE): npo\.nl/(?!live|radio)(?:[^/]+/){2}| ntr\.nl/(?:[^/]+/){2,}| omroepwnl\.nl/video/fragment/[^/]+__| - zapp\.nl/[^/]+/(?:gemist|filmpjes)/ + zapp\.nl/[^/]+/[^/]+/ ) ) (?P[^/?#]+) @@ -149,6 +149,10 @@ class NPOIE(NPOBaseIE): { 'url': 'http://www.zapp.nl/de-bzt-show/filmpjes/POMS_KN_7315118', 'only_matching': True, + }, + { + 'url': 'http://www.zapp.nl/beste-vrienden-quiz/extra-video-s/WO_NTR_1067990', + 'only_matching': True, } ] From c6184bcf7b58476b659a92290711e4c58faff277 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 27 Feb 2017 23:24:03 +0700 Subject: [PATCH 158/225] [ChangeLog] Actualize --- ChangeLog | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4ed9cb4e0..4cb897f7f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,11 +1,27 @@ version +Core +* [downloader/common] Limit displaying 2 digits after decimal point in sleep + interval message (#12183) ++ 
[extractor/common] Add preference to _parse_html5_media_entries + Extractors ++ [npo] Add support for zapp.nl ++ [npo] Add support for hetklokhuis.nl (#12293) - [scivee] Remove extractor (#9315) -+ [MDR] Recognize more URL patterns (#12169) ++ [cda] Decode download URL (#12255) ++ [crunchyroll] Improve uploader extraction (#12267) ++ [youtube] Raise GeoRestrictedError ++ [dailymotion] Raise GeoRestrictedError ++ [mdr] Recognize more URL patterns (#12169) ++ [tvigle] Raise GeoRestrictedError * [vevo] Fix extraction for videos with the new streams/streamsV3 format (#11719) -+ [njpwworld] Add new extractor (#11561) ++ [freshlive] Add support for freshlive.tv (#12175) ++ [xhamster] Capture and output videoClosed error (#12263) ++ [etonline] Add support for etonline.com (#12236) ++ [njpwworld] Add support for njpwworld.com (#11561) +* [amcnetworks] Relax URL regular expression (#12127) version 2017.02.24.1 From ef48a1175dc4e28b07c55ae7277d8196abec7ace Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 27 Feb 2017 23:26:07 +0700 Subject: [PATCH 159/225] release 2017.02.27 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- docs/supportedsites.md | 5 ++++- youtube_dl/version.py | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 564108122..6374f7c25 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.24.1*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.24.1** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.27*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.27** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.24.1 +[debug] youtube-dl version 2017.02.27 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 4cb897f7f..949ea1810 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version +version 2017.02.27 Core * [downloader/common] Limit displaying 2 digits after decimal point in sleep diff --git a/docs/supportedsites.md b/docs/supportedsites.md index f97397331..1b01c6d9d 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -239,6 +239,7 @@ - **ESPN** - **ESPNArticle** - **EsriVideo** + - **ETOnline** - **Europa** - **EveryonesMixtape** - **ExpoTV** @@ -274,6 +275,7 @@ - **francetvinfo.fr** - **Freesound** - **freespeech.org** + - **FreshLive** - **Funimation** - **FunnyOrDie** - **Fusion** @@ -310,6 +312,7 @@ - **HellPorno** - **Helsinki**: helsinki.fi - **HentaiStigma** + - **hetklokhuis** - **hgtv.com:show** - **HistoricFilms** - **history:topic**: History.com Topic @@ -511,6 +514,7 @@ - **Nintendo** - **njoy**: N-JOY - **njoy:embed** + - **NJPWWorld**: 新日本プロレスワールド - **NobelPrize** - **Noco** - **Normalboots** @@ -666,7 +670,6 @@ - **savefrom.net** - **SBS**: sbs.com.au - **schooltv** - - **SciVee** - **screen.yahoo:search**: Yahoo screen search - **Screencast** - **ScreencastOMatic** diff --git a/youtube_dl/version.py b/youtube_dl/version.py index fe7462eac..261218b80 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.24.1' +__version__ = '2017.02.27' From f4c68ba372655c8ff4d6b1cfecfa129796159afd Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Tue, 28 Feb 2017 21:40:22 +0800 Subject: [PATCH 160/225] [douyu] Fix extraction and update _TESTS They've switched from flv to hls Closes #12301 --- ChangeLog | 6 ++++ youtube_dl/extractor/douyutv.py | 53 ++++++--------------------------- 2 files changed, 15 insertions(+), 44 deletions(-) diff --git a/ChangeLog b/ChangeLog index 949ea1810..f9986ae61 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version + +Extractors +* [douyu] Fix extraction (#12301) + + version 2017.02.27 Core diff --git a/youtube_dl/extractor/douyutv.py b/youtube_dl/extractor/douyutv.py index 911594413..9a83fb31a 100644 --- a/youtube_dl/extractor/douyutv.py +++ b/youtube_dl/extractor/douyutv.py @@ -1,15 +1,7 @@ # coding: utf-8 from __future__ import unicode_literals -import hashlib -import time -import uuid - from .common import InfoExtractor -from ..compat import ( - compat_str, - compat_urllib_parse_urlencode, -) from ..utils import ( ExtractorError, unescapeHTML, @@ -24,8 +16,8 @@ class DouyuTVIE(InfoExtractor): 'info_dict': { 'id': '17732', 'display_id': 'iseven', - 'ext': 'flv', - 'title': 're:^清晨醒脑!T-ara根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'ext': 'mp4', + 'title': 're:^清晨醒脑!T-ARA根本停不下来! 
[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': r're:.*m7show@163\.com.*', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': '7师傅', @@ -39,7 +31,7 @@ class DouyuTVIE(InfoExtractor): 'info_dict': { 'id': '85982', 'display_id': '85982', - 'ext': 'flv', + 'ext': 'mp4', 'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:746a2f7a253966a06755a912f0acc0d2', 'thumbnail': r're:^https?://.*\.jpg$', @@ -55,8 +47,8 @@ class DouyuTVIE(InfoExtractor): 'info_dict': { 'id': '17732', 'display_id': '17732', - 'ext': 'flv', - 'title': 're:^清晨醒脑!T-ara根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'ext': 'mp4', + 'title': 're:^清晨醒脑!T-ARA根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': r're:.*m7show@163\.com.*', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': '7师傅', @@ -96,45 +88,18 @@ class DouyuTVIE(InfoExtractor): if room.get('show_status') == '2': raise ExtractorError('Live stream is offline', expected=True) - tt = compat_str(int(time.time() / 60)) - did = uuid.uuid4().hex.upper() - - sign_content = ''.join((room_id, did, self._API_KEY, tt)) - sign = hashlib.md5((sign_content).encode('utf-8')).hexdigest() - - flv_data = compat_urllib_parse_urlencode({ - 'cdn': 'ws', - 'rate': '0', - 'tt': tt, - 'did': did, - 'sign': sign, - }) - - video_info = self._download_json( - 'http://www.douyu.com/lapi/live/getPlay/%s' % room_id, video_id, - data=flv_data, note='Downloading video info', - headers={'Content-Type': 'application/x-www-form-urlencoded'}) - - error_code = video_info.get('error', 0) - if error_code is not 0: - raise ExtractorError( - '%s reported error %i' % (self.IE_NAME, error_code), - expected=True) - - base_url = video_info['data']['rtmp_url'] - live_path = video_info['data']['rtmp_live'] - - video_url = '%s/%s' % (base_url, live_path) + formats = self._extract_m3u8_formats( + room['hls_url'], video_id, ext='mp4') title = self._live_title(unescapeHTML(room['room_name'])) - description = room.get('notice') + description = room.get('show_details') thumbnail = room.get('room_src') uploader = room.get('nickname') return { 'id': room_id, 'display_id': video_id, - 'url': video_url, + 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, From c9619f0a17927086c49e4b443202be296d734a76 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Mon, 27 Feb 2017 18:47:47 +0800 Subject: [PATCH 161/225] [aes] Add aes_cbc_encrypt Used in daisuki.net (#4738) --- test/test_aes.py | 9 ++++++++- youtube_dl/aes.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/test/test_aes.py b/test/test_aes.py index 54078a66d..78a28751b 100644 --- a/test/test_aes.py +++ b/test/test_aes.py @@ -8,7 +8,7 @@ import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from youtube_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_decrypt_text +from youtube_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes import base64 @@ -34,6 +34,13 @@ class TestAES(unittest.TestCase): decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) + def test_cbc_encrypt(self): + data = bytes_to_intlist(self.secret_msg) + encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv)) + self.assertEqual( + encrypted, + 
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd") + def test_decrypt_text(self): password = intlist_to_bytes(self.key).decode('utf-8') encrypted = base64.b64encode( diff --git a/youtube_dl/aes.py b/youtube_dl/aes.py index b8ff45481..c5bb3c4ef 100644 --- a/youtube_dl/aes.py +++ b/youtube_dl/aes.py @@ -60,6 +60,34 @@ def aes_cbc_decrypt(data, key, iv): return decrypted_data +def aes_cbc_encrypt(data, key, iv): + """ + Encrypt with aes in CBC mode. Using PKCS#7 padding + + @param {int[]} data cleartext + @param {int[]} key 16/24/32-Byte cipher key + @param {int[]} iv 16-Byte IV + @returns {int[]} encrypted data + """ + expanded_key = key_expansion(key) + block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES)) + + encrypted_data = [] + previous_cipher_block = iv + for i in range(block_count): + block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] + remaining_length = BLOCK_SIZE_BYTES - len(block) + block += [remaining_length] * remaining_length + mixed_block = xor(block, previous_cipher_block) + + encrypted_block = aes_encrypt(mixed_block, expanded_key) + encrypted_data += encrypted_block + + previous_cipher_block = encrypted_block + + return encrypted_data + + def key_expansion(data): """ Generate key schedule From f48409c7ac186fa38bbeb2df2b210e37a18eb04b Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Mon, 27 Feb 2017 18:50:19 +0800 Subject: [PATCH 162/225] [utils] Add pkcs1pad Used in daisuki.net (#4738) --- test/test_utils.py | 9 +++++++++ youtube_dl/utils.py | 15 +++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/test/test_utils.py b/test/test_utils.py index 3cdb21d40..aefd94518 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -52,6 +52,7 @@ from youtube_dl.utils import ( parse_filesize, parse_count, parse_iso8601, + pkcs1pad, read_batch_urls, sanitize_filename, sanitize_path, @@ -1104,6 +1105,14 @@ The first line ohdave_rsa_encrypt(b'aa111222', e, N), '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881') + def test_pkcs1pad(self): + data = [1, 2, 3] + padded_data = pkcs1pad(data, 32) + self.assertEqual(padded_data[:2], [0, 2]) + self.assertEqual(padded_data[28:], [0, 1, 2, 3]) + + self.assertRaises(ValueError, pkcs1pad, data, 8) + def test_encode_base_n(self): self.assertEqual(encode_base_n(0, 30), '0') self.assertEqual(encode_base_n(80, 30), '2k') diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 17b83794a..8bd075eaf 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -3336,6 +3336,21 @@ def ohdave_rsa_encrypt(data, exponent, modulus): return '%x' % encrypted +def pkcs1pad(data, length): + """ + Padding input data with PKCS#1 scheme + + @param {int[]} data input data + @param {int} length target length + @returns {int[]} padded data + """ + if len(data) > length - 11: + raise ValueError('Input data too long for PKCS#1 padding') + + pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)] + return [0, 2] + pseudo_random + [0] + data + + def encode_base_n(num, n, table=None): FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' if not table: From 0a5445ddbeb8d391bbac92e5fe9074c6aa2e1565 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Tue, 28 Feb 2017 19:16:55 +0800 Subject: [PATCH 163/225] [utils] Add bytes_to_long() and long_to_bytes() Used in daisuki.net (#4738) Both are adapted from public domain PyCrypto: 
https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py --- youtube_dl/utils.py | 51 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 8bd075eaf..807183f4a 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -3319,6 +3319,57 @@ class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): self, req, proxy, type) +# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is +# released into Public Domain +# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387 + +def long_to_bytes(n, blocksize=0): + """long_to_bytes(n:long, blocksize:int) : string + Convert a long integer to a byte string. + + If optional blocksize is given and greater than zero, pad the front of the + byte string with binary zeros so that the length is a multiple of + blocksize. + """ + # after much testing, this algorithm was deemed to be the fastest + s = b'' + n = int(n) + while n > 0: + s = compat_struct_pack('>I', n & 0xffffffff) + s + n = n >> 32 + # strip off leading zeros + for i in range(len(s)): + if s[i] != b'\000'[0]: + break + else: + # only happens when n == 0 + s = b'\000' + i = 0 + s = s[i:] + # add back some pad bytes. this could be done more efficiently w.r.t. the + # de-padding being done above, but sigh... + if blocksize > 0 and len(s) % blocksize: + s = (blocksize - len(s) % blocksize) * b'\000' + s + return s + + +def bytes_to_long(s): + """bytes_to_long(string) : long + Convert a byte string to a long integer. + + This is (essentially) the inverse of long_to_bytes(). + """ + acc = 0 + length = len(s) + if length % 4: + extra = (4 - length % 4) + s = b'\000' * extra + s + length = length + extra + for i in range(0, length, 4): + acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] + return acc + + def ohdave_rsa_encrypt(data, exponent, modulus): ''' Implement OHDave's RSA algorithm. 
See http://www.ohdave.com/rsa/ From 9bd05b5a18c535f5517158d505af5dced498d23e Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Mon, 27 Feb 2017 18:56:45 +0800 Subject: [PATCH 164/225] [daisuki] Add new extractor (closes #4738) --- ChangeLog | 1 + youtube_dl/extractor/daisuki.py | 144 +++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 4 + 3 files changed, 149 insertions(+) create mode 100644 youtube_dl/extractor/daisuki.py diff --git a/ChangeLog b/ChangeLog index f9986ae61..401c5885e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version Extractors ++ [daisuki] Add new extractor (#2486, #3186, #4738, #6175, #7776, #10060) * [douyu] Fix extraction (#12301) diff --git a/youtube_dl/extractor/daisuki.py b/youtube_dl/extractor/daisuki.py new file mode 100644 index 000000000..6fd09faf1 --- /dev/null +++ b/youtube_dl/extractor/daisuki.py @@ -0,0 +1,144 @@ +from __future__ import unicode_literals + +import base64 +import json +import random +import re + +from .common import InfoExtractor +from ..aes import ( + aes_cbc_decrypt, + aes_cbc_encrypt, +) +from ..utils import ( + bytes_to_intlist, + bytes_to_long, + clean_html, + ExtractorError, + intlist_to_bytes, + get_element_by_id, + js_to_json, + int_or_none, + long_to_bytes, + pkcs1pad, + remove_end, +) + + +class DaisukiIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?daisuki\.net/[^/]+/[^/]+/[^/]+/watch\.[^.]+\.(?P\d+)\.html' + + _TEST = { + 'url': 'http://www.daisuki.net/tw/en/anime/watch.TheIdolMasterCG.11213.html', + 'info_dict': { + 'id': '11213', + 'ext': 'mp4', + 'title': '#01 Who is in the pumpkin carriage? - THE IDOLM@STER CINDERELLA GIRLS', + 'creator': 'BANDAI NAMCO Entertainment', + }, + 'params': { + 'skip_download': True, # AES-encrypted HLS stream + }, + } + + # The public key in PEM format can be found in clientlibs_anime_watch.min.js + _RSA_KEY = (0xc5524c25e8e14b366b3754940beeb6f96cb7e2feef0b932c7659a0c5c3bf173d602464c2df73d693b513ae06ff1be8f367529ab30bf969c5640522181f2a0c51ea546ae120d3d8d908595e4eff765b389cde080a1ef7f1bbfb07411cc568db73b7f521cedf270cbfbe0ddbc29b1ac9d0f2d8f4359098caffee6d07915020077d, 65537) + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + flashvars = self._parse_json(self._search_regex( + r'(?s)var\s+flashvars\s*=\s*({.+?});', webpage, 'flashvars'), + video_id, transform_source=js_to_json) + + iv = [0] * 16 + + data = {} + for key in ('device_cd', 'mv_id', 'ss1_prm', 'ss2_prm', 'ss3_prm', 'ss_id'): + data[key] = flashvars.get(key, '') + + encrypted_rtn = None + + # Some AES keys are rejected. 
Try it with different AES keys + for idx in range(5): + aes_key = [random.randint(0, 254) for _ in range(32)] + padded_aeskey = intlist_to_bytes(pkcs1pad(aes_key, 128)) + + n, e = self._RSA_KEY + encrypted_aeskey = long_to_bytes(pow(bytes_to_long(padded_aeskey), e, n)) + init_data = self._download_json('http://www.daisuki.net/bin/bgn/init', video_id, query={ + 's': flashvars.get('s', ''), + 'c': flashvars.get('ss3_prm', ''), + 'e': url, + 'd': base64.b64encode(intlist_to_bytes(aes_cbc_encrypt( + bytes_to_intlist(json.dumps(data)), + aes_key, iv))).decode('ascii'), + 'a': base64.b64encode(encrypted_aeskey).decode('ascii'), + }, note='Downloading JSON metadata' + (' (try #%d)' % (idx + 1) if idx > 0 else '')) + + if 'rtn' in init_data: + encrypted_rtn = init_data['rtn'] + break + + self._sleep(5, video_id) + + if encrypted_rtn is None: + raise ExtractorError('Failed to fetch init data') + + rtn = self._parse_json( + intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist( + base64.b64decode(encrypted_rtn)), + aes_key, iv)).decode('utf-8').rstrip('\0'), + video_id) + + formats = self._extract_m3u8_formats( + rtn['play_url'], video_id, ext='mp4', entry_protocol='m3u8_native') + + title = remove_end(self._og_search_title(webpage), ' - DAISUKI') + + creator = self._html_search_regex( + r'Creator\s*:\s*([^<]+)', webpage, 'creator', fatal=False) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'creator': creator, + } + + +class DaisukiPlaylistIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)daisuki\.net/[^/]+/[^/]+/[^/]+/detail\.(?P[a-zA-Z0-9]+)\.html' + + _TEST = { + 'url': 'http://www.daisuki.net/tw/en/anime/detail.TheIdolMasterCG.html', + 'info_dict': { + 'id': 'TheIdolMasterCG', + 'title': 'THE IDOLM@STER CINDERELLA GIRLS', + 'description': 'md5:0f2c028a9339f7a2c7fbf839edc5c5d8', + }, + 'playlist_count': 26, + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + episode_pattern = r'''(?sx) + ]+delay="[^"]+/(\d+)/movie\.jpg".+? + ]+class=".*?\bepisodeNumber\b.*?">(?:]+>)?([^<]+)''' + entries = [{ + '_type': 'url_transparent', + 'url': url.replace('detail', 'watch').replace('.html', '.' 
+ movie_id + '.html'), + 'episode_id': episode_id, + 'episode_number': int_or_none(episode_id), + } for movie_id, episode_id in re.findall(episode_pattern, webpage)] + + playlist_title = remove_end( + self._og_search_title(webpage, fatal=False), ' - Anime - DAISUKI') + playlist_description = clean_html(get_element_by_id('synopsisTxt', webpage)) + + return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 0910b7b05..e251d8478 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -227,6 +227,10 @@ from .dailymotion import ( DailymotionUserIE, DailymotionCloudIE, ) +from .daisuki import ( + DaisukiIE, + DaisukiPlaylistIE, +) from .daum import ( DaumIE, DaumClipIE, From 7c4aa6fd6fd6fadf1cf1942c279cd5c0ff5ae498 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Tue, 28 Feb 2017 22:29:01 +0800 Subject: [PATCH 165/225] [daisuki] Add subtitles (#4738) --- youtube_dl/extractor/daisuki.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/youtube_dl/extractor/daisuki.py b/youtube_dl/extractor/daisuki.py index 6fd09faf1..58cc98666 100644 --- a/youtube_dl/extractor/daisuki.py +++ b/youtube_dl/extractor/daisuki.py @@ -34,6 +34,11 @@ class DaisukiIE(InfoExtractor): 'id': '11213', 'ext': 'mp4', 'title': '#01 Who is in the pumpkin carriage? - THE IDOLM@STER CINDERELLA GIRLS', + 'subtitles': { + 'mul': [{ + 'ext': 'ttml', + }], + }, 'creator': 'BANDAI NAMCO Entertainment', }, 'params': { @@ -101,10 +106,20 @@ class DaisukiIE(InfoExtractor): creator = self._html_search_regex( r'Creator\s*:\s*([^<]+)', webpage, 'creator', fatal=False) + subtitles = {} + caption_url = rtn.get('caption_url') + if caption_url: + # mul: multiple languages + subtitles['mul'] = [{ + 'url': caption_url, + 'ext': 'ttml', + }] + return { 'id': video_id, 'title': title, 'formats': formats, + 'subtitles': subtitles, 'creator': creator, } From 87dadd456a138c3107ff6254bd03ed832cf2e6d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 28 Feb 2017 23:06:47 +0700 Subject: [PATCH 166/225] [youtube:playlist] Recognize another playlist pattern (closes #11928, closes #12286) --- youtube_dl/extractor/youtube.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 7053e5512..81c793921 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -1851,7 +1851,7 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor): (?: youtube\.com/ (?: - (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries) + (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11})) \? (?:.*?[&;])*? 
(?:p|a|list)= | p/ )| @@ -1924,6 +1924,13 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor): 'title': 'JODA15', 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu', } + }, { + 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl', + 'playlist_mincount': 485, + 'info_dict': { + 'title': '2017 華語最新單曲 (2/24更新)', + 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl', + } }, { 'note': 'Embedded SWF player', 'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0', @@ -2072,7 +2079,7 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor): # Check if it's a video-specific URL query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) video_id = query_dict.get('v', [None])[0] or self._search_regex( - r'(?:^|//)youtu\.be/([0-9A-Za-z_-]{11})', url, + r'(?:(?:^|//)youtu\.be/|youtube\.com/embed/(?!videoseries))([0-9A-Za-z_-]{11})', url, 'video id', default=None) if video_id: if self._downloader.params.get('noplaylist'): From 948519b35dec420c3b3ca5369bcba1dead31fcc6 Mon Sep 17 00:00:00 2001 From: Alex Seiler Date: Fri, 17 Feb 2017 04:49:25 +0100 Subject: [PATCH 167/225] [azmedien:showplaylist] Add support for all episodes playlists --- youtube_dl/extractor/azmedien.py | 49 ++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 50 insertions(+) diff --git a/youtube_dl/extractor/azmedien.py b/youtube_dl/extractor/azmedien.py index cbc3ed564..817fc6d57 100644 --- a/youtube_dl/extractor/azmedien.py +++ b/youtube_dl/extractor/azmedien.py @@ -5,6 +5,7 @@ import re from .common import InfoExtractor from .kaltura import KalturaIE from ..utils import ( + get_element_by_class, get_element_by_id, strip_or_none, urljoin, @@ -170,3 +171,51 @@ class AZMedienPlaylistIE(AZMedienBaseIE): 'video-title', webpage)), group='title') return self.playlist_result(entries, show_id, title) + + +class AZMedienShowPlaylistIE(AZMedienBaseIE): + IE_DESC = 'AZ Medien Show playlists' + _VALID_URL = r'''(?x) + https?:// + (?:www\.)? 
+ (?P + (?: + telezueri\.ch| + telebaern\.tv| + telem1\.ch + )/ + (?: + all-episodes| + alle-episoden + ) + /[^/]+ + ) + ''' + + _TEST = { + 'url': 'http://www.telezueri.ch/all-episodes/astrotalk', + 'info_dict': { + 'id': 'telezueri.ch/all-episodes/astrotalk', + 'title': 'TeleZüri: AstroTalk - alle episoden', + 'description': 'md5:4c0f7e7d741d906004266e295ceb4a26', + }, + 'playlist_mincount': 13, + 'params': { + 'skip_download': True, + } + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + webpage = self._download_webpage(url, playlist_id) + episodes = get_element_by_class('search-mobile-box', webpage) + entries = [self.url_result( + urljoin(url, m.group('url'))) for m in re.finditer( + r']+href=(["\'])(?P.+?)\1', episodes)] + title = self._og_search_title(webpage) + description = self._og_search_description(webpage) + return self.playlist_result( + entries, + playlist_id=playlist_id, + playlist_title=title, + playlist_description=description) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e251d8478..b1613a9d3 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -83,6 +83,7 @@ from .awaan import ( from .azmedien import ( AZMedienIE, AZMedienPlaylistIE, + AZMedienShowPlaylistIE, ) from .azubu import AzubuIE, AzubuLiveIE from .baidu import BaiduVideoIE From 43b38424a9ca2ce962036b17462d59b8acbf6dd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 28 Feb 2017 23:37:33 +0700 Subject: [PATCH 168/225] [azmedien:showplaylist] Improve (closes #12160) --- youtube_dl/extractor/azmedien.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/youtube_dl/extractor/azmedien.py b/youtube_dl/extractor/azmedien.py index 817fc6d57..f4e07d901 100644 --- a/youtube_dl/extractor/azmedien.py +++ b/youtube_dl/extractor/azmedien.py @@ -1,3 +1,4 @@ +# coding: utf-8 from __future__ import unicode_literals import re @@ -174,11 +175,10 @@ class AZMedienPlaylistIE(AZMedienBaseIE): class AZMedienShowPlaylistIE(AZMedienBaseIE): - IE_DESC = 'AZ Medien Show playlists' + IE_DESC = 'AZ Medien show playlists' _VALID_URL = r'''(?x) https?:// (?:www\.)? 
- (?P (?: telezueri\.ch| telebaern\.tv| @@ -187,22 +187,18 @@ class AZMedienShowPlaylistIE(AZMedienBaseIE): (?: all-episodes| alle-episoden - ) - /[^/]+ - ) + )/ + (?P[^/?#&]+) ''' _TEST = { 'url': 'http://www.telezueri.ch/all-episodes/astrotalk', 'info_dict': { - 'id': 'telezueri.ch/all-episodes/astrotalk', + 'id': 'astrotalk', 'title': 'TeleZüri: AstroTalk - alle episoden', 'description': 'md5:4c0f7e7d741d906004266e295ceb4a26', }, 'playlist_mincount': 13, - 'params': { - 'skip_download': True, - } } def _real_extract(self, url): @@ -211,11 +207,7 @@ class AZMedienShowPlaylistIE(AZMedienBaseIE): episodes = get_element_by_class('search-mobile-box', webpage) entries = [self.url_result( urljoin(url, m.group('url'))) for m in re.finditer( - r']+href=(["\'])(?P.+?)\1', episodes)] - title = self._og_search_title(webpage) + r']+href=(["\'])(?P(?:(?!\1).)+)\1', episodes)] + title = self._og_search_title(webpage, fatal=False) description = self._og_search_description(webpage) - return self.playlist_result( - entries, - playlist_id=playlist_id, - playlist_title=title, - playlist_description=description) + return self.playlist_result(entries, playlist_id, title, description) From 11bae9cdde8ed75b355d5e68ef57dae504ad94ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 28 Feb 2017 23:49:24 +0700 Subject: [PATCH 169/225] [ChangeLog] Actualize --- ChangeLog | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 401c5885e..8d33c055b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,7 +1,15 @@ version +Core ++ [utils] Add bytes_to_long and long_to_bytes ++ [utils] Add pkcs1pad ++ [aes] Add aes_cbc_encrypt + Extractors -+ [daisuki] Add new extractor (#2486, #3186, #4738, #6175, #7776, #10060) ++ [azmedien:showplaylist] Add support for show playlists (#12160) ++ [youtube:playlist] Recognize another playlist pattern (#11928, #12286) ++ [daisuki] Add support for daisuki.net (#2486, #3186, #4738, #6175, #7776, + #10060) * [douyu] Fix extraction (#12301) From 1dc24093f81f349c22e6bda13cb05c26ac3266f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 28 Feb 2017 23:59:22 +0700 Subject: [PATCH 170/225] release 2017.02.28 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- docs/supportedsites.md | 3 +++ youtube_dl/version.py | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 6374f7c25..c907ef931 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.27*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.27** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.28*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.28** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.27 +[debug] youtube-dl version 2017.02.28 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 8d33c055b..2a1fd9018 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version +version 2017.02.28 Core + [utils] Add bytes_to_long and long_to_bytes diff --git a/docs/supportedsites.md b/docs/supportedsites.md index 1b01c6d9d..a08e00fce 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -78,6 +78,7 @@ - **awaan:video** - **AZMedien**: AZ Medien videos - **AZMedienPlaylist**: AZ Medien playlists + - **AZMedienShowPlaylist**: AZ Medien show playlists - **Azubu** - **AzubuLive** - **BaiduVideo**: 百度视频 @@ -191,6 +192,8 @@ - **dailymotion:playlist** - **dailymotion:user** - **DailymotionCloud** + - **Daisuki** + - **DaisukiPlaylist** - **daum.net** - **daum.net:clip** - **daum.net:playlist** diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 261218b80..340e23bf8 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.27' +__version__ = '2017.02.28' From aa9cc2ecbfea6c82944b4e07f3e93c904f1ff421 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Wed, 1 Mar 2017 05:03:14 +0700 Subject: [PATCH 171/225] [npo] Adapt to app.php API (closes #12311) --- youtube_dl/extractor/npo.py | 79 ++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 44 deletions(-) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 50473d777..89082c189 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -17,27 +17,9 @@ from ..utils import ( class NPOBaseIE(InfoExtractor): def _get_token(self, video_id): - token_page = self._download_webpage( - 'http://ida.omroep.nl/npoplayer/i.js', - video_id, note='Downloading token') - token = self._search_regex( - r'npoplayer\.token = "(.+?)"', token_page, 'token') - # Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js - token_l = list(token) - first = second = None - for i in range(5, len(token_l) - 4): - if token_l[i].isdigit(): - if first is None: - first = i - elif second is None: - second = i - if first is None or second is None: - first = 12 - second = 13 - - token_l[first], token_l[second] = token_l[second], token_l[first] - - return ''.join(token_l) + return self._download_json( + 'http://ida.omroep.nl/app.php/auth', video_id, + note='Downloading token')['token'] class NPOIE(NPOBaseIE): @@ -187,32 +169,41 @@ class NPOIE(NPOBaseIE): pubopties = metadata.get('pubopties') if pubopties: quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std']) - for format_id in pubopties: - format_info = self._download_json( - 
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s' - % (video_id, format_id, token), - video_id, 'Downloading %s JSON' % format_id) - if format_info.get('error_code', 0) or format_info.get('errorcode', 0): + items = self._download_json( + 'http://ida.omroep.nl/app.php/%s' % video_id, + 'Downloading formats JSON', query={ + 'adaptive': 'yes', + 'token': token, + })['items'][0] + for num, item in enumerate(items): + item_url = item.get('url') + if not item_url: continue - streams = format_info.get('streams') - if streams: - try: - video_info = self._download_json( - streams[0] + '&type=json', - video_id, 'Downloading %s stream JSON' % format_id) - except ExtractorError as ee: - if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: - error = (self._parse_json(ee.cause.read().decode(), video_id, fatal=False) or {}).get('errorstring') - if error: - raise ExtractorError(error, expected=True) - raise - else: - video_info = format_info - video_url = video_info.get('url') + format_id = self._search_regex( + r'video/ida/([^/]+)', item_url, 'format id', + default=None) + try: + stream_info = self._download_json( + item_url + '&type=json', video_id, + 'Downloading %s stream JSON' % item.get('label') or format_id or num) + except ExtractorError as ee: + if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: + error = (self._parse_json( + ee.cause.read().decode(), video_id, + fatal=False) or {}).get('errorstring') + if error: + raise ExtractorError(error, expected=True) + raise + if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0): + continue + video_url = stream_info.get('url') if not video_url: continue - if format_id == 'adaptive': - formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4')) + if stream_info.get('family') == 'adaptive': + formats.extend(self._extract_m3u8_formats( + video_url, video_id, ext='mp4', + entry_protocol='m3u8_native', m3u8_id='hls', + fatal=False)) else: formats.append({ 'url': video_url, From 83e8fce628e810e2a5639ef9a21be839526512fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Wed, 1 Mar 2017 22:14:46 +0700 Subject: [PATCH 172/225] [npo] Improve extraction and update tests --- youtube_dl/extractor/npo.py | 410 ++++++++++++++++++------------------ 1 file changed, 201 insertions(+), 209 deletions(-) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 89082c189..be10fc486 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -3,15 +3,19 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_HTTPError +from ..compat import ( + compat_HTTPError, + compat_str, +) from ..utils import ( + determine_ext, + ExtractorError, fix_xml_ampersands, orderedSet, parse_duration, qualities, strip_jsonp, unified_strdate, - ExtractorError, ) @@ -40,103 +44,101 @@ class NPOIE(NPOBaseIE): (?P[^/?#]+) ''' - _TESTS = [ - { - 'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719', - 'md5': '4b3f9c429157ec4775f2c9cb7b911016', - 'info_dict': { - 'id': 'VPWON_1220719', - 'ext': 'm4v', - 'title': 'Nieuwsuur', - 'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.', - 'upload_date': '20140622', - }, + _TESTS = [{ + 'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719', + 'md5': '4b3f9c429157ec4775f2c9cb7b911016', + 'info_dict': { + 'id': 'VPWON_1220719', + 'ext': 'm4v', + 'title': 'Nieuwsuur', + 'description': 'Dagelijks tussen tien en elf: nieuws, sport 
en achtergronden.', + 'upload_date': '20140622', }, - { - 'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800', - 'md5': 'da50a5787dbfc1603c4ad80f31c5120b', - 'info_dict': { - 'id': 'VARA_101191800', - 'ext': 'm4v', - 'title': 'De Mega Mike & Mega Thomas show: The best of.', - 'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4', - 'upload_date': '20090227', - 'duration': 2400, - }, + }, { + 'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800', + 'md5': 'da50a5787dbfc1603c4ad80f31c5120b', + 'info_dict': { + 'id': 'VARA_101191800', + 'ext': 'm4v', + 'title': 'De Mega Mike & Mega Thomas show: The best of.', + 'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4', + 'upload_date': '20090227', + 'duration': 2400, }, - { - 'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289', - 'md5': 'f8065e4e5a7824068ed3c7e783178f2c', - 'info_dict': { - 'id': 'VPWON_1169289', - 'ext': 'm4v', - 'title': 'Tegenlicht: De toekomst komt uit Afrika', - 'description': 'md5:52cf4eefbc96fffcbdc06d024147abea', - 'upload_date': '20130225', - 'duration': 3000, - }, + }, { + 'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289', + 'md5': 'f8065e4e5a7824068ed3c7e783178f2c', + 'info_dict': { + 'id': 'VPWON_1169289', + 'ext': 'm4v', + 'title': 'Tegenlicht: Zwart geld. De toekomst komt uit Afrika', + 'description': 'md5:52cf4eefbc96fffcbdc06d024147abea', + 'upload_date': '20130225', + 'duration': 3000, }, - { - 'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706', - 'info_dict': { - 'id': 'WO_VPRO_043706', - 'ext': 'wmv', - 'title': 'De nieuwe mens - Deel 1', - 'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b', - 'duration': 4680, - }, - 'params': { - # mplayer mms download - 'skip_download': True, - } + }, { + 'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706', + 'info_dict': { + 'id': 'WO_VPRO_043706', + 'ext': 'm4v', + 'title': 'De nieuwe mens - Deel 1', + 'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b', + 'duration': 4680, }, - # non asf in streams - { - 'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771', - 'md5': 'b3da13de374cbe2d5332a7e910bef97f', - 'info_dict': { - 'id': 'WO_NOS_762771', - 'ext': 'mp4', - 'title': 'Hoe gaat Europa verder na Parijs?', - }, - }, - { - 'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content', - 'md5': '01c6a2841675995da1f0cf776f03a9c3', - 'info_dict': { - 'id': 'VPWON_1233944', - 'ext': 'm4v', - 'title': 'Aap, poot, pies', - 'description': 'md5:c9c8005d1869ae65b858e82c01a91fde', - 'upload_date': '20150508', - 'duration': 599, - }, - }, - { - 'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698', - 'md5': 'd30cd8417b8b9bca1fdff27428860d08', - 'info_dict': { - 'id': 'POW_00996502', - 'ext': 'm4v', - 'title': '''"Dit is wel een 'landslide'..."''', - 'description': 'md5:f8d66d537dfb641380226e31ca57b8e8', - 'upload_date': '20150508', - 'duration': 462, - }, - }, - { - 'url': 'http://www.zapp.nl/de-bzt-show/gemist/KN_1687547', - 'only_matching': True, - }, - { - 'url': 'http://www.zapp.nl/de-bzt-show/filmpjes/POMS_KN_7315118', - 'only_matching': True, - }, - { - 'url': 'http://www.zapp.nl/beste-vrienden-quiz/extra-video-s/WO_NTR_1067990', - 'only_matching': True, + 'params': { + 'skip_download': True, } - ] + }, { + # non asf in streams + 'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771', + 'info_dict': { + 'id': 
'WO_NOS_762771', + 'ext': 'mp4', + 'title': 'Hoe gaat Europa verder na Parijs?', + }, + 'params': { + 'skip_download': True, + } + }, { + 'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content', + 'info_dict': { + 'id': 'VPWON_1233944', + 'ext': 'm4v', + 'title': 'Aap, poot, pies', + 'description': 'md5:c9c8005d1869ae65b858e82c01a91fde', + 'upload_date': '20150508', + 'duration': 599, + }, + 'params': { + 'skip_download': True, + } + }, { + 'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698', + 'info_dict': { + 'id': 'POW_00996502', + 'ext': 'm4v', + 'title': '''"Dit is wel een 'landslide'..."''', + 'description': 'md5:f8d66d537dfb641380226e31ca57b8e8', + 'upload_date': '20150508', + 'duration': 462, + }, + 'params': { + 'skip_download': True, + } + }, { + 'url': 'http://www.zapp.nl/de-bzt-show/gemist/KN_1687547', + 'only_matching': True, + }, { + 'url': 'http://www.zapp.nl/de-bzt-show/filmpjes/POMS_KN_7315118', + 'only_matching': True, + }, { + 'url': 'http://www.zapp.nl/beste-vrienden-quiz/extra-video-s/WO_NTR_1067990', + 'only_matching': True, + }, { + # live stream + 'url': 'npo:LI_NL1_4188102', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) @@ -165,79 +167,115 @@ class NPOIE(NPOBaseIE): token = self._get_token(video_id) formats = [] + urls = set() - pubopties = metadata.get('pubopties') - if pubopties: - quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std']) - items = self._download_json( - 'http://ida.omroep.nl/app.php/%s' % video_id, - 'Downloading formats JSON', query={ - 'adaptive': 'yes', - 'token': token, - })['items'][0] - for num, item in enumerate(items): - item_url = item.get('url') - if not item_url: - continue - format_id = self._search_regex( - r'video/ida/([^/]+)', item_url, 'format id', - default=None) - try: - stream_info = self._download_json( - item_url + '&type=json', video_id, - 'Downloading %s stream JSON' % item.get('label') or format_id or num) - except ExtractorError as ee: - if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: - error = (self._parse_json( - ee.cause.read().decode(), video_id, - fatal=False) or {}).get('errorstring') - if error: - raise ExtractorError(error, expected=True) - raise - if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0): + quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std']) + items = self._download_json( + 'http://ida.omroep.nl/app.php/%s' % video_id, video_id, + 'Downloading formats JSON', query={ + 'adaptive': 'yes', + 'token': token, + })['items'][0] + for num, item in enumerate(items): + item_url = item.get('url') + if not item_url or item_url in urls: + continue + urls.add(item_url) + format_id = self._search_regex( + r'video/ida/([^/]+)', item_url, 'format id', + default=None) + + def add_format_url(format_url): + formats.append({ + 'url': format_url, + 'format_id': format_id, + 'quality': quality(format_id), + }) + + # Example: http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706 + if item.get('contentType') == 'url': + add_format_url(item_url) + continue + + try: + stream_info = self._download_json( + item_url + '&type=json', video_id, + 'Downloading %s stream JSON' + % item.get('label') or format_id or num) + except ExtractorError as ee: + if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: + error = (self._parse_json( + ee.cause.read().decode(), 
video_id, + fatal=False) or {}).get('errorstring') + if error: + raise ExtractorError(error, expected=True) + raise + # Stream URL instead of JSON, example: npo:LI_NL1_4188102 + if isinstance(stream_info, compat_str): + if not stream_info.startswith('http'): continue + video_url = stream_info + # JSON + else: video_url = stream_info.get('url') - if not video_url: + if not video_url or video_url in urls: + continue + urls.add(item_url) + if determine_ext(video_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + video_url, video_id, ext='mp4', + entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) + else: + add_format_url(video_url) + + is_live = metadata.get('medium') == 'live' + + if not is_live: + for num, stream in enumerate(metadata.get('streams', [])): + stream_url = stream.get('url') + if not stream_url or stream_url in urls: continue - if stream_info.get('family') == 'adaptive': + urls.add(stream_url) + # smooth streaming is not supported + stream_type = stream.get('type', '').lower() + if stream_type in ['ss', 'ms']: + continue + if stream_type == 'hds': + f4m_formats = self._extract_f4m_formats( + stream_url, video_id, fatal=False) + # f4m downloader downloads only piece of live stream + for f4m_format in f4m_formats: + f4m_format['preference'] = -1 + formats.extend(f4m_formats) + elif stream_type == 'hls': formats.extend(self._extract_m3u8_formats( - video_url, video_id, ext='mp4', - entry_protocol='m3u8_native', m3u8_id='hls', - fatal=False)) - else: + stream_url, video_id, ext='mp4', fatal=False)) + # Example: http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706 + elif '.asf' in stream_url: + asx = self._download_xml( + stream_url, video_id, + 'Downloading stream %d ASX playlist' % num, + transform_source=fix_xml_ampersands, fatal=False) + if not asx: + continue + ref = asx.find('./ENTRY/Ref') + if ref is None: + continue + video_url = ref.get('href') + if not video_url or video_url in urls: + continue + urls.add(video_url) formats.append({ 'url': video_url, - 'format_id': format_id, - 'quality': quality(format_id), + 'ext': stream.get('formaat', 'asf'), + 'quality': stream.get('kwaliteit'), + 'preference': -10, }) - - streams = metadata.get('streams') - if streams: - for i, stream in enumerate(streams): - stream_url = stream.get('url') - if not stream_url: - continue - if '.asf' not in stream_url: + else: formats.append({ 'url': stream_url, 'quality': stream.get('kwaliteit'), }) - continue - asx = self._download_xml( - stream_url, video_id, - 'Downloading stream %d ASX playlist' % i, - transform_source=fix_xml_ampersands) - ref = asx.find('./ENTRY/Ref') - if ref is None: - continue - video_url = ref.get('href') - if not video_url: - continue - formats.append({ - 'url': video_url, - 'ext': stream.get('formaat', 'asf'), - 'quality': stream.get('kwaliteit'), - }) self._sort_formats(formats) @@ -250,28 +288,28 @@ class NPOIE(NPOBaseIE): return { 'id': video_id, - 'title': title, + 'title': self._live_title(title) if is_live else title, 'description': metadata.get('info'), 'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'], 'upload_date': unified_strdate(metadata.get('gidsdatum')), 'duration': parse_duration(metadata.get('tijdsduur')), 'formats': formats, 'subtitles': subtitles, + 'is_live': is_live, } class NPOLiveIE(NPOBaseIE): IE_NAME = 'npo.nl:live' - _VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P.+)' + _VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P[^/?#&]+)' _TEST = { 'url': 'http://www.npo.nl/live/npo-1', 'info_dict': { - 
'id': 'LI_NEDERLAND1_136692', + 'id': 'LI_NL1_4188102', 'display_id': 'npo-1', 'ext': 'mp4', - 'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', - 'description': 'Livestream', + 'title': 're:^NPO 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'is_live': True, }, 'params': { @@ -287,58 +325,12 @@ class NPOLiveIE(NPOBaseIE): live_id = self._search_regex( r'data-prid="([^"]+)"', webpage, 'live id') - metadata = self._download_json( - 'http://e.omroep.nl/metadata/%s' % live_id, - display_id, transform_source=strip_jsonp) - - token = self._get_token(display_id) - - formats = [] - - streams = metadata.get('streams') - if streams: - for stream in streams: - stream_type = stream.get('type').lower() - # smooth streaming is not supported - if stream_type in ['ss', 'ms']: - continue - stream_info = self._download_json( - 'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp' - % (stream.get('url'), token), - display_id, 'Downloading %s JSON' % stream_type) - if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0): - continue - stream_url = self._download_json( - stream_info['stream'], display_id, - 'Downloading %s URL' % stream_type, - 'Unable to download %s URL' % stream_type, - transform_source=strip_jsonp, fatal=False) - if not stream_url: - continue - if stream_type == 'hds': - f4m_formats = self._extract_f4m_formats(stream_url, display_id) - # f4m downloader downloads only piece of live stream - for f4m_format in f4m_formats: - f4m_format['preference'] = -1 - formats.extend(f4m_formats) - elif stream_type == 'hls': - formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4')) - else: - formats.append({ - 'url': stream_url, - 'preference': -10, - }) - - self._sort_formats(formats) - return { + '_type': 'url_transparent', + 'url': 'npo:%s' % live_id, + 'ie_key': NPOIE.ie_key(), 'id': live_id, 'display_id': display_id, - 'title': self._live_title(metadata['titel']), - 'description': metadata['info'], - 'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'], - 'formats': formats, - 'is_live': True, } From 4b8a984c67cdc1b2bfde77398d74096406db9644 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Wed, 1 Mar 2017 22:21:13 +0700 Subject: [PATCH 173/225] [npo] Add support for audio --- youtube_dl/extractor/npo.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index be10fc486..38fefe492 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -125,6 +125,18 @@ class NPOIE(NPOBaseIE): 'params': { 'skip_download': True, } + }, { + # audio + 'url': 'http://www.npo.nl/jouw-stad-rotterdam/29-01-2017/RBX_FUNX_6683215/RBX_FUNX_7601437', + 'info_dict': { + 'id': 'RBX_FUNX_6683215', + 'ext': 'mp3', + 'title': 'Jouw Stad Rotterdam', + 'description': 'md5:db251505244f097717ec59fabc372d9f', + }, + 'params': { + 'skip_download': True, + } }, { 'url': 'http://www.zapp.nl/de-bzt-show/gemist/KN_1687547', 'only_matching': True, @@ -193,7 +205,7 @@ class NPOIE(NPOBaseIE): }) # Example: http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706 - if item.get('contentType') == 'url': + if item.get('contentType') in ('url', 'audio'): add_format_url(item_url) continue @@ -201,7 +213,7 @@ class NPOIE(NPOBaseIE): stream_info = self._download_json( item_url + '&type=json', video_id, 'Downloading %s stream JSON' - % item.get('label') or format_id or num) + % item.get('label') or item.get('format') or format_id or num) 
except ExtractorError as ee: if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404: error = (self._parse_json( From 40df485f554ec3fff81ca988b5bf961d54d8e41b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Wed, 1 Mar 2017 23:03:36 +0700 Subject: [PATCH 174/225] [YoutubeDL] Don't sanitize identifiers (closes #12317) --- youtube_dl/YoutubeDL.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index f7254560c..13a3a909e 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -616,7 +616,7 @@ class YoutubeDL(object): sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), - is_id=(k == 'id')) + is_id=(k == 'id' or k.endswith('_id'))) template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v)) for k, v in template_dict.items() if v is not None and not isinstance(v, (list, tuple, dict))) From 158af5242e983312c0c1e7590faa9844136e338f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Wed, 1 Mar 2017 23:04:02 +0700 Subject: [PATCH 175/225] [utils] Carry long doc string --- youtube_dl/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 807183f4a..8738aa249 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -473,7 +473,8 @@ def timeconvert(timestr): def sanitize_filename(s, restricted=False, is_id=False): """Sanitizes a string so it could be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. - Set is_id if this is not an arbitrary string, but an ID that should be kept if possible + Set is_id if this is not an arbitrary string, but an ID that should be kept + if possible. """ def replace_insane(char): if restricted and char in ACCENT_CHARS: From af5049f128655cfec8978f17b04e5d88bd91d37f Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Thu, 2 Mar 2017 02:14:42 +0800 Subject: [PATCH 176/225] [adobepass] Add Charter Spectrum (#11465) Thanks @tv21 for the fix! --- youtube_dl/extractor/adobepass.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/youtube_dl/extractor/adobepass.py b/youtube_dl/extractor/adobepass.py index 4d655bd5e..d4816abf5 100644 --- a/youtube_dl/extractor/adobepass.py +++ b/youtube_dl/extractor/adobepass.py @@ -36,6 +36,11 @@ MSO_INFO = { 'username_field': 'Ecom_User_ID', 'password_field': 'Ecom_Password', }, + 'Charter_Direct': { + 'name': 'Charter Spectrum', + 'username_field': 'IDToken1', + 'password_field': 'IDToken2', + }, 'thr030': { 'name': '3 Rivers Communications' }, From c9612c04872656125108980dc61d71ba6b3a4f89 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Thu, 2 Mar 2017 16:59:12 +0800 Subject: [PATCH 177/225] [youtube] Mark errors about rental videos as expected Closes #12324 --- youtube_dl/extractor/youtube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 81c793921..caa048249 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -1454,7 +1454,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): # Check for "rental" videos if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: - raise ExtractorError('"rental" videos not supported') + raise ExtractorError('"rental" videos not supported. 
See https://github.com/rg3/youtube-dl/issues/359 for more information.', expected=True) # Start extracting information self.report_information_extraction(video_id) From 11bb6ad1a5ae3767535720e7dfebb823e8167088 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Thu, 2 Mar 2017 20:49:39 +0800 Subject: [PATCH 178/225] [facebook] Fix extraction (closes #12323) Almost all videos now use the pagelet type 'permalink_video_pagelet' --- ChangeLog | 6 ++++++ youtube_dl/extractor/facebook.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 2a1fd9018..448d7f521 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version + +Extractors +* [facebook] Fix extraction (#12323) + + version 2017.02.28 Core diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index 70b8c95c5..6315d40c5 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -303,7 +303,7 @@ class FacebookIE(InfoExtractor): if not video_data: server_js_data = self._parse_json( self._search_regex( - r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+(?:stream_pagelet|pagelet_group_mall)', + r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+(?:stream_pagelet|pagelet_group_mall|permalink_video_pagelet)', webpage, 'js data', default='{}'), video_id, transform_source=js_to_json, fatal=False) if server_js_data: From 28d15b73f8704c6d00efa14948da29843fdb76e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Thu, 2 Mar 2017 22:29:56 +0700 Subject: [PATCH 179/225] [ChangeLog] Actualize --- ChangeLog | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 448d7f521..e3e0f4544 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,7 +1,14 @@ version +Core ++ [adobepass] Add support for Charter Spectrum (#11465) +* [YoutubeDL] Don't sanitize identifiers in output template (#12317) + Extractors -* [facebook] Fix extraction (#12323) +* [facebook] Fix extraction (#12323, #12330) +* [youtube] Mark errors about rental videos as expected (#12324) ++ [npo] Add support for audio +* [npo] Adapt to app.php API (#12311, #12320) version 2017.02.28 From 250eea6821a5715e2ee7cade8539fcd42177603d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Thu, 2 Mar 2017 22:33:22 +0700 Subject: [PATCH 180/225] release 2017.03.02 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- youtube_dl/version.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index c907ef931..fec4152e3 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.28*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.28** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.02*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.02** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.02.28 +[debug] youtube-dl version 2017.03.02 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index e3e0f4544..e53fb7767 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version +version 2017.03.02 Core + [adobepass] Add support for Charter Spectrum (#11465) diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 340e23bf8..f4c8d3d5f 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.02.28' +__version__ = '2017.03.02' From 4d345bf17b3040ebfedd079e656e1ca658749187 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Thu, 2 Mar 2017 23:53:46 +0700 Subject: [PATCH 181/225] [ruutu] Disable DASH formats (closes #12322) Due to causing out of sync issue --- youtube_dl/extractor/ruutu.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py index 20d01754a..6c09df25a 100644 --- a/youtube_dl/extractor/ruutu.py +++ b/youtube_dl/extractor/ruutu.py @@ -82,6 +82,9 @@ class RuutuIE(InfoExtractor): formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id='hds', fatal=False)) elif ext == 'mpd': + # video-only and audio-only streams are of different + # duration resulting in out of sync issue + continue formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) else: From 9bae185ba6be0b8ffe1d2168937c9d3274c5b60f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Fri, 3 Mar 2017 22:16:00 +0700 Subject: [PATCH 182/225] [24video] Use original host for requests (closes #12339) --- youtube_dl/extractor/twentyfourvideo.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/youtube_dl/extractor/twentyfourvideo.py b/youtube_dl/extractor/twentyfourvideo.py index f3541b654..7af11659f 100644 --- a/youtube_dl/extractor/twentyfourvideo.py +++ b/youtube_dl/extractor/twentyfourvideo.py @@ -1,6 +1,8 @@ # coding: utf-8 from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..utils import ( parse_iso8601, @@ -12,7 +14,7 @@ from ..utils import ( class TwentyFourVideoIE(InfoExtractor): IE_NAME = '24video' - _VALID_URL = r'https?://(?:www\.)?24video\.(?:net|me|xxx|sex|tube)/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P\d+)' + _VALID_URL = r'https?://(?P(?:www\.)?24video\.(?:net|me|xxx|sex|tube))/(?:video/(?:view|xml)/|player/new24_play\.swf\?id=)(?P\d+)' _TESTS = [{ 'url': 'http://www.24video.net/video/view/1044982', @@ -43,10 +45,12 @@ class TwentyFourVideoIE(InfoExtractor): }] def _real_extract(self, url): - video_id = self._match_id(url) + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + host = mobj.group('host') webpage = 
self._download_webpage( - 'http://www.24video.sex/video/view/%s' % video_id, video_id) + 'http://%s/video/view/%s' % (host, video_id), video_id) title = self._og_search_title(webpage) description = self._html_search_regex( @@ -72,11 +76,11 @@ class TwentyFourVideoIE(InfoExtractor): # Sets some cookies self._download_xml( - r'http://www.24video.sex/video/xml/%s?mode=init' % video_id, + r'http://%s/video/xml/%s?mode=init' % (host, video_id), video_id, 'Downloading init XML') video_xml = self._download_xml( - 'http://www.24video.sex/video/xml/%s?mode=play' % video_id, + 'http://%s/video/xml/%s?mode=play' % (host, video_id), video_id, 'Downloading video XML') video = xpath_element(video_xml, './/video', 'video', fatal=True) From 692fa200cae38a7e37f646118a268ad408c8ab95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Fri, 3 Mar 2017 22:28:34 +0700 Subject: [PATCH 183/225] [go] Relax _VALID_URL (closes #12341) --- youtube_dl/extractor/go.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/go.py b/youtube_dl/extractor/go.py index 21ed846b2..4c9be47b4 100644 --- a/youtube_dl/extractor/go.py +++ b/youtube_dl/extractor/go.py @@ -36,7 +36,7 @@ class GoIE(AdobePassIE): 'requestor_id': 'DisneyXD', } } - _VALID_URL = r'https?://(?:(?P%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P\w+)|season-\d+/\d+-(?P[^/?#]+))' % '|'.join(_SITE_INFO.keys()) + _VALID_URL = r'https?://(?:(?P%s)\.)?go\.com/(?:[^/]+/)*(?:vdka(?P\w+)|(?:[^/]+/)*(?P[^/?#]+))' % '|'.join(_SITE_INFO.keys()) _TESTS = [{ 'url': 'http://abc.go.com/shows/castle/video/most-recent/vdka0_g86w5onx', 'info_dict': { @@ -52,6 +52,12 @@ class GoIE(AdobePassIE): }, { 'url': 'http://abc.go.com/shows/after-paradise/video/most-recent/vdka3335601', 'only_matching': True, + }, { + 'url': 'http://abc.go.com/shows/the-catch/episode-guide/season-01/10-the-wedding', + 'only_matching': True, + }, { + 'url': 'http://abc.go.com/shows/world-news-tonight/episode-guide/2017-02/17-021717-intense-stand-off-between-man-with-rifle-and-police-in-oakland', + 'only_matching': True, }] def _real_extract(self, url): From d02d4fa0a90f3182d65504508105e8d86886c6ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Fri, 3 Mar 2017 22:49:48 +0700 Subject: [PATCH 184/225] [brightcove:new] Raise GeoRestrictedError --- youtube_dl/extractor/brightcove.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index 27685eed0..f8605be82 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -544,8 +544,10 @@ class BrightcoveNewIE(InfoExtractor): except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: json_data = self._parse_json(e.cause.read().decode(), video_id)[0] - raise ExtractorError( - json_data.get('message') or json_data['error_code'], expected=True) + message = json_data.get('message') or json_data['error_code'] + if json_data.get('error_subcode') == 'CLIENT_GEO': + self.raise_geo_restricted(msg=message) + raise ExtractorError(message, expected=True) raise title = json_data['name'].strip() From cbb127568a6182df2c5a2d65426de523f1f7b43f Mon Sep 17 00:00:00 2001 From: Olivier Bilodeau Date: Thu, 15 Dec 2016 20:14:04 -0500 Subject: [PATCH 185/225] [vrak] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vrak.py | 68 ++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 
youtube_dl/extractor/vrak.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index b1613a9d3..0ac42138a 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1165,6 +1165,7 @@ from .voicerepublic import VoiceRepublicIE from .voxmedia import VoxMediaIE from .vporn import VpornIE from .vrt import VRTIE +from .vrak import VrakIE from .vube import VubeIE from .vuclip import VuClipIE from .vvvvid import VVVVIDIE diff --git a/youtube_dl/extractor/vrak.py b/youtube_dl/extractor/vrak.py new file mode 100644 index 000000000..692e2fcfc --- /dev/null +++ b/youtube_dl/extractor/vrak.py @@ -0,0 +1,68 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + +from .brightcove import BrightcoveNewIE + + +class VrakIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?target=(?P[0-9\.]+).*' + _TEST = { + 'url': 'http://www.vrak.tv/videos?target=1.2240923&filtre=emission&id=1.1806721', + 'md5': 'c5d5ce237bca3b1e990ce1b48d1f0948', + 'info_dict': { + 'id': '5231040869001', + 'ext': 'mp4', + 'title': 'Référendums américains, animés japonais et hooligans russes', + 'upload_date': '20161201', + 'description': 'This video file has been uploaded automatically using Oprah. It should be updated with real description soon.', + 'timestamp': 1480628425, + 'uploader_id': '2890187628001', + } + } + + def _real_extract(self, url): + url_id = self._match_id(url) + webpage = self._download_webpage(url, url_id) + + result = {} + result['title'] = self._html_search_regex( + r'

<h3 class="videoTitle">(.+?)</h3>
', webpage, 'title') + + # Inspired from BrightcoveNewIE._extract_url() + entries = [] + for account_id, player_id, _, video_id in re.findall( + # account_id, player_id and embed from: + #
+                r'''(?sx)
+                    <div[^>]+
+                        data-publisher-id=["\'](\d+)["\']
+                        [^>]*
+                        data-player-id=["\']([^"\']+)["\']
+                        [^>]*
+                        refId":"([^&]+)"
+                        [^>]*
+                        >.*?
+                    </div>
.*? + RW\ java\.lang\.String\ value\ =\ \'brightcove\.article\.\d+\.\3\' + [^>]* + RW\ java\.lang\.String\ value\ =\ \'(\d+)\' + ''', webpage): + + entries.append( + 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' + % (account_id, player_id, 'default', video_id)) + + if entries: + result = self.url_result(entries[0], BrightcoveNewIE.ie_key()) + + return result From d16f27ca272cb10f4de87814665402b9737175ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Fri, 3 Mar 2017 23:57:01 +0700 Subject: [PATCH 186/225] [brightcove:new] Add ability to smuggle geo_countries into URL --- youtube_dl/extractor/brightcove.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index f8605be82..66c8cb219 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -515,6 +515,9 @@ class BrightcoveNewIE(InfoExtractor): return entries def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + self._initialize_geo_bypass(smuggled_data.get('geo_countries')) + account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage( From 4d058c9862ebcb1fb7ebd988cf053fde200913cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Fri, 3 Mar 2017 23:58:16 +0700 Subject: [PATCH 187/225] [vrak] Improve and update test (closes #11452) --- youtube_dl/extractor/vrak.py | 104 +++++++++++++++++++---------------- 1 file changed, 58 insertions(+), 46 deletions(-) diff --git a/youtube_dl/extractor/vrak.py b/youtube_dl/extractor/vrak.py index 692e2fcfc..daa247cce 100644 --- a/youtube_dl/extractor/vrak.py +++ b/youtube_dl/extractor/vrak.py @@ -4,65 +4,77 @@ from __future__ import unicode_literals import re from .common import InfoExtractor - from .brightcove import BrightcoveNewIE +from ..utils import ( + int_or_none, + parse_age_limit, + smuggle_url, + unescapeHTML, +) class VrakIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?target=(?P[0-9\.]+).*' + _VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?\btarget=(?P[\d.]+)' _TEST = { - 'url': 'http://www.vrak.tv/videos?target=1.2240923&filtre=emission&id=1.1806721', - 'md5': 'c5d5ce237bca3b1e990ce1b48d1f0948', + 'url': 'http://www.vrak.tv/videos?target=1.2306782&filtre=emission&id=1.1806721', 'info_dict': { - 'id': '5231040869001', + 'id': '5345661243001', 'ext': 'mp4', - 'title': 'Référendums américains, animés japonais et hooligans russes', - 'upload_date': '20161201', - 'description': 'This video file has been uploaded automatically using Oprah. It should be updated with real description soon.', - 'timestamp': 1480628425, + 'title': 'Obésité, film de hockey et Roseline Filion', + 'timestamp': 1488492126, + 'upload_date': '20170302', 'uploader_id': '2890187628001', - } + 'creator': 'VRAK.TV', + 'age_limit': 8, + 'series': 'ALT (Actualité Légèrement Tordue)', + 'episode': 'Obésité, film de hockey et Roseline Filion', + 'tags': list, + }, + 'params': { + 'skip_download': True, + }, } + BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2890187628001/default_default/index.html?videoId=%s' def _real_extract(self, url): - url_id = self._match_id(url) - webpage = self._download_webpage(url, url_id) + video_id = self._match_id(url) - result = {} - result['title'] = self._html_search_regex( - r'

<h3 class="videoTitle">(.+?)</h3>
', webpage, 'title') + webpage = self._download_webpage(url, video_id) - # Inspired from BrightcoveNewIE._extract_url() - entries = [] - for account_id, player_id, _, video_id in re.findall( - # account_id, player_id and embed from: - #
-                r'''(?sx)
-                    <div[^>]+
-                        data-publisher-id=["\'](\d+)["\']
-                        [^>]*
-                        data-player-id=["\']([^"\']+)["\']
-                        [^>]*
-                        refId":"([^&]+)"
-                        [^>]*
-                        >.*?
-                    </div>
.*? - RW\ java\.lang\.String\ value\ =\ \'brightcove\.article\.\d+\.\3\' - [^>]* - RW\ java\.lang\.String\ value\ =\ \'(\d+)\' - ''', webpage): + title = self._html_search_regex( + r']+\bclass=["\']videoTitle["\'][^>]*>([^<]+)', + webpage, 'title', default=None) or self._og_search_title(webpage) - entries.append( - 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' - % (account_id, player_id, 'default', video_id)) + content = self._parse_json( + self._search_regex( + r'data-player-options-content=(["\'])(?P{.+?})\1', + webpage, 'content', default='{}', group='content'), + video_id, transform_source=unescapeHTML) - if entries: - result = self.url_result(entries[0], BrightcoveNewIE.ie_key()) + ref_id = content.get('refId') or self._search_regex( + r'refId":"([^&]+)"', webpage, 'ref id') - return result + brightcove_id = self._search_regex( + r'''(?x) + java\.lang\.String\s+value\s*=\s*["']brightcove\.article\.\d+\.%s + [^>]* + java\.lang\.String\s+value\s*=\s*["'](\d+) + ''' % re.escape(ref_id), webpage, 'brightcove id') + + return { + '_type': 'url_transparent', + 'ie_key': BrightcoveNewIE.ie_key(), + 'url': smuggle_url( + self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, + {'geo_countries': ['CA']}), + 'id': brightcove_id, + 'description': content.get('description'), + 'creator': content.get('brand'), + 'age_limit': parse_age_limit(content.get('rating')), + 'series': content.get('showName') or content.get( + 'episodeName'), # this is intentional + 'season_number': int_or_none(content.get('seasonNumber')), + 'episode': title, + 'episode_number': int_or_none(content.get('episodeNumber')), + 'tags': content.get('tags', []), + } From bc82f22879e222a1ade35fd8ebd7bb535f9166dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 4 Mar 2017 00:42:51 +0700 Subject: [PATCH 188/225] [rutube] Relax _VALID_URL --- youtube_dl/extractor/rutube.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py index fd1df925b..663b75583 100644 --- a/youtube_dl/extractor/rutube.py +++ b/youtube_dl/extractor/rutube.py @@ -17,7 +17,7 @@ from ..utils import ( class RutubeIE(InfoExtractor): IE_NAME = 'rutube' IE_DESC = 'Rutube videos' - _VALID_URL = r'https?://rutube\.ru/(?:video|play/embed)/(?P[\da-z]{32})' + _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/(?P[\da-z]{32})' _TESTS = [{ 'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/', @@ -39,6 +39,9 @@ class RutubeIE(InfoExtractor): }, { 'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661', 'only_matching': True, + }, { + 'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661', + 'only_matching': True, }] def _real_extract(self, url): From eb3079b6ce54b63b4cc609198382b6db2cbb6f5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 4 Mar 2017 00:46:33 +0700 Subject: [PATCH 189/225] [generic] Add support for rutube embeds --- youtube_dl/extractor/generic.py | 24 ++++++++++++++++++++++++ youtube_dl/extractor/rutube.py | 6 ++++++ 2 files changed, 30 insertions(+) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 9868ca6d0..ebab9509d 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -84,6 +84,7 @@ from .twentymin import TwentyMinutenIE from .ustream import UstreamIE from .openload import OpenloadIE from .videopress import VideoPressIE +from .rutube import RutubeIE class GenericIE(InfoExtractor): @@ -1502,6 +1503,23 @@ 
class GenericIE(InfoExtractor): }, 'add_ie': [VideoPressIE.ie_key()], }, + { + # Rutube embed + 'url': 'http://magazzino.friday.ru/videos/vipuski/kazan-2', + 'info_dict': { + 'id': '9b3d5bee0a8740bf70dfd29d3ea43541', + 'ext': 'flv', + 'title': 'Магаззино: Казань 2', + 'description': 'md5:99bccdfac2269f0e8fdbc4bbc9db184a', + 'uploader': 'Магаззино', + 'upload_date': '20170228', + 'uploader_id': '996642', + }, + 'params': { + 'skip_download': True, + }, + 'add_ie': [RutubeIE.ie_key()], + }, { # ThePlatform embedded with whitespaces in URLs 'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm', @@ -2480,6 +2498,12 @@ class GenericIE(InfoExtractor): return _playlist_from_matches( videopress_urls, ie=VideoPressIE.ie_key()) + # Look for Rutube embeds + rutube_urls = RutubeIE._extract_urls(webpage) + if rutube_urls: + return _playlist_from_matches( + rutube_urls, ie=RutubeIE.ie_key()) + # Looking for http://schema.org/VideoObject json_ld = self._search_json_ld( webpage, video_id, default={}, expected_type='VideoObject') diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py index 663b75583..889fa7628 100644 --- a/youtube_dl/extractor/rutube.py +++ b/youtube_dl/extractor/rutube.py @@ -44,6 +44,12 @@ class RutubeIE(InfoExtractor): 'only_matching': True, }] + @staticmethod + def _extract_urls(webpage): + return [mobj.group('url') for mobj in re.finditer( + r']+?src=(["\'])(?P(?:https?:)?//rutube\.ru/embed/[\da-z]{32}.*?)\1', + webpage)] + def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( From 6f4e4132d8ef835635059d08206ca9bc6fd5dd98 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Sat, 4 Mar 2017 23:23:18 +0800 Subject: [PATCH 190/225] [douyutv] Switch to the PC API to escape the 5-min limitation Thanks @spacemeowx2 for the algo. Ref: https://gist.github.com/spacemeowx2/629b1d131bd7e240a7d28742048e80fc Closes #12316 --- ChangeLog | 6 ++++++ youtube_dl/extractor/douyutv.py | 31 +++++++++++++++++++++---------- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/ChangeLog b/ChangeLog index e53fb7767..13ccb0f8f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version + +Extractors +* [douyutv] Switch to the PC API to escape the 5-min limitation (#12316) + + version 2017.03.02 Core diff --git a/youtube_dl/extractor/douyutv.py b/youtube_dl/extractor/douyutv.py index 9a83fb31a..82d8a042f 100644 --- a/youtube_dl/extractor/douyutv.py +++ b/youtube_dl/extractor/douyutv.py @@ -1,6 +1,9 @@ # coding: utf-8 from __future__ import unicode_literals +import time +import hashlib + from .common import InfoExtractor from ..utils import ( ExtractorError, @@ -16,7 +19,7 @@ class DouyuTVIE(InfoExtractor): 'info_dict': { 'id': '17732', 'display_id': 'iseven', - 'ext': 'mp4', + 'ext': 'flv', 'title': 're:^清晨醒脑!T-ARA根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': r're:.*m7show@163\.com.*', 'thumbnail': r're:^https?://.*\.jpg$', @@ -31,7 +34,7 @@ class DouyuTVIE(InfoExtractor): 'info_dict': { 'id': '85982', 'display_id': '85982', - 'ext': 'mp4', + 'ext': 'flv', 'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:746a2f7a253966a06755a912f0acc0d2', 'thumbnail': r're:^https?://.*\.jpg$', @@ -47,7 +50,7 @@ class DouyuTVIE(InfoExtractor): 'info_dict': { 'id': '17732', 'display_id': '17732', - 'ext': 'mp4', + 'ext': 'flv', 'title': 're:^清晨醒脑!T-ARA根本停不下来! 
[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': r're:.*m7show@163\.com.*', 'thumbnail': r're:^https?://.*\.jpg$', @@ -66,10 +69,6 @@ class DouyuTVIE(InfoExtractor): 'only_matching': True, }] - # Decompile core.swf in webpage by ffdec "Search SWFs in memory". core.swf - # is encrypted originally, but ffdec can dump memory to get the decrypted one. - _API_KEY = 'A12Svb&%1UUmf@hC' - def _real_extract(self, url): video_id = self._match_id(url) @@ -80,6 +79,7 @@ class DouyuTVIE(InfoExtractor): room_id = self._html_search_regex( r'"room_id\\?"\s*:\s*(\d+),', page, 'room id') + # Grab metadata from mobile API room = self._download_json( 'http://m.douyu.com/html5/live?roomId=%s' % room_id, video_id, note='Downloading room info')['data'] @@ -88,8 +88,19 @@ class DouyuTVIE(InfoExtractor): if room.get('show_status') == '2': raise ExtractorError('Live stream is offline', expected=True) - formats = self._extract_m3u8_formats( - room['hls_url'], video_id, ext='mp4') + # Grab the URL from PC client API + # The m3u8 url from mobile API requires re-authentication every 5 minutes + tt = int(time.time()) + signContent = 'lapi/live/thirdPart/getPlay/%s?aid=pcclient&rate=0&time=%d9TUk5fjjUjg9qIMH3sdnh' % (room_id, tt) + sign = hashlib.md5(signContent.encode('ascii')).hexdigest() + video_url = self._download_json( + 'http://coapi.douyucdn.cn/lapi/live/thirdPart/getPlay/' + room_id, + video_id, note='Downloading video URL info', + query={'rate': 0}, headers={ + 'auth': sign, + 'time': str(tt), + 'aid': 'pcclient' + })['data']['live_url'] title = self._live_title(unescapeHTML(room['room_name'])) description = room.get('show_details') @@ -99,7 +110,7 @@ class DouyuTVIE(InfoExtractor): return { 'id': room_id, 'display_id': video_id, - 'formats': formats, + 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, From 64b7ccef3e3144a50f2cc01772a5ea5e81d4494d Mon Sep 17 00:00:00 2001 From: Juanjo Benages Date: Thu, 2 Feb 2017 17:33:09 +0100 Subject: [PATCH 191/225] [redbulltv] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/redbull.py | 50 ++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 youtube_dl/extractor/redbull.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 0ac42138a..e42a069b5 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -793,6 +793,7 @@ from .rai import ( ) from .rbmaradio import RBMARadioIE from .rds import RDSIE +from .redbull import RedBullIE from .redtube import RedTubeIE from .regiotv import RegioTVIE from .rentv import ( diff --git a/youtube_dl/extractor/redbull.py b/youtube_dl/extractor/redbull.py new file mode 100644 index 000000000..e3d978a53 --- /dev/null +++ b/youtube_dl/extractor/redbull.py @@ -0,0 +1,50 @@ +# coding: utf-8 +from __future__ import unicode_literals +import re + +from .common import InfoExtractor + + +class RedBullIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?redbull\.tv/video/(?PAP-\w+)' + _TEST = { + 'url': 'https://www.redbull.tv/video/AP-1Q756YYX51W11/abc-of-wrc', + 'md5': '78e860f631d7a846e712fab8c5fe2c38', + 'info_dict': { + 'id': 'AP-1Q756YYX51W11', + 'ext': 'mp4', + 'title': 'ABC of...WRC', + 'description': 'Buckle up for a crash course in the terminology, rules, drivers, and courses of the World Rally Championship.' 
+ } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + access_token = self._download_json( + 'http://api-v2.redbull.tv/start?build=4.0.9&category=smartphone&os_version=23&os_family=android', + video_id, note='Downloading access token', + )['auth']['access_token'] + + info = self._download_json( + 'https://api-v2.redbull.tv/views/%s' % video_id, + video_id, note='Downloading video information', + headers={'Authorization': 'Bearer ' + access_token} + )['blocks'][0]['top'][0] + + m3u8_url = info['video_product']['url'] + title = info['title'] + + formats = self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', + m3u8_id='hls') + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': info.get('short_description'), + 'genre': info.get('genre'), + 'duration': info.get('duration') + } From 054a587de8fe2860bcb93a19f8c628b4ddd9ad56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 4 Mar 2017 23:25:09 +0700 Subject: [PATCH 192/225] [redbulltv] Improve extraction (closes #11948, closes #3919) --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/redbull.py | 50 -------------- youtube_dl/extractor/redbulltv.py | 106 +++++++++++++++++++++++++++++ 3 files changed, 107 insertions(+), 51 deletions(-) delete mode 100644 youtube_dl/extractor/redbull.py create mode 100644 youtube_dl/extractor/redbulltv.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e42a069b5..caf1dc766 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -793,7 +793,7 @@ from .rai import ( ) from .rbmaradio import RBMARadioIE from .rds import RDSIE -from .redbull import RedBullIE +from .redbulltv import RedBullTVIE from .redtube import RedTubeIE from .regiotv import RegioTVIE from .rentv import ( diff --git a/youtube_dl/extractor/redbull.py b/youtube_dl/extractor/redbull.py deleted file mode 100644 index e3d978a53..000000000 --- a/youtube_dl/extractor/redbull.py +++ /dev/null @@ -1,50 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals -import re - -from .common import InfoExtractor - - -class RedBullIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?redbull\.tv/video/(?PAP-\w+)' - _TEST = { - 'url': 'https://www.redbull.tv/video/AP-1Q756YYX51W11/abc-of-wrc', - 'md5': '78e860f631d7a846e712fab8c5fe2c38', - 'info_dict': { - 'id': 'AP-1Q756YYX51W11', - 'ext': 'mp4', - 'title': 'ABC of...WRC', - 'description': 'Buckle up for a crash course in the terminology, rules, drivers, and courses of the World Rally Championship.' 
- } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - access_token = self._download_json( - 'http://api-v2.redbull.tv/start?build=4.0.9&category=smartphone&os_version=23&os_family=android', - video_id, note='Downloading access token', - )['auth']['access_token'] - - info = self._download_json( - 'https://api-v2.redbull.tv/views/%s' % video_id, - video_id, note='Downloading video information', - headers={'Authorization': 'Bearer ' + access_token} - )['blocks'][0]['top'][0] - - m3u8_url = info['video_product']['url'] - title = info['title'] - - formats = self._extract_m3u8_formats( - m3u8_url, video_id, 'mp4', 'm3u8_native', - m3u8_id='hls') - - return { - 'id': video_id, - 'title': title, - 'formats': formats, - 'description': info.get('short_description'), - 'genre': info.get('genre'), - 'duration': info.get('duration') - } diff --git a/youtube_dl/extractor/redbulltv.py b/youtube_dl/extractor/redbulltv.py new file mode 100644 index 000000000..5c73d5bca --- /dev/null +++ b/youtube_dl/extractor/redbulltv.py @@ -0,0 +1,106 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + float_or_none, + int_or_none, + try_get, + unified_timestamp, +) + + +class RedBullTVIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?redbull\.tv/(?:video|film)/(?PAP-\w+)' + _TESTS = [{ + # film + 'url': 'https://www.redbull.tv/video/AP-1Q756YYX51W11/abc-of-wrc', + 'md5': '78e860f631d7a846e712fab8c5fe2c38', + 'info_dict': { + 'id': 'AP-1Q756YYX51W11', + 'ext': 'mp4', + 'title': 'ABC of...WRC', + 'description': 'md5:5c7ed8f4015c8492ecf64b6ab31e7d31', + 'duration': 1582.04, + 'timestamp': 1488405786, + 'upload_date': '20170301', + }, + }, { + # episode + 'url': 'https://www.redbull.tv/video/AP-1PMT5JCWH1W11/grime?playlist=shows:shows-playall:web', + 'info_dict': { + 'id': 'AP-1PMT5JCWH1W11', + 'ext': 'mp4', + 'title': 'Grime - Hashtags S2 E4', + 'description': 'md5:334b741c8c1ce65be057eab6773c1cf5', + 'duration': 904.6, + 'timestamp': 1487290093, + 'upload_date': '20170217', + 'series': 'Hashtags', + 'season_number': 2, + 'episode_number': 4, + }, + }, { + 'url': 'https://www.redbull.tv/film/AP-1MSKKF5T92111/in-motion', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + access_token = self._download_json( + 'https://api-v2.redbull.tv/start', video_id, + note='Downloading access token', query={ + 'build': '4.0.9', + 'category': 'smartphone', + 'os_version': 23, + 'os_family': 'android', + })['auth']['access_token'] + + info = self._download_json( + 'https://api-v2.redbull.tv/views/%s' % video_id, + video_id, note='Downloading video information', + headers={'Authorization': 'Bearer ' + access_token} + )['blocks'][0]['top'][0] + + video = info['video_product'] + + title = info['title'].strip() + m3u8_url = video['url'] + + formats = self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls') + + subtitles = {} + for _, captions in (try_get( + video, lambda x: x['attachments']['captions'], + dict) or {}).items(): + if not captions or not isinstance(captions, list): + continue + for caption in captions: + caption_url = caption.get('url') + if not caption_url: + continue + subtitles.setdefault(caption.get('lang') or 'en', []).append({ + 'url': caption_url, + 'ext': caption.get('format'), + }) + + subheading = info.get('subheading') + if subheading: + title += ' - %s' % subheading + + return { + 'id': 
video_id, + 'title': title, + 'description': info.get('long_description') or info.get( + 'short_description'), + 'duration': float_or_none(video.get('duration'), scale=1000), + 'timestamp': unified_timestamp(info.get('published')), + 'series': info.get('show_title'), + 'season_number': int_or_none(info.get('season_number')), + 'episode_number': int_or_none(info.get('episode_number')), + 'formats': formats, + 'subtitles': subtitles, + } From a3ba8a7acfa2db3a8c90000d377c25d14bdad290 Mon Sep 17 00:00:00 2001 From: Lars Vierbergen Date: Sat, 4 Mar 2017 17:47:19 +0100 Subject: [PATCH 193/225] [vier] Add support for vijf.be vier.be and vijf.be run on the same CMS and are property of the same company, so the same extractor can be used for both of them. --- youtube_dl/extractor/vier.py | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/youtube_dl/extractor/vier.py b/youtube_dl/extractor/vier.py index d26fb49b3..5086f591e 100644 --- a/youtube_dl/extractor/vier.py +++ b/youtube_dl/extractor/vier.py @@ -9,7 +9,7 @@ from .common import InfoExtractor class VierIE(InfoExtractor): IE_NAME = 'vier' - _VALID_URL = r'https?://(?:www\.)?vier\.be/(?:[^/]+/videos/(?P[^/]+)(?:/(?P\d+))?|video/v3/embed/(?P\d+))' + _VALID_URL = r'https?://(?:www\.)?(?Pvier|vijf)\.be/(?:[^/]+/videos/(?P[^/]+)(?:/(?P\d+))?|video/v3/embed/(?P\d+))' _TESTS = [{ 'url': 'http://www.vier.be/planb/videos/het-wordt-warm-de-moestuin/16129', 'info_dict': { @@ -23,6 +23,19 @@ class VierIE(InfoExtractor): # m3u8 download 'skip_download': True, }, + }, { + 'url': 'http://www.vijf.be/temptationisland/videos/zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas/2561614', + 'info_dict': { + 'id': '2561614', + 'display_id': 'zo-grappig-temptation-island-hosts-moeten-kiezen-tussen-onmogelijke-dilemmas', + 'ext': 'mp4', + 'title': 'ZO grappig: Temptation Island hosts moeten kiezen tussen onmogelijke dilemma\'s', + 'description': 'Het spel is simpel: Annelien Coorevits en Rick Brandsteder krijgen telkens 2 dilemma\'s voorgeschoteld en ze MOETEN een keuze maken.', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, }, { 'url': 'http://www.vier.be/planb/videos/mieren-herders-van-de-bladluizen', 'only_matching': True, @@ -35,6 +48,7 @@ class VierIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) embed_id = mobj.group('embed_id') display_id = mobj.group('display_id') or embed_id + site = mobj.group('site') webpage = self._download_webpage(url, display_id) @@ -43,7 +57,7 @@ class VierIE(InfoExtractor): webpage, 'video id') application = self._search_regex( [r'data-application="([^"]+)"', r'"application"\s*:\s*"([^"]+)"'], - webpage, 'application', default='vier_vod') + webpage, 'application', default=site + '_vod') filename = self._search_regex( [r'data-filename="([^"]+)"', r'"filename"\s*:\s*"([^"]+)"'], webpage, 'filename') @@ -68,13 +82,19 @@ class VierIE(InfoExtractor): class VierVideosIE(InfoExtractor): IE_NAME = 'vier:videos' - _VALID_URL = r'https?://(?:www\.)?vier\.be/(?P[^/]+)/videos(?:\?.*\bpage=(?P\d+)|$)' + _VALID_URL = r'https?://(?:www\.)?(?Pvier|vijf)\.be/(?P[^/]+)/videos(?:\?.*\bpage=(?P\d+)|$)' _TESTS = [{ 'url': 'http://www.vier.be/demoestuin/videos', 'info_dict': { 'id': 'demoestuin', }, 'playlist_mincount': 153, + }, { + 'url': 'http://www.vijf.be/temptationisland/videos', + 'info_dict': { + 'id': 'temptationisland', + }, + 'playlist_mincount': 159, }, { 'url': 'http://www.vier.be/demoestuin/videos?page=6', 'info_dict': { @@ -92,6 +112,7 @@ 
class VierVideosIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) program = mobj.group('program') + site = mobj.group('site') page_id = mobj.group('page') if page_id: @@ -105,13 +126,13 @@ class VierVideosIE(InfoExtractor): entries = [] for current_page_id in itertools.count(start_page): current_page = self._download_webpage( - 'http://www.vier.be/%s/videos?page=%d' % (program, current_page_id), + 'http://www.%s.be/%s/videos?page=%d' % (site, program, current_page_id), program, 'Downloading page %d' % (current_page_id + 1)) page_entries = [ - self.url_result('http://www.vier.be' + video_url, 'Vier') + self.url_result('http://www.' + site + '.be' + video_url, 'Vier') for video_url in re.findall( - r'

', current_page)] + r'', current_page)] entries.extend(page_entries) if page_id or '>Meer<' not in current_page: break From 30f8f142d4563df9cf47b0164adbb2c9e0130c5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 4 Mar 2017 23:57:03 +0700 Subject: [PATCH 194/225] Credit @ThomasChr for #12015 and #12245 --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 247c0ea13..3d8db1ee2 100644 --- a/AUTHORS +++ b/AUTHORS @@ -202,3 +202,4 @@ Fabian Stahl Bagira Odd Stråbø Philip Herzog +Thomas Christlieb From 466274fe9a8fae0a5f5e0358f48e54f569c10c5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 4 Mar 2017 23:58:12 +0700 Subject: [PATCH 195/225] Credit @p2004a for vodpl (#12122) --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 3d8db1ee2..2a0938f3b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -203,3 +203,4 @@ Bagira Odd Stråbø Philip Herzog Thomas Christlieb +Marek Rusinowski From f24c1e558456021d9a8704c9964c6a704e8b73ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 4 Mar 2017 23:59:49 +0700 Subject: [PATCH 196/225] Credit @TobiX for #9725 --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 2a0938f3b..0716d2ad6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -204,3 +204,4 @@ Odd Stråbø Philip Herzog Thomas Christlieb Marek Rusinowski +Tobias Gruetzmacher From 6f211dc936dcd1f8ab6f178f8696f5edf2f385a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 00:01:17 +0700 Subject: [PATCH 197/225] Credit @obilodeau for vrak (#11452) --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 0716d2ad6..4f383e214 100644 --- a/AUTHORS +++ b/AUTHORS @@ -205,3 +205,4 @@ Philip Herzog Thomas Christlieb Marek Rusinowski Tobias Gruetzmacher +Olivier Bilodeau From bcefc59279d9d4d7034e0f25a3fcf561a456766d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 00:02:30 +0700 Subject: [PATCH 198/225] Credit @vierbergenlars for vijf.be (#12304) --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 4f383e214..fd1b8c9ea 100644 --- a/AUTHORS +++ b/AUTHORS @@ -206,3 +206,4 @@ Thomas Christlieb Marek Rusinowski Tobias Gruetzmacher Olivier Bilodeau +Lars Vierbergen From c64c03be35adae05740058e449c205120f89910d Mon Sep 17 00:00:00 2001 From: Xiao Di Guan Date: Thu, 16 Feb 2017 17:46:54 +1100 Subject: [PATCH 199/225] [twitch] Add basic support for two-factor authentication --- youtube_dl/extractor/twitch.py | 79 ++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 32 deletions(-) diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py index bbba394b0..096a2ac9d 100644 --- a/youtube_dl/extractor/twitch.py +++ b/youtube_dl/extractor/twitch.py @@ -12,7 +12,6 @@ from ..compat import ( compat_str, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, - compat_urlparse, ) from ..utils import ( clean_html, @@ -24,6 +23,7 @@ from ..utils import ( parse_iso8601, update_url_query, urlencode_postdata, + urljoin, ) @@ -32,7 +32,7 @@ class TwitchBaseIE(InfoExtractor): _API_BASE = 'https://api.twitch.tv' _USHER_BASE = 'https://usher.ttvnw.net' - _LOGIN_URL = 'http://www.twitch.tv/login' + _LOGIN_URL = 'https://www.twitch.tv/login' _CLIENT_ID = 'jzkbprff40iqj646a697cyrvl0zt2m6' _NETRC_MACHINE = 'twitch' @@ -64,6 +64,36 @@ class TwitchBaseIE(InfoExtractor): raise ExtractorError( 'Unable to login. 
Twitch said: %s' % message, expected=True) + def post_login_form(page, urlh, note, data): + form = self._hidden_inputs(page) + form.update(data) + + page_url = urlh.geturl() + post_url = self._search_regex( + r']+action=(["\'])(?P.+?)\1', page, + 'post url', default=page_url, group='url') + post_url = urljoin(page_url, post_url) + + headers = {'Referer': page_url} + + try: + response = self._download_json( + post_url, None, note, + data=urlencode_postdata(form), + headers=headers) + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: + response = self._parse_json( + e.cause.read().decode('utf-8'), None) + fail(response['message']) + raise + + if response.get('redirect'): + redirect_url = urljoin(post_url, response['redirect']) + return self._download_webpage_handle( + redirect_url, None, 'Downloading login redirect page', + headers=headers) + login_page, handle = self._download_webpage_handle( self._LOGIN_URL, None, 'Downloading login page') @@ -71,40 +101,25 @@ class TwitchBaseIE(InfoExtractor): if 'blacklist_message' in login_page: fail(clean_html(login_page)) - login_form = self._hidden_inputs(login_page) - - login_form.update({ + login_data = { 'username': username, 'password': password, - }) + } + redirect_res = post_login_form( + login_page, handle, 'Logging in as %s' % username, login_data) - redirect_url = handle.geturl() + if not redirect_res: + return + redirect_page, handle = redirect_res - post_url = self._search_regex( - r']+action=(["\'])(?P.+?)\1', login_page, - 'post url', default=redirect_url, group='url') - - if not post_url.startswith('http'): - post_url = compat_urlparse.urljoin(redirect_url, post_url) - - headers = {'Referer': redirect_url} - - try: - response = self._download_json( - post_url, None, 'Logging in as %s' % username, - data=urlencode_postdata(login_form), - headers=headers) - except ExtractorError as e: - if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: - response = self._parse_json( - e.cause.read().decode('utf-8'), None) - fail(response['message']) - raise - - if response.get('redirect'): - self._download_webpage( - response['redirect'], None, 'Downloading login redirect page', - headers=headers) + if re.search(r'(?i)]+id="two-factor-submit"', redirect_page) is not None: + # TODO: Add mechanism to request an SMS or phone call + tfa_token = self._get_tfa_info('two-factor authentication token') + tfa_data = { + 'authy_token': tfa_token, + 'remember_2fa': 'true', + } + post_login_form(redirect_page, handle, 'Submitting TFA token', tfa_data) def _prefer_source(self, formats): try: From 5316566edcbb1a2ac2e0559a1863b2204242b7d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 02:06:33 +0700 Subject: [PATCH 200/225] [twitch] Use better naming and simplify (closes #11974) --- youtube_dl/extractor/twitch.py | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py index 096a2ac9d..ed36336bd 100644 --- a/youtube_dl/extractor/twitch.py +++ b/youtube_dl/extractor/twitch.py @@ -64,7 +64,7 @@ class TwitchBaseIE(InfoExtractor): raise ExtractorError( 'Unable to login. 
Twitch said: %s' % message, expected=True) - def post_login_form(page, urlh, note, data): + def login_step(page, urlh, note, data): form = self._hidden_inputs(page) form.update(data) @@ -88,11 +88,10 @@ class TwitchBaseIE(InfoExtractor): fail(response['message']) raise - if response.get('redirect'): - redirect_url = urljoin(post_url, response['redirect']) - return self._download_webpage_handle( - redirect_url, None, 'Downloading login redirect page', - headers=headers) + redirect_url = urljoin(post_url, response['redirect']) + return self._download_webpage_handle( + redirect_url, None, 'Downloading login redirect page', + headers=headers) login_page, handle = self._download_webpage_handle( self._LOGIN_URL, None, 'Downloading login page') @@ -101,25 +100,19 @@ class TwitchBaseIE(InfoExtractor): if 'blacklist_message' in login_page: fail(clean_html(login_page)) - login_data = { - 'username': username, - 'password': password, - } - redirect_res = post_login_form( - login_page, handle, 'Logging in as %s' % username, login_data) - - if not redirect_res: - return - redirect_page, handle = redirect_res + redirect_page, handle = login_step( + login_page, handle, 'Logging in as %s' % username, { + 'username': username, + 'password': password, + }) if re.search(r'(?i)]+id="two-factor-submit"', redirect_page) is not None: # TODO: Add mechanism to request an SMS or phone call tfa_token = self._get_tfa_info('two-factor authentication token') - tfa_data = { + login_step(redirect_page, handle, 'Submitting TFA token', { 'authy_token': tfa_token, 'remember_2fa': 'true', - } - post_login_form(redirect_page, handle, 'Submitting TFA token', tfa_data) + }) def _prefer_source(self, formats): try: From 75027364ba35d8852f393f67860be817fff05541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 02:22:02 +0700 Subject: [PATCH 201/225] [ChangeLog] Actualize --- ChangeLog | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ChangeLog b/ChangeLog index 13ccb0f8f..7e22db53e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,7 +1,18 @@ version Extractors ++ [twitch] Add basic support for two-factor authentication (#11974) ++ [vier] Add support for vijf.be (#12304) ++ [redbulltv] Add support for redbull.tv (#3919, #11948) * [douyutv] Switch to the PC API to escape the 5-min limitation (#12316) ++ [generic] Add support for rutube embeds ++ [rutube] Relax URL regular expression ++ [vrak] Add support for vrak.tv (#11452) ++ [brightcove:new] Add ability to smuggle geo_countries into URL ++ [brightcove:new] Raise GeoRestrictedError +* [go] Relax URL regular expression (#12341) +* [24video] Use original host for requests (#12339) +* [ruutu] Disable DASH formats (#12322) version 2017.03.02 From afa4597618462df01b47febcd64c531f8ffdd63a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 02:23:08 +0700 Subject: [PATCH 202/225] release 2017.03.05 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- docs/supportedsites.md | 2 ++ youtube_dl/version.py | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index fec4152e3..988d0d81b 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.02*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. 
Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.02** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.05*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. +- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.05** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.03.02 +[debug] youtube-dl version 2017.03.05 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 7e22db53e..2f3ec1b47 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version +version 2017.03.05 Extractors + [twitch] Add basic support for two-factor authentication (#11974) diff --git a/docs/supportedsites.md b/docs/supportedsites.md index a08e00fce..f02c4dea0 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -626,6 +626,7 @@ - **RaiTV** - **RBMARadio** - **RDS**: RDS.ca + - **RedBullTV** - **RedTube** - **RegioTV** - **RENTV** @@ -916,6 +917,7 @@ - **VoxMedia** - **Vporn** - **vpro**: npo.nl and ntr.nl + - **Vrak** - **VRT** - **vube**: Vube.com - **VuClip** diff --git a/youtube_dl/version.py b/youtube_dl/version.py index f4c8d3d5f..215f9a3c1 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.03.02' +__version__ = '2017.03.05' From 6d0fe752bfeaa9a758099315f006bc15acd1ae76 Mon Sep 17 00:00:00 2001 From: John Hawkinson Date: Sat, 4 Mar 2017 22:19:44 -0500 Subject: [PATCH 203/225] [external:ffmpeg] In test harness, limit to 10k download size Otherwise, if you screw up a playlist test by including a playlist dictionary key, you'll be there for eons while it downloads all the files before erroring out. 
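An illustrative sketch of the effect (assumptions: ffmpeg's standard `-fs` output size limit, a cap of roughly 10 KiB in the spirit of the downloader's test file size, and a hypothetical helper that is not part of youtube-dl):

    # Sketch only: how a test-mode size cap changes an ffmpeg stream copy.
    import subprocess

    TEST_FILE_SIZE = 10241  # assumed cap, expressed in bytes (~10 KiB)

    def ffmpeg_copy(url, outfile, test=False):
        args = ['ffmpeg', '-y', '-i', url, '-c', 'copy']
        if test:
            # -fs limits the size of the *output* file in bytes, so a test
            # run stops almost immediately instead of downloading every
            # entry of a misconfigured playlist test in full.
            args += ['-fs', str(TEST_FILE_SIZE)]
        return subprocess.call(args + [outfile])

With the cap in place a test download is truncated as soon as the output file reaches the limit, so a broken playlist test fails fast instead of fetching everything first.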
--- youtube_dl/downloader/external.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/youtube_dl/downloader/external.py b/youtube_dl/downloader/external.py index bdd3545a2..127a92d20 100644 --- a/youtube_dl/downloader/external.py +++ b/youtube_dl/downloader/external.py @@ -270,6 +270,10 @@ class FFmpegFD(ExternalFD): args += ['-rtmp_live', 'live'] args += ['-i', url, '-c', 'copy'] + + if self.params.get('test', False): + args += ['-fs', compat_str(self._TEST_FILE_SIZE)] # -fs limit_size (output), expressed in bytes + if protocol in ('m3u8', 'm3u8_native'): if self.params.get('hls_use_mpegts', False) or tmpfilename == '-': args += ['-f', 'mpegts'] From a50862b7355544d3fd8473bf3ff35e9c6643d789 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 10:24:29 +0700 Subject: [PATCH 204/225] [downloader/external] Add missing import and PEP8 --- youtube_dl/downloader/external.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/youtube_dl/downloader/external.py b/youtube_dl/downloader/external.py index 127a92d20..e13cf547d 100644 --- a/youtube_dl/downloader/external.py +++ b/youtube_dl/downloader/external.py @@ -6,7 +6,10 @@ import sys import re from .common import FileDownloader -from ..compat import compat_setenv +from ..compat import ( + compat_setenv, + compat_str, +) from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS from ..utils import ( cli_option, @@ -272,7 +275,7 @@ class FFmpegFD(ExternalFD): args += ['-i', url, '-c', 'copy'] if self.params.get('test', False): - args += ['-fs', compat_str(self._TEST_FILE_SIZE)] # -fs limit_size (output), expressed in bytes + args += ['-fs', compat_str(self._TEST_FILE_SIZE)] if protocol in ('m3u8', 'm3u8_native'): if self.params.get('hls_use_mpegts', False) or tmpfilename == '-': From ed0cf9b38394b28bae5f05fd6b00c85a9c0e6755 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 23:22:27 +0700 Subject: [PATCH 205/225] [extractor/common] Move jwplayer formats extraction in separate method --- youtube_dl/extractor/common.py | 106 +++++++++++++++++---------------- 1 file changed, 56 insertions(+), 50 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index eb3c091aa..9b73a948c 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -2198,56 +2198,7 @@ class InfoExtractor(object): this_video_id = video_id or video_data['mediaid'] - formats = [] - for source in video_data['sources']: - source_url = self._proto_relative_url(source['file']) - if base_url: - source_url = compat_urlparse.urljoin(base_url, source_url) - source_type = source.get('type') or '' - ext = mimetype2ext(source_type) or determine_ext(source_url) - if source_type == 'hls' or ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( - source_url, this_video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False)) - elif ext == 'mpd': - formats.extend(self._extract_mpd_formats( - source_url, this_video_id, mpd_id=mpd_id, fatal=False)) - # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67 - elif source_type.startswith('audio') or ext in ('oga', 'aac', 'mp3', 'mpeg', 'vorbis'): - formats.append({ - 'url': source_url, - 'vcodec': 'none', - 'ext': ext, - }) - else: - height = int_or_none(source.get('height')) - if height is None: - # Often no height is provided but there is a label in - # format like 1080p. 
- height = int_or_none(self._search_regex( - r'^(\d{3,})[pP]$', source.get('label') or '', - 'height', default=None)) - a_format = { - 'url': source_url, - 'width': int_or_none(source.get('width')), - 'height': height, - 'ext': ext, - } - if source_url.startswith('rtmp'): - a_format['ext'] = 'flv' - - # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as - # of jwplayer.flash.swf - rtmp_url_parts = re.split( - r'((?:mp4|mp3|flv):)', source_url, 1) - if len(rtmp_url_parts) == 3: - rtmp_url, prefix, play_path = rtmp_url_parts - a_format.update({ - 'url': rtmp_url, - 'play_path': prefix + play_path, - }) - if rtmp_params: - a_format.update(rtmp_params) - formats.append(a_format) + formats = self._parse_jwplayer_formats(video_data['sources'], this_video_id) self._sort_formats(formats) subtitles = {} @@ -2278,6 +2229,61 @@ class InfoExtractor(object): else: return self.playlist_result(entries) + def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None, + m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): + formats = [] + for source in jwplayer_sources_data : + source_url = self._proto_relative_url(source['file']) + if base_url: + source_url = compat_urlparse.urljoin(base_url, source_url) + source_type = source.get('type') or '' + ext = mimetype2ext(source_type) or determine_ext(source_url) + if source_type == 'hls' or ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + source_url, video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False)) + elif ext == 'mpd': + formats.extend(self._extract_mpd_formats( + source_url, video_id, mpd_id=mpd_id, fatal=False)) + # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67 + elif source_type.startswith('audio') or ext in ('oga', 'aac', 'mp3', 'mpeg', 'vorbis'): + formats.append({ + 'url': source_url, + 'vcodec': 'none', + 'ext': ext, + }) + else: + height = int_or_none(source.get('height')) + if height is None: + # Often no height is provided but there is a label in + # format like 1080p. 
+ height = int_or_none(self._search_regex( + r'^(\d{3,})[pP]$', source.get('label') or '', + 'height', default=None)) + a_format = { + 'url': source_url, + 'width': int_or_none(source.get('width')), + 'height': height, + 'ext': ext, + } + if source_url.startswith('rtmp'): + a_format['ext'] = 'flv' + + # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as + # of jwplayer.flash.swf + rtmp_url_parts = re.split( + r'((?:mp4|mp3|flv):)', source_url, 1) + if len(rtmp_url_parts) == 3: + rtmp_url, prefix, play_path = rtmp_url_parts + a_format.update({ + 'url': rtmp_url, + 'play_path': prefix + play_path, + }) + if rtmp_params: + a_format.update(rtmp_params) + formats.append(a_format) + return formats + + def _live_title(self, name): """ Generate the title for a live video """ now = datetime.datetime.now() From 0236cd0dfde1dda540c1067a9c5982d482005c47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 23:25:03 +0700 Subject: [PATCH 206/225] [extractor/common] Improve height extraction and extract bitrate --- youtube_dl/extractor/common.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 9b73a948c..2887db0c3 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -2240,12 +2240,14 @@ class InfoExtractor(object): ext = mimetype2ext(source_type) or determine_ext(source_url) if source_type == 'hls' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( - source_url, video_id, 'mp4', 'm3u8_native', m3u8_id=m3u8_id, fatal=False)) + source_url, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id=m3u8_id, fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( source_url, video_id, mpd_id=mpd_id, fatal=False)) # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67 - elif source_type.startswith('audio') or ext in ('oga', 'aac', 'mp3', 'mpeg', 'vorbis'): + elif source_type.startswith('audio') or ext in ( + 'oga', 'aac', 'mp3', 'mpeg', 'vorbis'): formats.append({ 'url': source_url, 'vcodec': 'none', @@ -2255,19 +2257,19 @@ class InfoExtractor(object): height = int_or_none(source.get('height')) if height is None: # Often no height is provided but there is a label in - # format like 1080p. + # format like "1080p", "720p SD", or 1080. 
height = int_or_none(self._search_regex( - r'^(\d{3,})[pP]$', source.get('label') or '', + r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''), 'height', default=None)) a_format = { 'url': source_url, 'width': int_or_none(source.get('width')), 'height': height, + 'tbr': int_or_none(source.get('bitrate')), 'ext': ext, } if source_url.startswith('rtmp'): a_format['ext'] = 'flv' - # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as # of jwplayer.flash.swf rtmp_url_parts = re.split( From 1a2192cb904ff42a309ab2c2477fc226f8651f33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 23:28:32 +0700 Subject: [PATCH 207/225] [extractor/common] Pass arguments to _parse_jwplayer_formats and PEP8 --- youtube_dl/extractor/common.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 2887db0c3..78dc5be24 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -2198,7 +2198,9 @@ class InfoExtractor(object): this_video_id = video_id or video_data['mediaid'] - formats = self._parse_jwplayer_formats(video_data['sources'], this_video_id) + formats = self._parse_jwplayer_formats( + video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id, + mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url) self._sort_formats(formats) subtitles = {} @@ -2232,7 +2234,7 @@ class InfoExtractor(object): def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None, m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None): formats = [] - for source in jwplayer_sources_data : + for source in jwplayer_sources_data: source_url = self._proto_relative_url(source['file']) if base_url: source_url = compat_urlparse.urljoin(base_url, source_url) @@ -2285,7 +2287,6 @@ class InfoExtractor(object): formats.append(a_format) return formats - def _live_title(self, name): """ Generate the title for a live video """ now = datetime.datetime.now() From 5dd376345b8c87e8c2130b80e73e690c5e721a28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sun, 5 Mar 2017 23:31:38 +0700 Subject: [PATCH 208/225] [tunepk] Add extractor (closes #12197, closes #12243) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/tunepk.py | 90 ++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 youtube_dl/extractor/tunepk.py diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index caf1dc766..b056dff53 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1000,6 +1000,7 @@ from .tunein import ( TuneInTopicIE, TuneInShortenerIE, ) +from .tunepk import TunePkIE from .turbo import TurboIE from .tutv import TutvIE from .tv2 import ( diff --git a/youtube_dl/extractor/tunepk.py b/youtube_dl/extractor/tunepk.py new file mode 100644 index 000000000..9d42651ce --- /dev/null +++ b/youtube_dl/extractor/tunepk.py @@ -0,0 +1,90 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + int_or_none, + try_get, + unified_timestamp, +) + + +class TunePkIE(InfoExtractor): + _VALID_URL = r'''(?x) + https?:// + (?: + (?:www\.)?tune\.pk/(?:video/|player/embed_player.php?.*?\bvid=)| + embed\.tune\.pk/play/ + ) + (?P\d+) + ''' + _TESTS = [{ + 'url': 'https://tune.pk/video/6919541/maudie-2017-international-trailer-1-ft-ethan-hawke-sally-hawkins', + 'md5': '0c537163b7f6f97da3c5dd1e3ef6dd55', + 
'info_dict': { + 'id': '6919541', + 'ext': 'mp4', + 'title': 'Maudie (2017) | International Trailer # 1 ft Ethan Hawke, Sally Hawkins', + 'description': 'md5:eb5a04114fafef5cec90799a93a2d09c', + 'thumbnail': r're:^https?://.*\.jpg$', + 'timestamp': 1487327564, + 'upload_date': '20170217', + 'uploader': 'Movie Trailers', + 'duration': 107, + 'view_count': int, + } + }, { + 'url': 'https://tune.pk/player/embed_player.php?vid=6919541&folder=2017/02/17/&width=600&height=350&autoplay=no', + 'only_matching': True, + }, { + 'url': 'https://embed.tune.pk/play/6919541?autoplay=no&ssl=yes&inline=true', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + 'https://tune.pk/video/%s' % video_id, video_id) + + details = self._parse_json( + self._search_regex( + r'new\s+TunePlayer\(({.+?})\)\s*;\s*\n', webpage, 'tune player'), + video_id)['details'] + + video = details['video'] + title = video.get('title') or self._og_search_title( + webpage, default=None) or self._html_search_meta( + 'title', webpage, 'title', fatal=True) + + formats = self._parse_jwplayer_formats( + details['player']['sources'], video_id) + self._sort_formats(formats) + + description = self._og_search_description( + webpage, default=None) or self._html_search_meta( + 'description', webpage, 'description') + + thumbnail = video.get('thumb') or self._og_search_thumbnail( + webpage, default=None) or self._html_search_meta( + 'thumbnail', webpage, 'thumbnail') + + timestamp = unified_timestamp(video.get('date_added')) + uploader = try_get( + video, lambda x: x['uploader']['name'], + compat_str) or self._html_search_meta('author', webpage, 'author') + + duration = int_or_none(video.get('duration')) + view_count = int_or_none(video.get('views')) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'timestamp': timestamp, + 'uploader': uploader, + 'duration': duration, + 'view_count': view_count, + 'formats': formats, + } From d2b64e04b475b6ef7e5fb74d92ae9a35284fc35a Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Mon, 6 Mar 2017 00:32:53 +0800 Subject: [PATCH 209/225] [addanime] Skip an invalid test --- youtube_dl/extractor/addanime.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/addanime.py b/youtube_dl/extractor/addanime.py index 55a9322a7..9f8a71262 100644 --- a/youtube_dl/extractor/addanime.py +++ b/youtube_dl/extractor/addanime.py @@ -25,7 +25,8 @@ class AddAnimeIE(InfoExtractor): 'ext': 'mp4', 'description': 'One Piece 606', 'title': 'One Piece 606', - } + }, + 'skip': 'Video is gone', }, { 'url': 'http://add-anime.net/video/MDUGWYKNGBD8/One-Piece-687', 'only_matching': True, From fc11ad383311d721179483eea2e289e3a236e457 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 6 Mar 2017 03:21:03 +0700 Subject: [PATCH 210/225] [drtv:live] Bypass geo restriction --- youtube_dl/extractor/drtv.py | 1 + 1 file changed, 1 insertion(+) diff --git a/youtube_dl/extractor/drtv.py b/youtube_dl/extractor/drtv.py index e966d7483..b879f2c2b 100644 --- a/youtube_dl/extractor/drtv.py +++ b/youtube_dl/extractor/drtv.py @@ -156,6 +156,7 @@ class DRTVIE(InfoExtractor): class DRTVLiveIE(InfoExtractor): IE_NAME = 'drtv:live' _VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv|TV)/live/(?P[\da-z-]+)' + _GEO_COUNTRIES = ['DK'] _TEST = { 'url': 'https://www.dr.dk/tv/live/dr1', 'info_dict': { From 96182695e4e37795a30ab143129c91dab18a9865 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 6 Mar 2017 03:23:01 +0700 Subject: [PATCH 211/225] [drtv] Add geo countries to GeoRestrictedError --- youtube_dl/extractor/drtv.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/drtv.py b/youtube_dl/extractor/drtv.py index b879f2c2b..e4917014a 100644 --- a/youtube_dl/extractor/drtv.py +++ b/youtube_dl/extractor/drtv.py @@ -15,6 +15,8 @@ from ..utils import ( class DRTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv/se|nyheder|radio/ondemand)/(?:[^/]+/)*(?P[\da-z-]+)(?:[/#?]|$)' + _GEO_BYPASS = False + _GEO_COUNTRIES = ['DK'] IE_NAME = 'drtv' _TESTS = [{ 'url': 'https://www.dr.dk/tv/se/boern/ultra/klassen-ultra/klassen-darlig-taber-10', @@ -137,7 +139,7 @@ class DRTVIE(InfoExtractor): if not formats and restricted_to_denmark: self.raise_geo_restricted( 'Unfortunately, DR is not allowed to show this program outside Denmark.', - expected=True) + countries=self._GEO_COUNTRIES) self._sort_formats(formats) From 4b5de77bdb7765df4797bf068592926285ba709a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 6 Mar 2017 03:57:46 +0700 Subject: [PATCH 212/225] [utils] Process bytestrings in urljoin (closes #12369) --- test/test_utils.py | 3 +++ youtube_dl/utils.py | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/test/test_utils.py b/test/test_utils.py index aefd94518..173c49514 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -455,6 +455,9 @@ class TestUtil(unittest.TestCase): def test_urljoin(self): self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') + self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') + self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt') + self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt') self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt') diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 8738aa249..d293c7498 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -1748,11 +1748,16 @@ def base_url(url): def urljoin(base, path): + if isinstance(path, bytes): + path = path.decode('utf-8') if not isinstance(path, compat_str) or not path: return None if re.match(r'^(?:https?:)?//', path): return path - if not isinstance(base, compat_str) or not re.match(r'^(?:https?:)?//', base): + if isinstance(base, bytes): + base = base.decode('utf-8') + if not isinstance(base, compat_str) or not re.match( + r'^(?:https?:)?//', base): return None return compat_urlparse.urljoin(base, path) From 3f116b189bb990529a1a18ba7a3829b1592cfecd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 6 Mar 2017 04:01:21 +0700 Subject: [PATCH 213/225] [ChangeLog] Actualize --- ChangeLog | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ChangeLog b/ChangeLog index 2f3ec1b47..c1f43a625 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,17 @@ +version + +Core ++ [utils] Process bytestrings in urljoin (#12369) +* [extractor/common] Improve height extraction and extract bitrate +* [extractor/common] Move jwplayer formats extraction in separate method ++ [external:ffmpeg] Limit test download size to 10KiB (#12362) + +Extractors ++ [drtv] Add geo countries to GeoRestrictedError ++ 
[drtv:live] Bypass geo restriction ++ [tunepk] Add extractor (#12197, #12243) + + version 2017.03.05 Extractors From 16647026264b35a40ecd56f5d9392d0643a2066c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 6 Mar 2017 04:04:39 +0700 Subject: [PATCH 214/225] release 2017.03.06 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- docs/supportedsites.md | 1 + youtube_dl/version.py | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 988d0d81b..a36d7322d 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.05*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.05** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.06*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. +- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.06** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.03.05 +[debug] youtube-dl version 2017.03.06 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index c1f43a625..648e04856 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version +version 2017.03.06 Core + [utils] Process bytestrings in urljoin (#12369) diff --git a/docs/supportedsites.md b/docs/supportedsites.md index f02c4dea0..85c59ca81 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -798,6 +798,7 @@ - **tunein:program** - **tunein:station** - **tunein:topic** + - **TunePk** - **Turbo** - **Tutv** - **tv.dfb.de** diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 215f9a3c1..71a984913 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.03.05' +__version__ = '2017.03.06' From da92da4b886a0e44fe28591ddf5b746fba1c9ade Mon Sep 17 00:00:00 2001 From: denneboomyo Date: Mon, 6 Mar 2017 11:00:17 +0100 Subject: [PATCH 215/225] Openload fix extraction (#12357) * Fix extraction --- youtube_dl/extractor/openload.py | 43 +++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py index fc7ff43a6..25f6a9aca 100644 --- a/youtube_dl/extractor/openload.py +++ b/youtube_dl/extractor/openload.py @@ -75,22 +75,37 @@ class OpenloadIE(InfoExtractor): ']+id="[^"]+"[^>]*>([0-9A-Za-z]+)', webpage, 'openload ID') - first_char = 
int(ol_id[0]) - urlcode = [] - num = 1 + video_url_chars = [] - while num < len(ol_id): - i = ord(ol_id[num]) - key = 0 - if i <= 90: - key = i - 65 - elif i >= 97: - key = 25 + i - 97 - urlcode.append((key, compat_chr(int(ol_id[num + 2:num + 5]) // int(ol_id[num + 1]) - first_char))) - num += 5 + first_char = ord(ol_id[0]) + key = first_char - 55 + maxKey = max(2, key) + key = min(maxKey, len(ol_id) - 14) + t = ol_id[key:key + 12] - video_url = 'https://openload.co/stream/' + ''.join( - [value for _, value in sorted(urlcode, key=lambda x: x[0])]) + hashMap = {} + v = ol_id.replace(t, "") + h = 0 + + while h < len(t): + f = t[h:h + 2] + i = int(f, 16) + hashMap[h / 2] = i + h += 2 + + h = 0 + + while h < len(v): + B = v[h:h + 2] + i = int(B, 16) + index = (h / 2) % 6 + A = hashMap[index] + i = i ^ A + video_url_chars.append(compat_chr(i)) + h += 2 + + video_url = 'https://openload.co/stream/%s?mime=true' + video_url = video_url % (''.join(video_url_chars)) title = self._og_search_title(webpage, default=None) or self._search_regex( r']+class=["\']title["\'][^>]*>([^<]+)', webpage, From 92cb5763f440d0ca1627f8120a7ce29598eb9484 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Mon, 6 Mar 2017 18:04:19 +0800 Subject: [PATCH 216/225] [ChangeLog] Update after #12357 --- ChangeLog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 648e04856..5fb4c20af 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version + +Extractors +* [openload] Fix extraction (#10408, #12357) + + version 2017.03.06 Core From 54a3a8827baf71c553723b6766e676369e9c743c Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Mon, 9 Jan 2017 02:36:39 +0800 Subject: [PATCH 217/225] [__init__] Metadata should be added after conversion Fixes #5594 --- ChangeLog | 4 ++++ youtube_dl/__init__.py | 8 +++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5fb4c20af..ad65505c5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,9 @@ version +Core +* [__init__] Metadata are now added after conversion (#5594) + + Extractors * [openload] Fix extraction (#10408, #12357) diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index 0c401baa6..ad5f13d2b 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -242,14 +242,11 @@ def _real_main(argv=None): # PostProcessors postprocessors = [] - # Add the metadata pp first, the other pps will copy it if opts.metafromtitle: postprocessors.append({ 'key': 'MetadataFromTitle', 'titleformat': opts.metafromtitle }) - if opts.addmetadata: - postprocessors.append({'key': 'FFmpegMetadata'}) if opts.extractaudio: postprocessors.append({ 'key': 'FFmpegExtractAudio', @@ -279,6 +276,11 @@ def _real_main(argv=None): }) if not already_have_thumbnail: opts.writethumbnail = True + # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and + # FFmpegExtractAudioPP as containers before conversion may not support + # metadata (3gp, webm, etc.) 
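Note on the ordering comment above: the constraint is easier to see end to end. The helper below is a simplified, hypothetical illustration of the resulting postprocessor order, not the real youtube-dl option handling; only the relative position of the keys mirrors the patch.

def build_postprocessors(extract_audio=False, recode_video=False,
                         add_metadata=False, xattrs=False):
    pps = []
    # Container conversions come first...
    if extract_audio:
        pps.append({'key': 'FFmpegExtractAudio'})
    if recode_video:
        pps.append({'key': 'FFmpegVideoConvertor'})
    # ...then metadata embedding, so the tags land in the final container
    # rather than in an intermediate one that may not support them.
    if add_metadata:
        pps.append({'key': 'FFmpegMetadata'})
    # ...and finally anything that must see the finished file contents.
    if xattrs:
        pps.append({'key': 'XAttrMetadata'})
    return pps

# build_postprocessors(extract_audio=True, add_metadata=True, xattrs=True) ->
# [{'key': 'FFmpegExtractAudio'}, {'key': 'FFmpegMetadata'}, {'key': 'XAttrMetadata'}]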
+ if opts.addmetadata: + postprocessors.append({'key': 'FFmpegMetadata'}) # XAttrMetadataPP should be run after post-processors that may change file # contents if opts.xattrs: From e30ccf7047eb3b8dff8b778790f9b084e6d7f42e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Mon, 6 Mar 2017 23:05:38 +0700 Subject: [PATCH 218/225] [soundcloud] Update client id (closes #12376) --- youtube_dl/extractor/soundcloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py index b3aa4ce26..0ee4a8ff8 100644 --- a/youtube_dl/extractor/soundcloud.py +++ b/youtube_dl/extractor/soundcloud.py @@ -121,7 +121,7 @@ class SoundcloudIE(InfoExtractor): }, ] - _CLIENT_ID = 'fDoItMDbsbZz8dY16ZzARCZmzgHBPotA' + _CLIENT_ID = '2t9loNQH90kzJcsFCODdigxfp325aq4z' _IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf' @staticmethod From 80146dcc6c27b46fb8340d3285d95f2f7674fb0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 7 Mar 2017 03:57:54 +0700 Subject: [PATCH 219/225] [ChangeLog] Actualize --- ChangeLog | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index ad65505c5..622086440 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,10 +1,10 @@ version Core -* [__init__] Metadata are now added after conversion (#5594) - +* Metadata are now added after conversion (#5594) Extractors +* [soundcloud] Update client id (#12376) * [openload] Fix extraction (#10408, #12357) From dccd0ab35d1acc45e36241c505b5325d96ca501d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 7 Mar 2017 03:59:22 +0700 Subject: [PATCH 220/225] release 2017.03.07 --- .github/ISSUE_TEMPLATE.md | 6 +++--- ChangeLog | 2 +- youtube_dl/version.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index a36d7322d..eb69696c8 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -6,8 +6,8 @@ --- -### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.06*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. -- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.06** +### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.03.07*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. 
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2017.03.07** ### Before submitting an *issue* make sure you have: - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections @@ -35,7 +35,7 @@ $ youtube-dl -v [debug] User config: [] [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 -[debug] youtube-dl version 2017.03.06 +[debug] youtube-dl version 2017.03.07 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 [debug] Proxy map: {} diff --git a/ChangeLog b/ChangeLog index 622086440..601aad902 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,4 @@ -version +version 2017.03.07 Core * Metadata are now added after conversion (#5594) diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 71a984913..bd451bf81 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2017.03.06' +__version__ = '2017.03.07' From d7d7f84c9565d682119d081324c26eb361e05aa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 7 Mar 2017 04:03:52 +0700 Subject: [PATCH 221/225] Credit @benages for redbull.tv (#11948) --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index fd1b8c9ea..74abda016 100644 --- a/AUTHORS +++ b/AUTHORS @@ -207,3 +207,4 @@ Marek Rusinowski Tobias Gruetzmacher Olivier Bilodeau Lars Vierbergen +Juanjo Benages From 9df53ea36ec84c6ab5a4a672d120f7850e2363ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 7 Mar 2017 04:04:49 +0700 Subject: [PATCH 222/225] Credit @puxlit for twitch 2fa (#11974) --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index 74abda016..273a6a034 100644 --- a/AUTHORS +++ b/AUTHORS @@ -208,3 +208,4 @@ Tobias Gruetzmacher Olivier Bilodeau Lars Vierbergen Juanjo Benages +Xiao Di Guan From fe646a2f106fef0651158c460dd766c236e2f5db Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Tue, 7 Mar 2017 15:34:06 +0800 Subject: [PATCH 223/225] [twitch] PEP8 --- youtube_dl/extractor/twitch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py index ed36336bd..2daf9dfac 100644 --- a/youtube_dl/extractor/twitch.py +++ b/youtube_dl/extractor/twitch.py @@ -104,7 +104,7 @@ class TwitchBaseIE(InfoExtractor): login_page, handle, 'Logging in as %s' % username, { 'username': username, 'password': password, - }) + }) if re.search(r'(?i)]+id="two-factor-submit"', redirect_page) is not None: # TODO: Add mechanism to request an SMS or phone call From 2e76bdc850ed5d5ffe95578b576b8fb66dcea8f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 7 Mar 2017 22:59:33 +0700 Subject: [PATCH 224/225] [brightcove:legacy] Relax videoPlayer validation check (closes #12381) --- youtube_dl/extractor/brightcove.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index 66c8cb219..46ef8e605 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -193,7 +193,13 @@ class BrightcoveLegacyIE(InfoExtractor): if videoPlayer is not None: 
if isinstance(videoPlayer, list): videoPlayer = videoPlayer[0] - if not (videoPlayer.isdigit() or videoPlayer.startswith('ref:')): + videoPlayer = videoPlayer.strip() + # UUID is also possible for videoPlayer (e.g. + # http://www.popcornflix.com/hoodies-vs-hooligans/7f2d2b87-bbf2-4623-acfb-ea942b4f01dd + # or http://www8.hp.com/cn/zh/home.html) + if not (re.match( + r'^(?:\d+|[\da-fA-F]{8}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{4}-?[\da-fA-F]{12})$', + videoPlayer) or videoPlayer.startswith('ref:')): return None params['@videoPlayer'] = videoPlayer linkBase = find_param('linkBaseURL') From b68a812ea839e44148516a34a15193189e58ba77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Tue, 7 Mar 2017 23:00:21 +0700 Subject: [PATCH 225/225] [extractor/generic] Add test for brigthcove UUID-like videoPlayer --- youtube_dl/extractor/generic.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index ebab9509d..bc22421ae 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -449,6 +449,23 @@ class GenericIE(InfoExtractor): }, }], }, + { + # Brightcove with UUID in videoPlayer + 'url': 'http://www8.hp.com/cn/zh/home.html', + 'info_dict': { + 'id': '5255815316001', + 'ext': 'mp4', + 'title': 'Sprocket Video - China', + 'description': 'Sprocket Video - China', + 'uploader': 'HP-Video Gallery', + 'timestamp': 1482263210, + 'upload_date': '20161220', + 'uploader_id': '1107601872001', + }, + 'params': { + 'skip_download': True, # m3u8 download + }, + }, # ooyala video { 'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
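Note on the relaxed videoPlayer validation above: besides plain numeric ids ('ref:' ids keep their separate startswith check), the new pattern accepts UUIDs both with and without hyphens thanks to the optional '-?' separators, which is exactly what the HP test page added in patch 225 exercises. A quick self-contained check of the pattern taken from the patch:

import re

VIDEO_PLAYER_RE = (r'^(?:\d+|[\da-fA-F]{8}-?[\da-fA-F]{4}-?[\da-fA-F]{4}'
                   r'-?[\da-fA-F]{4}-?[\da-fA-F]{12})$')

for candidate in ('5255815316001',                           # numeric id
                  '7f2d2b87-bbf2-4623-acfb-ea942b4f01dd',    # UUID with hyphens
                  '7f2d2b87bbf24623acfbea942b4f01dd'):       # same UUID, no hyphens
    assert re.match(VIDEO_PLAYER_RE, candidate)

assert not re.match(VIDEO_PLAYER_RE, 'not-a-video-player-value')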