diff --git a/AUTHORS b/AUTHORS index 117b9c219..d5418dd37 100644 --- a/AUTHORS +++ b/AUTHORS @@ -129,3 +129,4 @@ Mister Hat Peter Ding jackyzy823 George Brighton +Remita Amine diff --git a/README.md b/README.md index e3452c9e1..93e7fb06f 100644 --- a/README.md +++ b/README.md @@ -108,7 +108,7 @@ which means you can modify it, redistribute it or use it however you like. --playlist-reverse Download playlist videos in reverse order --xattr-set-filesize Set file xattribute ytdl.filesize with expected filesize (experimental) --hls-prefer-native Use the native HLS downloader instead of ffmpeg (experimental) - --external-downloader COMMAND Use the specified external downloader. Currently supports aria2c,curl,wget + --external-downloader COMMAND Use the specified external downloader. Currently supports aria2c,curl,httpie,wget --external-downloader-args ARGS Give these arguments to the external downloader ## Filesystem Options: @@ -190,8 +190,8 @@ which means you can modify it, redistribute it or use it however you like. --all-formats Download all available video formats --prefer-free-formats Prefer free video formats unless a specific one is requested -F, --list-formats List all available formats - --youtube-skip-dash-manifest Do not download the DASH manifest on YouTube videos - --merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv.Ignored if no + --youtube-skip-dash-manifest Do not download the DASH manifests and related data on YouTube videos + --merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no merge is required ## Subtitle Options: diff --git a/docs/supportedsites.md b/docs/supportedsites.md index 9a50fbd1c..0ca06c71d 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -283,6 +283,7 @@ - **Motherless** - **Motorsport**: motorsport.com - **MovieClips** + - **MovieFap** - **Moviezine** - **movshare**: MovShare - **MPORA** @@ -383,6 +384,7 @@ - **Pyvideo** - **qqmusic** - **qqmusic:album** + - **qqmusic:playlist** - **qqmusic:singer** - **qqmusic:toplist** - **QuickVid** @@ -440,6 +442,8 @@ - **smotri:broadcast**: Smotri.com broadcasts - **smotri:community**: Smotri.com community videos - **smotri:user**: Smotri.com user videos + - **SnagFilms** + - **SnagFilmsEmbed** - **Snotr** - **Sohu** - **soompi** @@ -502,6 +506,7 @@ - **TheOnion** - **ThePlatform** - **TheSixtyOne** + - **ThisAmericanLife** - **ThisAV** - **THVideo** - **THVideoPlaylist** @@ -542,6 +547,7 @@ - **twitch:stream** - **twitch:video** - **twitch:vod** + - **TwitterCard** - **Ubu** - **udemy** - **udemy:course** diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index d44339200..a64b457bd 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -323,6 +323,7 @@ from .musicvault import MusicVaultIE from .muzu import MuzuTVIE from .myspace import MySpaceIE, MySpaceAlbumIE from .myspass import MySpassIE +from .myvi import MyviIE from .myvideo import MyVideoIE from .myvidster import MyVidsterIE from .nationalgeographic import NationalGeographicIE @@ -432,6 +433,7 @@ from .qqmusic import ( QQMusicSingerIE, QQMusicAlbumIE, QQMusicToplistIE, + QQMusicPlaylistIE, ) from .quickvid import QuickVidIE from .r7 import R7IE @@ -569,6 +571,7 @@ from .tf1 import TF1IE from .theonion import TheOnionIE from .theplatform import ThePlatformIE from .thesixtyone import 
TheSixtyOneIE +from .thisamericanlife import ThisAmericanLifeIE from .thisav import ThisAVIE from .tinypic import TinyPicIE from .tlc import TlcIE, TlcDeIE @@ -732,6 +735,7 @@ from .yandexmusic import ( YandexMusicPlaylistIE, ) from .yesjapan import YesJapanIE +from .yinyuetai import YinYueTaiIE from .ynet import YnetIE from .youjizz import YouJizzIE from .youku import YoukuIE diff --git a/youtube_dl/extractor/clipsyndicate.py b/youtube_dl/extractor/clipsyndicate.py index d07d544ea..8306d6fb7 100644 --- a/youtube_dl/extractor/clipsyndicate.py +++ b/youtube_dl/extractor/clipsyndicate.py @@ -1,7 +1,5 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor from ..utils import ( find_xpath_attr, @@ -10,9 +8,9 @@ from ..utils import ( class ClipsyndicateIE(InfoExtractor): - _VALID_URL = r'http://www\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)' + _VALID_URL = r'http://(?:chic|www)\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)' - _TEST = { + _TESTS = [{ 'url': 'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe', 'md5': '4d7d549451bad625e0ff3d7bd56d776c', 'info_dict': { @@ -22,11 +20,13 @@ class ClipsyndicateIE(InfoExtractor): 'duration': 612, 'thumbnail': 're:^https?://.+\.jpg', }, - } + }, { + 'url': 'http://chic.clipsyndicate.com/video/play/5844117/shark_attack', + 'only_matching': True, + }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) js_player = self._download_webpage( 'http://eplayer.clipsyndicate.com/embed/player.js?va_id=%s' % video_id, video_id, 'Downlaoding player') diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 81623bfe3..d859aea52 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -846,7 +846,8 @@ class InfoExtractor(object): def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None, entry_protocol='m3u8', preference=None, - m3u8_id=None, note=None, errnote=None): + m3u8_id=None, note=None, errnote=None, + fatal=True): formats = [{ 'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])), @@ -866,7 +867,10 @@ class InfoExtractor(object): m3u8_doc = self._download_webpage( m3u8_url, video_id, note=note or 'Downloading m3u8 information', - errnote=errnote or 'Failed to download m3u8 information') + errnote=errnote or 'Failed to download m3u8 information', + fatal=fatal) + if m3u8_doc is False: + return m3u8_doc last_info = None last_media = None kv_rex = re.compile( diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index 41f0c736d..73f1e22ef 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -27,7 +27,7 @@ from ..aes import ( class CrunchyrollIE(InfoExtractor): - _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)' + _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)' _NETRC_MACHINE = 'crunchyroll' _TESTS = [{ 'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513', @@ -45,6 +45,22 @@ class CrunchyrollIE(InfoExtractor): # rtmp 'skip_download': True, }, + }, { + 'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1', + 'info_dict': { + 'id': '589804', + 'ext': 'flv', + 'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11', + 'description':
'md5:fe2743efedb49d279552926d0bd0cd9e', + 'thumbnail': 're:^https?://.*\.jpg$', + 'uploader': 'Danny Choo Network', + 'upload_date': '20120213', + }, + 'params': { + # rtmp + 'skip_download': True, + }, + }, { 'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697', 'only_matching': True, @@ -251,16 +267,17 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage): stream_quality, stream_format = self._FORMAT_IDS[fmt] video_format = fmt + 'p' - streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/') - # urlencode doesn't work! - streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format + streamdata_req = compat_urllib_request.Request( + 'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s' + % (stream_id, stream_format, stream_quality), + compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8')) streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') - streamdata_req.add_header('Content-Length', str(len(streamdata_req.data))) streamdata = self._download_xml( streamdata_req, video_id, note='Downloading media info for %s' % video_format) - video_url = streamdata.find('./host').text - video_play_path = streamdata.find('./file').text + stream_info = streamdata.find('./{default}preload/stream_info') + video_url = stream_info.find('./host').text + video_play_path = stream_info.find('./file').text formats.append({ 'url': video_url, 'play_path': video_play_path, diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 96f0ed9ad..8852f0add 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -254,22 +254,30 @@ class DailymotionUserIE(DailymotionPlaylistIE): class DailymotionCloudIE(DailymotionBaseInfoExtractor): - _VALID_URL = r'http://api\.dmcloud\.net/embed/[^/]+/(?P<id>[^/?]+)' + _VALID_URL_PREFIX = r'http://api\.dmcloud\.net/(?:player/)?embed/' + _VALID_URL = r'%s[^/]+/(?P<id>[^/?]+)' % _VALID_URL_PREFIX + _VALID_EMBED_URL = r'%s[^/]+/[^\'"]+' % _VALID_URL_PREFIX - _TEST = { + _TESTS = [{ # From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html # Tested at FranceTvInfo_2 'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1', 'only_matching': True, - } + }, { + # http://www.francetvinfo.fr/societe/larguez-les-amarres-le-cobaturage-se-developpe_980101.html + 'url': 'http://api.dmcloud.net/player/embed/4e7343f894a6f677b10006b4/559545469473996d31429f06?auth=1467430263-0-90tglw2l-a3a4b64ed41efe48d7fccad85b8b8fda&autoplay=1', + 'only_matching': True, + }] @classmethod def _extract_dmcloud_url(self, webpage): - mobj = re.search(r'<iframe[^>]+src=[\'"](http://api\.dmcloud\.net/embed/[^/]+/[^\'"]+)[\'"]', webpage) + mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % self._VALID_EMBED_URL, webpage) if mobj: return mobj.group(1) - mobj = re.search(r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](http://api\.dmcloud\.net/embed/[^/]+/[^\'"]+)[\'"]', webpage) + mobj = re.search( + r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % self._VALID_EMBED_URL, + webpage) if mobj: return mobj.group(1) diff --git 
a/youtube_dl/extractor/drtuber.py b/youtube_dl/extractor/drtuber.py index 37c5c181f..639f9182c 100644 --- a/youtube_dl/extractor/drtuber.py +++ b/youtube_dl/extractor/drtuber.py @@ -36,25 +36,24 @@ class DrTuberIE(InfoExtractor): r'([^<]+)', r'([^<]+) - \d+'], + [r'<p[^>]+class="title_substrate">([^<]+)</p>', r'<title>([^<]+) - \d+'], webpage, 'title') thumbnail = self._html_search_regex( r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False) - like_count = str_to_int(self._html_search_regex( - r'<span id="rate_likes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>', - webpage, 'like count', fatal=False)) - dislike_count = str_to_int(self._html_search_regex( - r'<span id="rate_dislikes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>', - webpage, 'like count', fatal=False)) - comment_count = str_to_int(self._html_search_regex( - r'<span class="comments_count">([\d,\.]+)</span>', - webpage, 'comment count', fatal=False)) + def extract_count(id_, name): + return str_to_int(self._html_search_regex( + r'<span[^>]+(?:class|id)="%s"[^>]*>([\d,\.]+)</span>' % id_, + webpage, '%s count' % name, fatal=False)) + + like_count = extract_count('rate_likes', 'like') + dislike_count = extract_count('rate_dislikes', 'dislike') + comment_count = extract_count('comments_count', 'comment') cats_str = self._search_regex( - r'<span>Categories:</span><div>(.+?)</div>', webpage, 'categories', fatal=False) + r'<div[^>]+class="categories_list">(.+?)</div>', webpage, 'categories', fatal=False) categories = [] if not cats_str else re.findall(r'<a title="([^"]+)"', cats_str) return { diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 32e41d13e..392ad3648 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -37,6 +37,7 @@ from .rutv import RUTVIE from .tvc import TVCIE from .sportbox import SportBoxEmbedIE from .smotri import SmotriIE +from .myvi import MyviIE from .condenast import CondeNastIE from .udn import UDNEmbedIE from .senateisvp import SenateISVPIE @@ -338,6 +339,17 @@ class GenericIE(InfoExtractor): 'skip_download': True, }, }, + # Myvi.ru embed + { + 'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1', + 'info_dict': { + 'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e', + 'ext': 'mp4', + 'title': 'Ужастики, русский трейлер (2015)', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 153, + } + }, # XHamster embed { 'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8', @@ -669,6 +681,18 @@ class GenericIE(InfoExtractor): 'title': 'John Carlson Postgame 2/25/15', }, }, + # Kaltura embed (different embed code) + { + 'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014', + 'info_dict': { + 'id': '1_a52wc67y', + 'ext': 'flv', + 'upload_date': '20150127', + 'uploader_id': 'PremierMedia', + 'timestamp': int, + 'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? 
Conference 2014', + }, + }, # Eagle.Platform embed (generic URL) { 'url': 'http://lenta.ru/news/2015/03/06/navalny/', @@ -1413,6 +1437,11 @@ class GenericIE(InfoExtractor): if smotri_url: return self.url_result(smotri_url, 'Smotri') + # Look for embedded Myvi.ru player + myvi_url = MyviIE._extract_url(webpage) + if myvi_url: + return self.url_result(myvi_url) + # Look for embeded soundcloud player mobj = re.search( r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"', @@ -1492,8 +1521,8 @@ class GenericIE(InfoExtractor): return self.url_result(mobj.group('url'), 'Zapiks') # Look for Kaltura embeds - mobj = re.search( - r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_id'\s*:\s*'(?P<id>[^']+)',", webpage) + mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_id'\s*:\s*'(?P<id>[^']+)',", webpage) or + re.search(r'(?s)(["\'])(?:https?:)?//cdnapisec\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?\1.*?entry_id\s*:\s*(["\'])(?P<id>[^\2]+?)\2', webpage)) if mobj is not None: return self.url_result('kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), 'Kaltura') diff --git a/youtube_dl/extractor/gfycat.py b/youtube_dl/extractor/gfycat.py index 397f1d42e..884700c52 100644 --- a/youtube_dl/extractor/gfycat.py +++ b/youtube_dl/extractor/gfycat.py @@ -6,12 +6,13 @@ from ..utils import ( int_or_none, float_or_none, qualities, + ExtractorError, ) class GfycatIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?P<id>[^/?#]+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?gfycat\.com/(?:ifr/)?(?P<id>[^/?#]+)' + _TESTS = [{ 'url': 'http://gfycat.com/DeadlyDecisiveGermanpinscher', 'info_dict': { 'id': 'DeadlyDecisiveGermanpinscher', @@ -27,14 +28,33 @@ class GfycatIE(InfoExtractor): 'categories': list, 'age_limit': 0, } - } + }, { + 'url': 'http://gfycat.com/ifr/JauntyTimelyAmazontreeboa', + 'info_dict': { + 'id': 'JauntyTimelyAmazontreeboa', + 'ext': 'mp4', + 'title': 'JauntyTimelyAmazontreeboa', + 'timestamp': 1411720126, + 'upload_date': '20140926', + 'uploader': 'anonymous', + 'duration': 3.52, + 'view_count': int, + 'like_count': int, + 'dislike_count': int, + 'categories': list, + 'age_limit': 0, + } + }] def _real_extract(self, url): video_id = self._match_id(url) gfy = self._download_json( 'http://gfycat.com/cajax/get/%s' % video_id, - video_id, 'Downloading video info')['gfyItem'] + video_id, 'Downloading video info') + if 'error' in gfy: + raise ExtractorError('Gfycat said: ' + gfy['error'], expected=True) + gfy = gfy['gfyItem'] title = gfy.get('title') or gfy['gfyName'] description = gfy.get('description') diff --git a/youtube_dl/extractor/hentaistigma.py b/youtube_dl/extractor/hentaistigma.py index 63d87b74c..f5aa73d18 100644 --- a/youtube_dl/extractor/hentaistigma.py +++ b/youtube_dl/extractor/hentaistigma.py @@ -1,7 +1,5 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor @@ -19,20 +17,19 @@ class HentaiStigmaIE(InfoExtractor): } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( - r'<h2 class="posttitle"><a[^>]*>([^<]+)</a>', + r'<h2[^>]+class="posttitle"[^>]*><a[^>]*>([^<]+)</a>', webpage, 'title') wrap_url = self._html_search_regex( - r'<iframe src="([^"]+mp4)"', webpage, 'wrapper url') + r'<iframe[^>]+src="([^"]+mp4)"', 
webpage, 'wrapper url') wrap_webpage = self._download_webpage(wrap_url, video_id) video_url = self._html_search_regex( - r'clip:\s*{\s*url: "([^"]*)"', wrap_webpage, 'video url') + r'file\s*:\s*"([^"]+)"', wrap_webpage, 'video url') return { 'id': video_id, diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py index 3f7d6666c..16677f179 100644 --- a/youtube_dl/extractor/howcast.py +++ b/youtube_dl/extractor/howcast.py @@ -1,8 +1,7 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor +from ..utils import parse_iso8601 class HowcastIE(InfoExtractor): @@ -13,29 +12,31 @@ class HowcastIE(InfoExtractor): 'info_dict': { 'id': '390161', 'ext': 'mp4', - 'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.', 'title': 'How to Tie a Square Knot Properly', - } + 'description': 'md5:dbe792e5f6f1489027027bf2eba188a3', + 'timestamp': 1276081287, + 'upload_date': '20100609', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) + video_id = self._match_id(url) - video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) - self.report_extraction(video_id) - - video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)', - webpage, 'video URL') - - video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'', - webpage, 'description', fatal=False) + embed_code = self._search_regex( + r'<iframe[^>]+src="[^"]+\bembed_code=([^\b]+)\b', + webpage, 'ooyala embed code') return { + '_type': 'url_transparent', + 'ie_key': 'Ooyala', + 'url': 'ooyala:%s' % embed_code, 'id': video_id, - 'url': video_url, - 'title': self._og_search_title(webpage), - 'description': video_description, - 'thumbnail': self._og_search_thumbnail(webpage), + 'timestamp': parse_iso8601(self._html_search_meta( + 'article:published_time', webpage, 'timestamp')), } diff --git a/youtube_dl/extractor/ina.py b/youtube_dl/extractor/ina.py index 0847074ee..65712abc2 100644 --- a/youtube_dl/extractor/ina.py +++ b/youtube_dl/extractor/ina.py @@ -7,7 +7,7 @@ from .common import InfoExtractor class InaIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?ina\.fr/video/(?P<id>I?[A-Z0-9]+)' + _VALID_URL = r'https?://(?:www\.)?ina\.fr/video/(?P<id>I?[A-Z0-9]+)' _TEST = { 'url': 'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html', 'md5': 'a667021bf2b41f8dc6049479d9bb38a3', diff --git a/youtube_dl/extractor/infoq.py b/youtube_dl/extractor/infoq.py index 117a7faf6..91a1b3ccb 100644 --- a/youtube_dl/extractor/infoq.py +++ b/youtube_dl/extractor/infoq.py @@ -5,6 +5,7 @@ import base64 from .common import InfoExtractor from ..compat import ( compat_urllib_parse, + compat_urlparse, ) @@ -45,7 +46,7 @@ class InfoQIE(InfoExtractor): video_id, extension = video_filename.split('.') http_base = self._search_regex( - r'EXPRESSINSTALL_SWF\s*=\s*"(https?://[^/"]+/)', webpage, + r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage, 'HTTP base URL') formats = [{ @@ -55,7 +56,7 @@ class InfoQIE(InfoExtractor): 'play_path': playpath, }, { 'format_id': 'http', - 'url': http_base + real_id, + 'url': compat_urlparse.urljoin(url, http_base) + real_id, }] self._sort_formats(formats) diff --git a/youtube_dl/extractor/myvi.py 
b/youtube_dl/extractor/myvi.py new file mode 100644 index 000000000..4c65be122 --- /dev/null +++ b/youtube_dl/extractor/myvi.py @@ -0,0 +1,60 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .vimple import SprutoBaseIE + + +class MyviIE(SprutoBaseIE): + _VALID_URL = r'''(?x) + https?:// + myvi\.(?:ru/player|tv)/ + (?: + (?: + embed/html| + flash| + api/Video/Get + )/| + content/preloader\.swf\?.*\bid= + ) + (?P<id>[\da-zA-Z_-]+) + ''' + _TESTS = [{ + 'url': 'http://myvi.ru/player/embed/html/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', + 'md5': '571bbdfba9f9ed229dc6d34cc0f335bf', + 'info_dict': { + 'id': 'f16b2bbd-cde8-481c-a981-7cd48605df43', + 'ext': 'mp4', + 'title': 'хозяин жизни', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 25, + }, + }, { + 'url': 'http://myvi.ru/player/content/preloader.swf?id=oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wOYf1WFpPfc_bWTKGVf_Zafr0', + 'only_matching': True, + }, { + 'url': 'http://myvi.ru/player/api/Video/Get/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0', + 'only_matching': True, + }, { + 'url': 'http://myvi.tv/embed/html/oTGTNWdyz4Zwy_u1nraolwZ1odenTd9WkTnRfIL9y8VOgHYqOHApE575x4_xxS9Vn0?ap=0', + 'only_matching': True, + }, { + 'url': 'http://myvi.ru/player/flash/ocp2qZrHI-eZnHKQBK4cZV60hslH8LALnk0uBfKsB-Q4WnY26SeGoYPi8HWHxu0O30', + 'only_matching': True, + }] + + @classmethod + def _extract_url(cls, webpage): + mobj = re.search( + r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//myvi\.(?:ru/player|tv)/(?:embed/html|flash)/[^"]+)\1', webpage) + if mobj: + return mobj.group('url') + + def _real_extract(self, url): + video_id = self._match_id(url) + + spruto = self._download_json( + 'http://myvi.ru/player/api/Video/Get/%s?sig' % video_id, video_id)['sprutoData'] + + return self._extract_spruto(spruto, video_id) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 5d8448571..62d12b7a6 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -16,8 +16,24 @@ class NPOBaseIE(InfoExtractor): token_page = self._download_webpage( 'http://ida.omroep.nl/npoplayer/i.js', video_id, note='Downloading token') - return self._search_regex( + token = self._search_regex( r'npoplayer\.token = "(.+?)"', token_page, 'token') + # Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js + token_l = list(token) + first = second = None + for i in range(5, len(token_l) - 4): + if token_l[i].isdigit(): + if first is None: + first = i + elif second is None: + second = i + if first is None or second is None: + first = 12 + second = 13 + + token_l[first], token_l[second] = token_l[second], token_l[first] + + return ''.join(token_l) class NPOIE(NPOBaseIE): @@ -92,7 +108,7 @@ class NPOIE(NPOBaseIE): def _get_info(self, video_id): metadata = self._download_json( - 'http://e.omroep.nl/metadata/aflevering/%s' % video_id, + 'http://e.omroep.nl/metadata/%s' % video_id, video_id, # We have to remove the javascript callback transform_source=strip_jsonp, diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index cc70c2950..9e4581cf9 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -13,7 +13,7 @@ from ..utils import ( class NRKIE(InfoExtractor): - _VALID_URL = r'(?:nrk:|http://(?:www\.)?nrk\.no/video/PS\*)(?P<id>\d+)' + _VALID_URL = r'(?:nrk:|https?://(?:www\.)?nrk\.no/video/PS\*)(?P<id>\d+)' _TESTS = [ { @@ -76,7 +76,7 @@ class NRKIE(InfoExtractor): class NRKPlaylistIE(InfoExtractor): - 
_VALID_URL = r'http://(?:www\.)?nrk\.no/(?!video)(?:[^/]+/)+(?P<id>[^/]+)' + _VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video)(?:[^/]+/)+(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763', @@ -116,11 +116,11 @@ class NRKPlaylistIE(InfoExtractor): class NRKTVIE(InfoExtractor): - _VALID_URL = r'(?P<baseurl>http://tv\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?' + _VALID_URL = r'(?P<baseurl>https?://tv\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?' _TESTS = [ { - 'url': 'http://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014', + 'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014', 'md5': 'adf2c5454fa2bf032f47a9f8fb351342', 'info_dict': { 'id': 'MUHH48000314', @@ -132,7 +132,7 @@ class NRKTVIE(InfoExtractor): }, }, { - 'url': 'http://tv.nrk.no/program/mdfp15000514', + 'url': 'https://tv.nrk.no/program/mdfp15000514', 'md5': '383650ece2b25ecec996ad7b5bb2a384', 'info_dict': { 'id': 'mdfp15000514', @@ -145,7 +145,7 @@ class NRKTVIE(InfoExtractor): }, { # single playlist video - 'url': 'http://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2', + 'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2', 'md5': 'adbd1dbd813edaf532b0a253780719c2', 'info_dict': { 'id': 'MSPO40010515-part2', @@ -157,7 +157,7 @@ class NRKTVIE(InfoExtractor): 'skip': 'Only works from Norway', }, { - 'url': 'http://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015', + 'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015', 'playlist': [ { 'md5': '9480285eff92d64f06e02a5367970a7a', diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py index 143a76696..fec5d65ad 100644 --- a/youtube_dl/extractor/pbs.py +++ b/youtube_dl/extractor/pbs.py @@ -1,3 +1,4 @@ +# coding: utf-8 from __future__ import unicode_literals import re @@ -35,6 +36,9 @@ class PBSIE(InfoExtractor): 'description': 'md5:ba0c207295339c8d6eced00b7c363c6a', 'duration': 3190, }, + 'params': { + 'skip_download': True, # requires ffmpeg + }, }, { 'url': 'http://www.pbs.org/wgbh/pages/frontline/losing-iraq/', @@ -46,6 +50,9 @@ class PBSIE(InfoExtractor): 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e', 'duration': 5050, }, + 'params': { + 'skip_download': True, # requires ffmpeg + } }, { 'url': 'http://www.pbs.org/newshour/bb/education-jan-june12-cyberschools_02-23/', @@ -68,7 +75,10 @@ class PBSIE(InfoExtractor): 'title': 'Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full', 'duration': 6559, 'thumbnail': 're:^https?://.*\.jpg$', - } + }, + 'params': { + 'skip_download': True, # requires ffmpeg + }, }, { 'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html', @@ -82,7 +92,10 @@ class PBSIE(InfoExtractor): 'duration': 3172, 'thumbnail': 're:^https?://.*\.jpg$', 'upload_date': '20140122', - } + }, + 'params': { + 'skip_download': True, # requires ffmpeg + }, }, { 'url': 'http://www.pbs.org/wgbh/pages/frontline/united-states-of-secrets/', @@ -90,6 +103,21 @@ class PBSIE(InfoExtractor): 'id': 'united-states-of-secrets', }, 'playlist_count': 2, + }, + { + 'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/', + 'info_dict': { + 'id': '2280706814', + 'display_id': 'player', + 'ext': 'mp4', + 'title': 'Death and the Civil War', + 'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories 
from our past that inform our understanding of the world today.', + 'duration': 6705, + 'thumbnail': 're:^https?://.*\.jpg$', + }, + 'params': { + 'skip_download': True, # requires ffmpeg + }, } ] @@ -123,7 +151,7 @@ class PBSIE(InfoExtractor): return media_id, presumptive_id, upload_date url = self._search_regex( - r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>', + r'<iframe\s+[^>]*\s+src=["\']([^\'"]+partnerplayer[^\'"]+)["\']', webpage, 'player URL') mobj = re.match(self._VALID_URL, url) @@ -196,6 +224,14 @@ class PBSIE(InfoExtractor): rating_str = rating_str.rpartition('-')[2] age_limit = US_RATINGS.get(rating_str) + subtitles = {} + closed_captions_url = info.get('closed_captions_url') + if closed_captions_url: + subtitles['en'] = [{ + 'ext': 'ttml', + 'url': closed_captions_url, + }] + return { 'id': video_id, 'display_id': display_id, @@ -206,4 +242,5 @@ class PBSIE(InfoExtractor): 'age_limit': age_limit, 'upload_date': upload_date, 'formats': formats, + 'subtitles': subtitles, } diff --git a/youtube_dl/extractor/qqmusic.py b/youtube_dl/extractor/qqmusic.py index bafa81c21..476432330 100644 --- a/youtube_dl/extractor/qqmusic.py +++ b/youtube_dl/extractor/qqmusic.py @@ -9,6 +9,7 @@ from .common import InfoExtractor from ..utils import ( strip_jsonp, unescapeHTML, + clean_html, ) from ..compat import compat_urllib_request @@ -26,6 +27,20 @@ class QQMusicIE(InfoExtractor): 'upload_date': '20141227', 'creator': '林俊杰', 'description': 'md5:d327722d0361576fde558f1ac68a7065', + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, { + 'note': 'There is no mp3-320 version of this song.', + 'url': 'http://y.qq.com/#type=song&mid=004MsGEo3DdNxV', + 'md5': 'fa3926f0c585cda0af8fa4f796482e3e', + 'info_dict': { + 'id': '004MsGEo3DdNxV', + 'ext': 'mp3', + 'title': '如果', + 'upload_date': '20050626', + 'creator': '李季美', + 'description': 'md5:46857d5ed62bc4ba84607a805dccf437', + 'thumbnail': 're:^https?://.*\.jpg$', } }] @@ -68,6 +83,14 @@ class QQMusicIE(InfoExtractor): if lrc_content: lrc_content = lrc_content.replace('\\n', '\n') + thumbnail_url = None + albummid = self._search_regex( + [r'albummid:\'([0-9a-zA-Z]+)\'', r'"albummid":"([0-9a-zA-Z]+)"'], + detail_info_page, 'album mid', default=None) + if albummid: + thumbnail_url = "http://i.gtimg.cn/music/photo/mid_album_500/%s/%s/%s.jpg" \ + % (albummid[-2:-1], albummid[-1], albummid) + guid = self.m_r_get_ruin() vkey = self._download_json( @@ -85,6 +108,7 @@ class QQMusicIE(InfoExtractor): 'preference': details['preference'], 'abr': details.get('abr'), }) + self._check_formats(formats, mid) self._sort_formats(formats) return { @@ -94,6 +118,7 @@ class QQMusicIE(InfoExtractor): 'upload_date': publish_time, 'creator': singer, 'description': lrc_content, + 'thumbnail': thumbnail_url, } @@ -163,31 +188,40 @@ class QQMusicAlbumIE(QQPlaylistBaseIE): IE_NAME = 'qqmusic:album' _VALID_URL = r'http://y.qq.com/#type=album&mid=(?P<id>[0-9A-Za-z]+)' - _TEST = { - 'url': 'http://y.qq.com/#type=album&mid=000gXCTb2AhRR1&play=0', + _TESTS = [{ + 'url': 'http://y.qq.com/#type=album&mid=000gXCTb2AhRR1', 'info_dict': { 'id': '000gXCTb2AhRR1', 'title': '我们都是这样长大的', - 'description': 'md5:d216c55a2d4b3537fe4415b8767d74d6', + 'description': 'md5:179c5dce203a5931970d306aa9607ea6', }, 'playlist_count': 4, - } + }, { + 'url': 'http://y.qq.com/#type=album&mid=002Y5a3b3AlCu3', + 'info_dict': { + 'id': '002Y5a3b3AlCu3', + 'title': '그리고...', + 'description': 'md5:a48823755615508a95080e81b51ba729', + }, + 'playlist_count': 8, + }] def _real_extract(self, 
url): mid = self._match_id(url) - album_page = self._download_webpage( - self.qq_static_url('album', mid), mid, 'Download album page') + album = self._download_json( + 'http://i.y.qq.com/v8/fcg-bin/fcg_v8_album_info_cp.fcg?albummid=%s&format=json' % mid, + mid, 'Download album page')['data'] - entries = self.get_entries_from_page(album_page) - - album_name = self._html_search_regex( - r"albumname\s*:\s*'([^']+)',", album_page, 'album name', - default=None) - - album_detail = self._html_search_regex( - r'<div class="album_detail close_detail">\s*<p>((?:[^<>]+(?:<br />)?)+)</p>', - album_page, 'album details', default=None) + entries = [ + self.url_result( + 'http://y.qq.com/#type=song&mid=' + song['songmid'], 'QQMusic', song['songmid'] + ) for song in album['list'] + ] + album_name = album.get('name') + album_detail = album.get('desc') + if album_detail is not None: + album_detail = album_detail.strip() return self.playlist_result(entries, mid, album_name, album_detail) @@ -243,3 +277,36 @@ class QQMusicToplistIE(QQPlaylistBaseIE): list_name = topinfo.get('ListName') list_description = topinfo.get('info') return self.playlist_result(entries, list_id, list_name, list_description) + + +class QQMusicPlaylistIE(QQPlaylistBaseIE): + IE_NAME = 'qqmusic:playlist' + _VALID_URL = r'http://y\.qq\.com/#type=taoge&id=(?P<id>[0-9]+)' + + _TEST = { + 'url': 'http://y.qq.com/#type=taoge&id=3462654915', + 'info_dict': { + 'id': '3462654915', + 'title': '韩国5月新歌精选下旬', + 'description': 'md5:d2c9d758a96b9888cf4fe82f603121d4', + }, + 'playlist_count': 40, + } + + def _real_extract(self, url): + list_id = self._match_id(url) + + list_json = self._download_json( + 'http://i.y.qq.com/qzone-music/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg?type=1&json=1&utf8=1&onlysong=0&disstid=%s' + % list_id, list_id, 'Download list page', + transform_source=strip_jsonp)['cdlist'][0] + + entries = [ + self.url_result( + 'http://y.qq.com/#type=song&mid=' + song['songmid'], 'QQMusic', song['songmid'] + ) for song in list_json['songlist'] + ] + + list_name = list_json.get('dissname') + list_description = clean_html(unescapeHTML(list_json.get('desc'))) + return self.playlist_result(entries, list_id, list_name, list_description) diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py index 41d202c28..a4d3d73ff 100644 --- a/youtube_dl/extractor/rtlnl.py +++ b/youtube_dl/extractor/rtlnl.py @@ -43,6 +43,10 @@ class RtlNlIE(InfoExtractor): 'upload_date': '20150215', 'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. 
Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.', } + }, { + # encrypted m3u8 streams, georestricted + 'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7', + 'only_matching': True, }, { 'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0', 'only_matching': True, @@ -51,7 +55,7 @@ class RtlNlIE(InfoExtractor): def _real_extract(self, url): uuid = self._match_id(url) info = self._download_json( - 'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=flash/' % uuid, + 'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=adaptive/' % uuid, uuid) material = info['material'][0] @@ -59,9 +63,14 @@ class RtlNlIE(InfoExtractor): subtitle = material['title'] or info['episodes'][0]['name'] description = material.get('synopsis') or info['episodes'][0]['synopsis'] + meta = info.get('meta', {}) + # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118) - videopath = material['videopath'].replace('.f4m', '.m3u8') - m3u8_url = 'http://manifest.us.rtl.nl' + videopath + # NB: nowadays, recent ffmpeg and avconv can handle these encrypted streams, so + # this adaptive -> flash workaround is not required in general, but it also + # allows bypassing georestriction therefore is retained for now. + videopath = material['videopath'].replace('/adaptive/', '/flash/') + m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4') @@ -82,7 +91,7 @@ class RtlNlIE(InfoExtractor): self._sort_formats(formats) thumbnails = [] - meta = info.get('meta', {}) + for p in ('poster_base_url', '"thumb_base_url"'): if not meta.get(p): continue diff --git a/youtube_dl/extractor/spiegeltv.py b/youtube_dl/extractor/spiegeltv.py index 08a5c4314..27f4033c5 100644 --- a/youtube_dl/extractor/spiegeltv.py +++ b/youtube_dl/extractor/spiegeltv.py @@ -77,11 +77,13 @@ class SpiegeltvIE(InfoExtractor): 'rtmp_live': True, }) elif determine_ext(endpoint) == 'm3u8': - formats.extend(self._extract_m3u8_formats( + m3u8_formats = self._extract_m3u8_formats( endpoint.replace('[video]', play_path), video_id, 'm4v', preference=1, # Prefer hls since it allows to workaround georestriction - m3u8_id='hls')) + m3u8_id='hls', fatal=False) + if m3u8_formats is not False: + formats.extend(m3u8_formats) else: formats.append({ 'url': endpoint, diff --git a/youtube_dl/extractor/thisamericanlife.py b/youtube_dl/extractor/thisamericanlife.py new file mode 100644 index 000000000..36493a5de --- /dev/null +++ b/youtube_dl/extractor/thisamericanlife.py @@ -0,0 +1,40 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class ThisAmericanLifeIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?thisamericanlife\.org/(?:radio-archives/episode/|play_full\.php\?play=)(?P<id>\d+)' + _TESTS = [{ + 'url': 'http://www.thisamericanlife.org/radio-archives/episode/487/harper-high-school-part-one', + 'md5': '8f7d2da8926298fdfca2ee37764c11ce', + 'info_dict': { + 'id': '487', + 'ext': 'm4a', + 'title': '487: Harper High School, Part One', + 'description': 'md5:ee40bdf3fb96174a9027f76dbecea655', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + }, { + 'url': 'http://www.thisamericanlife.org/play_full.php?play=487', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + 
'http://www.thisamericanlife.org/radio-archives/episode/%s' % video_id, video_id) + + return { + 'id': video_id, + 'url': 'http://stream.thisamericanlife.org/{0}/stream/{0}_64k.m3u8'.format(video_id), + 'protocol': 'm3u8_native', + 'ext': 'm4a', + 'acodec': 'aac', + 'vcodec': 'none', + 'abr': 64, + 'title': self._html_search_meta(r'twitter:title', webpage, 'title', fatal=True), + 'description': self._html_search_meta(r'description', webpage, 'description'), + 'thumbnail': self._og_search_thumbnail(webpage), + } diff --git a/youtube_dl/extractor/vimple.py b/youtube_dl/extractor/vimple.py index aa3d6ddfd..92321d66e 100644 --- a/youtube_dl/extractor/vimple.py +++ b/youtube_dl/extractor/vimple.py @@ -4,7 +4,29 @@ from .common import InfoExtractor from ..utils import int_or_none -class VimpleIE(InfoExtractor): +class SprutoBaseIE(InfoExtractor): + def _extract_spruto(self, spruto, video_id): + playlist = spruto['playlist'][0] + title = playlist['title'] + video_id = playlist.get('videoId') or video_id + thumbnail = playlist.get('posterUrl') or playlist.get('thumbnailUrl') + duration = int_or_none(playlist.get('duration')) + + formats = [{ + 'url': f['url'], + } for f in playlist['video']] + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, + } + + +class VimpleIE(SprutoBaseIE): IE_DESC = 'Vimple - one-click video hosting' _VALID_URL = r'https?://(?:player\.vimple\.ru/iframe|vimple\.ru)/(?P<id>[\da-f-]{32,36})' _TESTS = [ @@ -30,25 +52,9 @@ class VimpleIE(InfoExtractor): webpage = self._download_webpage( 'http://player.vimple.ru/iframe/%s' % video_id, video_id) - playlist = self._parse_json( + spruto = self._parse_json( self._search_regex( r'sprutoData\s*:\s*({.+?}),\r\n', webpage, 'spruto data'), - video_id)['playlist'][0] + video_id) - title = playlist['title'] - video_id = playlist.get('videoId') or video_id - thumbnail = playlist.get('posterUrl') or playlist.get('thumbnailUrl') - duration = int_or_none(playlist.get('duration')) - - formats = [{ - 'url': f['url'], - } for f in playlist['video']] - self._sort_formats(formats) - - return { - 'id': video_id, - 'title': title, - 'thumbnail': thumbnail, - 'duration': duration, - 'formats': formats, - } + return self._extract_spruto(spruto, video_id) diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py index f2ae109f9..23d153031 100644 --- a/youtube_dl/extractor/vk.py +++ b/youtube_dl/extractor/vk.py @@ -21,7 +21,17 @@ from ..utils import ( class VKIE(InfoExtractor): IE_NAME = 'vk.com' - _VALID_URL = r'https?://(?:m\.)?vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:.+?\?.*?z=)?video(?P<videoid>[^s].*?)(?:\?|%2F|$))' + _VALID_URL = r'''(?x) + https?:// + (?: + (?:m\.)?vk\.com/video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)| + (?: + (?:m\.)?vk\.com/(?:.+?\?.*?z=)?video| + (?:www\.)?biqle\.ru/watch/ + ) + (?P<videoid>[^s].*?)(?:\?|%2F|$) + ) + ''' _NETRC_MACHINE = 'vk' _TESTS = [ @@ -109,11 +119,31 @@ class VKIE(InfoExtractor): }, 'skip': 'Only works from Russia', }, + { + # youtube embed + 'url': 'https://vk.com/video276849682_170681728', + 'info_dict': { + 'id': 'V3K4mi0SYkc', + 'ext': 'mp4', + 'title': "DSWD Awards 'Children's Joy Foundation, Inc.' 
Certificate of Registration and License to Operate", + 'description': 'md5:bf9c26cfa4acdfb146362682edd3827a', + 'duration': 179, + 'upload_date': '20130116', + 'uploader': "Children's Joy Foundation", + 'uploader_id': 'thecjf', + 'view_count': int, + }, + }, { # removed video, just testing that we match the pattern 'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a', 'only_matching': True, }, + { + # vk wrapper + 'url': 'http://www.biqle.ru/watch/847655_160197695', + 'only_matching': True, + } ] def _login(self): @@ -153,9 +183,14 @@ class VKIE(InfoExtractor): if not video_id: video_id = '%s_%s' % (mobj.group('oid'), mobj.group('id')) - info_url = 'http://vk.com/al_video.php?act=show&al=1&module=video&video=%s' % video_id + info_url = 'https://vk.com/al_video.php?act=show&al=1&module=video&video=%s' % video_id info_page = self._download_webpage(info_url, video_id) + if re.search(r'<!>/login\.php\?.*\bact=security_check', info_page): + raise ExtractorError( + 'You are trying to log in from an unusual location. You should confirm ownership at vk.com to log in with this IP.', + expected=True) + ERRORS = { r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<': 'Video %s has been removed from public access due to rightholder complaint.', @@ -175,10 +210,11 @@ class VKIE(InfoExtractor): if re.search(error_re, info_page): raise ExtractorError(error_msg % video_id, expected=True) - m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page) - if m_yt is not None: - self.to_screen('Youtube video detected') - return self.url_result(m_yt.group(1), 'Youtube') + youtube_url = self._search_regex( + r'<iframe[^>]+src="((?:https?:)?//www.youtube.com/embed/[^"]+)"', + info_page, 'youtube iframe', default=None) + if youtube_url: + return self.url_result(youtube_url, 'Youtube') m_rutube = re.search( r'\ssrc="((?:https?:)?//rutube\.ru\\?/video\\?/embed(?:.*?))\\?"', info_page) diff --git a/youtube_dl/extractor/yinyuetai.py b/youtube_dl/extractor/yinyuetai.py new file mode 100644 index 000000000..fa6b40816 --- /dev/null +++ b/youtube_dl/extractor/yinyuetai.py @@ -0,0 +1,55 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class YinYueTaiIE(InfoExtractor): + IE_NAME = 'yinyuetai:video' + _VALID_URL = r'https?://v\.yinyuetai\.com/video(?:/h5)?/(?P<id>[0-9]+)' + _TESTS = [{ + 'url': 'http://v.yinyuetai.com/video/2322376', + 'md5': '6e3abe28d38e3a54b591f9f040595ce0', + 'info_dict': { + 'id': '2322376', + 'ext': 'mp4', + 'title': '少女时代_PARTY_Music Video Teaser', + 'creator': '少女时代', + 'duration': 25, + 'thumbnail': 're:^https?://.*\.jpg$', + }, + }, { + 'url': 'http://v.yinyuetai.com/video/h5/2322376', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + info = self._download_json( + 'http://ext.yinyuetai.com/main/get-h-mv-info?json=true&videoId=%s' % video_id, video_id, + 'Downloading mv info')['videoInfo']['coreVideoInfo'] + + if info['error']: + raise ExtractorError(info['errorMsg'], expected=True) + + formats = [{ + 'url': format_info['videoUrl'], + 'format_id': format_info['qualityLevel'], + 'format': format_info.get('qualityLevelName'), + 'filesize': format_info.get('fileSize'), + # though URLs ends with .flv, the downloaded files are in fact mp4 + 'ext': 'mp4', + 'tbr': format_info.get('bitrate'), + } for format_info in info['videoUrlModels']] + self._sort_formats(formats) + + return { + 'id': video_id, + 
'title': info['videoName'], + 'thumbnail': info.get('bigHeadImage'), + 'creator': info.get('artistNames'), + 'duration': info.get('duration'), + 'formats': formats, + } diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 8b43e274b..3c629d38a 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -520,6 +520,20 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'skip_download': 'requires avconv', } }, + # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097) + { + 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y', + 'info_dict': { + 'id': 'FIl7x6_3R5Y', + 'ext': 'mp4', + 'title': 'md5:7b81415841e02ecd4313668cde88737a', + 'description': 'md5:116377fd2963b81ec4ce64b542173306', + 'upload_date': '20150625', + 'uploader_id': 'dorappi2000', + 'uploader': 'dorappi2000', + 'formats': 'mincount:33', + }, + } ] def __init__(self, *args, **kwargs): @@ -784,7 +798,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.') def _parse_dash_manifest( - self, video_id, dash_manifest_url, player_url, age_gate): + self, video_id, dash_manifest_url, player_url, age_gate, fatal=True): def decrypt_sig(mobj): s = mobj.group(1) dec_s = self._decrypt_signature(s, video_id, player_url, age_gate) @@ -793,7 +807,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): dash_doc = self._download_xml( dash_manifest_url, video_id, note='Downloading DASH manifest', - errnote='Could not download DASH manifest') + errnote='Could not download DASH manifest', + fatal=fatal) + + if dash_doc is False: + return [] formats = [] for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'): @@ -826,6 +844,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor): except StopIteration: full_info = self._formats.get(format_id, {}).copy() full_info.update(f) + codecs = r.attrib.get('codecs') + if codecs: + if full_info.get('acodec') == 'none' and 'vcodec' not in full_info: + full_info['vcodec'] = codecs + elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info: + full_info['acodec'] = codecs formats.append(full_info) else: existing_format.update(f) @@ -855,6 +879,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor): else: player_url = None + dash_mpds = [] + + def add_dash_mpd(video_info): + dash_mpd = video_info.get('dashmpd') + if dash_mpd and dash_mpd[0] not in dash_mpds: + dash_mpds.append(dash_mpd[0]) + # Get video info embed_webpage = None if re.search(r'player-age-gate-content">', video_webpage) is not None: @@ -875,24 +906,29 @@ class YoutubeIE(YoutubeBaseInfoExtractor): note='Refetching age-gated info webpage', errnote='unable to download video info webpage') video_info = compat_parse_qs(video_info_webpage) + add_dash_mpd(video_info) else: age_gate = False - try: - # Try looking directly into the video webpage - mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage) - if not mobj: - raise ValueError('Could not find ytplayer.config') # caught below + video_info = None + # Try looking directly into the video webpage + mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage) + if mobj: json_code = uppercase_escape(mobj.group(1)) ytplayer_config = json.loads(json_code) args = ytplayer_config['args'] - # Convert to the same format returned by compat_parse_qs - video_info = dict((k, [v]) for k, v in args.items()) - if not args.get('url_encoded_fmt_stream_map'): - raise ValueError('No 
stream_map present') # caught below - except ValueError: - # We fallback to the get_video_info pages (used by the embed page) + if args.get('url_encoded_fmt_stream_map'): + # Convert to the same format returned by compat_parse_qs + video_info = dict((k, [v]) for k, v in args.items()) + add_dash_mpd(video_info) + if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True): + # We also try looking in get_video_info since it may contain different dashmpd + # URL that points to a DASH manifest with possibly different itag set (some itags + # are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH + # manifest pointed by get_video_info's dashmpd). + # The general idea is to take a union of itags of both DASH manifests (for example + # video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093) self.report_video_info_webpage_download(video_id) - for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']: + for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']: video_info_url = ( '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en' % (proto, video_id, el_type)) @@ -900,8 +936,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): video_info_url, video_id, note=False, errnote='unable to download video info webpage') - video_info = compat_parse_qs(video_info_webpage) - if 'token' in video_info: + get_video_info = compat_parse_qs(video_info_webpage) + add_dash_mpd(get_video_info) + if not video_info: + video_info = get_video_info + if 'token' in get_video_info: break if 'token' not in video_info: if 'reason' in video_info: @@ -964,15 +1003,16 @@ class YoutubeIE(YoutubeBaseInfoExtractor): video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0]) # upload date - upload_date = None - mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage) - if mobj is None: - mobj = re.search( - r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>', - video_webpage) - if mobj is not None: - upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) - upload_date = unified_strdate(upload_date) + upload_date = self._html_search_meta( + 'datePublished', video_webpage, 'upload date', default=None) + if not upload_date: + upload_date = self._search_regex( + [r'(?s)id="eow-date.*?>(.*?)</span>', + r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'], + video_webpage, 'upload date', default=None) + if upload_date: + upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split()) + upload_date = unified_strdate(upload_date) m_cat_container = self._search_regex( r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>', video_webpage, 'categories', default=None) @@ -1125,24 +1165,32 @@ class YoutubeIE(YoutubeBaseInfoExtractor): # Look for the DASH manifest if self._downloader.params.get('youtube_include_dash_manifest', True): - dash_mpd = video_info.get('dashmpd') - if dash_mpd: - dash_manifest_url = dash_mpd[0] + dash_mpd_fatal = True + for dash_manifest_url in dash_mpds: + dash_formats = {} try: - dash_formats = self._parse_dash_manifest( - video_id, dash_manifest_url, player_url, age_gate) + for df in self._parse_dash_manifest( + video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal): + # Do not overwrite DASH format found in some previous DASH manifest + if df['format_id'] not in dash_formats: + dash_formats[df['format_id']] = df + # Additional DASH manifests may end up in HTTP Error 403 
therefore + # allow them to fail without bug report message if we already have + # some DASH manifest succeeded. This is temporary workaround to reduce + # burst of bug reports until we figure out the reason and whether it + # can be fixed at all. + dash_mpd_fatal = False except (ExtractorError, KeyError) as e: self.report_warning( 'Skipping DASH manifest: %r' % e, video_id) - else: + if dash_formats: # Remove the formats we found through non-DASH, they # contain less info and it can be wrong, because we use # fixed values (for example the resolution). See # https://github.com/rg3/youtube-dl/issues/5774 for an # example. - dash_keys = set(df['format_id'] for df in dash_formats) - formats = [f for f in formats if f['format_id'] not in dash_keys] - formats.extend(dash_formats) + formats = [f for f in formats if f['format_id'] not in dash_formats.keys()] + formats.extend(dash_formats.values()) # Check for malformed aspect ratio stretched_m = re.search( diff --git a/youtube_dl/options.py b/youtube_dl/options.py index e7d067642..4762e1e3c 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -346,7 +346,7 @@ def parseOpts(overrideArguments=None): video_format.add_option( '--youtube-skip-dash-manifest', action='store_false', dest='youtube_include_dash_manifest', - help='Do not download the DASH manifest on YouTube videos') + help='Do not download the DASH manifests and related data on YouTube videos') video_format.add_option( '--merge-output-format', action='store', dest='merge_output_format', metavar='FORMAT', default=None, diff --git a/youtube_dl/version.py b/youtube_dl/version.py index a225e03a1..3364647ed 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2015.06.25' +__version__ = '2015.07.07'
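
Note on the npo.py change above: the token fixup added to NPOBaseIE._get_token is easy to lose inside the diff, so here is a minimal standalone sketch of the same swap logic for sanity-checking. The function name and the sample token are made up for illustration; only the transformation itself comes from the patch.

def transform_npo_token(token):
    # Swap the first two digit characters found between position 5 and the
    # 4th-from-last position; if fewer than two digits exist, swap positions 12 and 13.
    token_l = list(token)
    first = second = None
    for i in range(5, len(token_l) - 4):
        if token_l[i].isdigit():
            if first is None:
                first = i
            elif second is None:
                second = i
    if first is None or second is None:
        first = 12
        second = 13
    token_l[first], token_l[second] = token_l[second], token_l[first]
    return ''.join(token_l)

print(transform_npo_token('abcde1fgh2ijklmnopqr'))  # -> 'abcde2fgh1ijklmnopqr'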
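
Note on the qqmusic.py change above: the new thumbnail URL shards the album-art path by the second-to-last and last characters of the album mid. A quick illustration with a hypothetical mid (the value below is invented, not a real album):

albummid = '003EXAmple9xYz'  # hypothetical album mid, for illustration only
thumbnail_url = "http://i.gtimg.cn/music/photo/mid_album_500/%s/%s/%s.jpg" \
    % (albummid[-2:-1], albummid[-1], albummid)
print(thumbnail_url)  # http://i.gtimg.cn/music/photo/mid_album_500/Y/z/003EXAmple9xYz.jpg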