From b76f0e58f750fd420ac3078c7183b2de66da562c Mon Sep 17 00:00:00 2001 From: The Hatsune Daishi Date: Sat, 29 Feb 2020 18:33:09 +0900 Subject: [PATCH 01/12] [options] Remove duplicate short option -v for --version (#24162) --- youtube_dl/options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 1ffabc62b..8826b382c 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -134,7 +134,7 @@ def parseOpts(overrideArguments=None): action='help', help='Print this help text and exit') general.add_option( - '-v', '--version', + '--version', action='version', help='Print program version and exit') general.add_option( From e2f8bf5888274b95513b430e0f20261120699b4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 29 Feb 2020 17:29:30 +0700 Subject: [PATCH 02/12] [extractor/common] Convert ISM manifest to unicode before processing on python 2 (#24152) --- youtube_dl/extractor/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index eaae5e484..ab7d473d0 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -2341,7 +2341,7 @@ class InfoExtractor(object): return [] ism_doc, urlh = res - return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id) + return self._parse_ism_formats(ism_doc, compat_str(urlh.geturl()), ism_id) def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None): """ From fca6dba8b80286ae6d3ca0a60c4799c220a52650 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 29 Feb 2020 19:08:44 +0700 Subject: [PATCH 03/12] [YoutubeDL] Force redirect URL to unicode on python 2 --- youtube_dl/YoutubeDL.py | 4 +++- youtube_dl/utils.py | 9 +++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index b09cb0a79..19370f62b 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -92,6 +92,7 @@ from .utils import ( YoutubeDLCookieJar, YoutubeDLCookieProcessor, YoutubeDLHandler, + YoutubeDLRedirectHandler, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER @@ -2343,6 +2344,7 @@ class YoutubeDL(object): debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) + redirect_handler = YoutubeDLRedirectHandler() data_handler = compat_urllib_request_DataHandler() # When passing our own FileHandler instance, build_opener won't add the @@ -2356,7 +2358,7 @@ class YoutubeDL(object): file_handler.file_open = file_open opener = compat_urllib_request.build_opener( - proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler) + proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index f6204692a..8ccf25489 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -2795,6 +2795,15 @@ class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): https_response = http_response +class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler): + if sys.version_info[0] < 3: + def redirect_request(self, req, fp, code, msg, headers, newurl): 
+ # On python 2 urlh.geturl() may sometimes return redirect URL + # as byte string instead of unicode. This workaround allows + # to force it always return unicode. + return compat_urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, compat_str(newurl)) + + def extract_timezone(date_str): m = re.search( r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
From 7947a1f7dbc6ba47a6f22ab67fd330e57c0ef87c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 29 Feb 2020 19:17:27 +0700 Subject: [PATCH 04/12] Remove no longer needed compat_str around geturl --- youtube_dl/extractor/common.py | 2 +- youtube_dl/extractor/eporner.py | 3 +-- youtube_dl/extractor/generic.py | 6 +++--- youtube_dl/extractor/lecturio.py | 3 +-- youtube_dl/extractor/linuxacademy.py | 5 ++--- youtube_dl/extractor/mediaset.py | 3 +-- youtube_dl/extractor/mediasite.py | 2 +- youtube_dl/extractor/platzi.py | 2 +- youtube_dl/extractor/safari.py | 5 ++--- youtube_dl/extractor/teachable.py | 3 +-- youtube_dl/extractor/tumblr.py | 3 +-- youtube_dl/extractor/vimeo.py | 2 +- 12 files changed, 16 insertions(+), 23 deletions(-) diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index ab7d473d0..eaae5e484 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -2341,7 +2341,7 @@ class InfoExtractor(object): return [] ism_doc, urlh = res - return self._parse_ism_formats(ism_doc, compat_str(urlh.geturl()), ism_id) + return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id) def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None): """ diff --git a/youtube_dl/extractor/eporner.py b/youtube_dl/extractor/eporner.py index c050bf9df..fe42821c7 100644 --- a/youtube_dl/extractor/eporner.py +++ b/youtube_dl/extractor/eporner.py @@ -4,7 +4,6 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_str from ..utils import ( encode_base_n, ExtractorError, @@ -55,7 +54,7 @@ class EpornerIE(InfoExtractor): webpage, urlh = self._download_webpage_handle(url, display_id) - video_id = self._match_id(compat_str(urlh.geturl())) + video_id = self._match_id(urlh.geturl()) hash = self._search_regex( r'hash\s*:\s*["\']([\da-f]{32})', webpage, 'hash') diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 04c026984..d1ec56be9 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -2287,7 +2287,7 @@ class GenericIE(InfoExtractor): if head_response is not False: # Check for redirect - new_url = compat_str(head_response.geturl()) + new_url = head_response.geturl() if url != new_url: self.report_following_redirect(new_url) if force_videoid: @@ -2387,12 +2387,12 @@ class GenericIE(InfoExtractor): return self.playlist_result( self._parse_xspf( doc, video_id, xspf_url=url, - xspf_base_url=compat_str(full_response.geturl())), + xspf_base_url=full_response.geturl()), video_id) elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag): info_dict['formats'] = self._parse_mpd_formats( doc, - mpd_base_url=compat_str(full_response.geturl()).rpartition('/')[0], + mpd_base_url=full_response.geturl().rpartition('/')[0], mpd_url=url) self._sort_formats(info_dict['formats']) return info_dict diff --git a/youtube_dl/extractor/lecturio.py b/youtube_dl/extractor/lecturio.py index 6ed7da4ab..1b2dcef46 100644 --- a/youtube_dl/extractor/lecturio.py +++ b/youtube_dl/extractor/lecturio.py @@ -4,7 +4,6 @@ from __future__ import unicode_literals import re from
.common import InfoExtractor -from ..compat import compat_str from ..utils import ( clean_html, determine_ext, @@ -36,7 +35,7 @@ class LecturioBaseIE(InfoExtractor): self._LOGIN_URL, None, 'Downloading login popup') def is_logged(url_handle): - return self._LOGIN_URL not in compat_str(url_handle.geturl()) + return self._LOGIN_URL not in url_handle.geturl() # Already logged in if is_logged(urlh): diff --git a/youtube_dl/extractor/linuxacademy.py b/youtube_dl/extractor/linuxacademy.py index a78c6556e..23ca965d9 100644 --- a/youtube_dl/extractor/linuxacademy.py +++ b/youtube_dl/extractor/linuxacademy.py @@ -8,7 +8,6 @@ from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_HTTPError, - compat_str, ) from ..utils import ( ExtractorError, @@ -99,7 +98,7 @@ class LinuxAcademyIE(InfoExtractor): 'sso': 'true', }) - login_state_url = compat_str(urlh.geturl()) + login_state_url = urlh.geturl() try: login_page = self._download_webpage( @@ -129,7 +128,7 @@ class LinuxAcademyIE(InfoExtractor): }) access_token = self._search_regex( - r'access_token=([^=&]+)', compat_str(urlh.geturl()), + r'access_token=([^=&]+)', urlh.geturl(), 'access token') self._download_webpage( diff --git a/youtube_dl/extractor/mediaset.py b/youtube_dl/extractor/mediaset.py index 027a790b8..933df1495 100644 --- a/youtube_dl/extractor/mediaset.py +++ b/youtube_dl/extractor/mediaset.py @@ -6,7 +6,6 @@ import re from .theplatform import ThePlatformBaseIE from ..compat import ( compat_parse_qs, - compat_str, compat_urllib_parse_urlparse, ) from ..utils import ( @@ -114,7 +113,7 @@ class MediasetIE(ThePlatformBaseIE): continue urlh = ie._request_webpage( embed_url, video_id, note='Following embed URL redirect') - embed_url = compat_str(urlh.geturl()) + embed_url = urlh.geturl() program_guid = _program_guid(_qs(embed_url)) if program_guid: entries.append(embed_url) diff --git a/youtube_dl/extractor/mediasite.py b/youtube_dl/extractor/mediasite.py index 694a264d6..d6eb15740 100644 --- a/youtube_dl/extractor/mediasite.py +++ b/youtube_dl/extractor/mediasite.py @@ -129,7 +129,7 @@ class MediasiteIE(InfoExtractor): query = mobj.group('query') webpage, urlh = self._download_webpage_handle(url, resource_id) # XXX: add UrlReferrer? 
- redirect_url = compat_str(urlh.geturl()) + redirect_url = urlh.geturl() # XXX: might have also extracted UrlReferrer and QueryString from the html service_path = compat_urlparse.urljoin(redirect_url, self._html_search_regex( diff --git a/youtube_dl/extractor/platzi.py b/youtube_dl/extractor/platzi.py index 602207beb..23c8256b5 100644 --- a/youtube_dl/extractor/platzi.py +++ b/youtube_dl/extractor/platzi.py @@ -46,7 +46,7 @@ class PlatziBaseIE(InfoExtractor): headers={'Referer': self._LOGIN_URL}) # login succeeded - if 'platzi.com/login' not in compat_str(urlh.geturl()): + if 'platzi.com/login' not in urlh.geturl(): return login_error = self._webpage_read_content( diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py index 4942437c7..2cc665122 100644 --- a/youtube_dl/extractor/safari.py +++ b/youtube_dl/extractor/safari.py @@ -8,7 +8,6 @@ from .common import InfoExtractor from ..compat import ( compat_parse_qs, - compat_str, compat_urlparse, ) from ..utils import ( @@ -39,13 +38,13 @@ class SafariBaseIE(InfoExtractor): 'Downloading login page') def is_logged(urlh): - return 'learning.oreilly.com/home/' in compat_str(urlh.geturl()) + return 'learning.oreilly.com/home/' in urlh.geturl() if is_logged(urlh): self.LOGGED_IN = True return - redirect_url = compat_str(urlh.geturl()) + redirect_url = urlh.geturl() parsed_url = compat_urlparse.urlparse(redirect_url) qs = compat_parse_qs(parsed_url.query) next_uri = compat_urlparse.urljoin( diff --git a/youtube_dl/extractor/teachable.py b/youtube_dl/extractor/teachable.py index cca89a4a8..4316a6962 100644 --- a/youtube_dl/extractor/teachable.py +++ b/youtube_dl/extractor/teachable.py @@ -4,7 +4,6 @@ import re from .common import InfoExtractor from .wistia import WistiaIE -from ..compat import compat_str from ..utils import ( clean_html, ExtractorError, @@ -58,7 +57,7 @@ class TeachableBaseIE(InfoExtractor): self._logged_in = True return - login_url = compat_str(urlh.geturl()) + login_url = urlh.geturl() login_form = self._hidden_inputs(login_page) diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py index edbb0aa69..ae584ad69 100644 --- a/youtube_dl/extractor/tumblr.py +++ b/youtube_dl/extractor/tumblr.py @@ -4,7 +4,6 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, @@ -151,7 +150,7 @@ class TumblrIE(InfoExtractor): url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id) webpage, urlh = self._download_webpage_handle(url, video_id) - redirect_url = compat_str(urlh.geturl()) + redirect_url = urlh.geturl() if 'tumblr.com/safe-mode' in redirect_url or redirect_url.startswith('/safe-mode'): raise ExtractorError( 'This Tumblr may contain sensitive media. 
' diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index f378aa283..1da4ced96 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -591,7 +591,7 @@ class VimeoIE(VimeoBaseInfoExtractor): # Retrieve video webpage to extract further information webpage, urlh = self._download_webpage_handle( url, video_id, headers=headers) - redirect_url = compat_str(urlh.geturl()) + redirect_url = urlh.geturl() except ExtractorError as ee: if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403: errmsg = ee.cause.read() From 886d9859590c423b1d036d4a2e6f43ee639560d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 29 Feb 2020 21:58:22 +0700 Subject: [PATCH 05/12] [youjizz] Fix extraction (closes #24181) --- youtube_dl/extractor/youjizz.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/youjizz.py b/youtube_dl/extractor/youjizz.py index dff69fcb7..88aabd272 100644 --- a/youtube_dl/extractor/youjizz.py +++ b/youtube_dl/extractor/youjizz.py @@ -44,7 +44,7 @@ class YouJizzIE(InfoExtractor): encodings = self._parse_json( self._search_regex( - r'encodings\s*=\s*(\[.+?\]);\n', webpage, 'encodings', + r'[Ee]ncodings\s*=\s*(\[.+?\]);\n', webpage, 'encodings', default='[]'), video_id, fatal=False) for encoding in encodings: From ea17979d83e6dcd61de0901fc55e2672d577e5a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 29 Feb 2020 22:08:43 +0700 Subject: [PATCH 06/12] [test_subtitles] Remove obsolete test --- test/test_subtitles.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/test/test_subtitles.py b/test/test_subtitles.py index 7d57a628e..17aaaf20d 100644 --- a/test/test_subtitles.py +++ b/test/test_subtitles.py @@ -26,7 +26,6 @@ from youtube_dl.extractor import ( ThePlatformIE, ThePlatformFeedIE, RTVEALaCartaIE, - FunnyOrDieIE, DemocracynowIE, ) @@ -322,18 +321,6 @@ class TestRtveSubtitles(BaseTestSubtitles): self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca') -class TestFunnyOrDieSubtitles(BaseTestSubtitles): - url = 'http://www.funnyordie.com/videos/224829ff6d/judd-apatow-will-direct-your-vine' - IE = FunnyOrDieIE - - def test_allsubtitles(self): - self.DL.params['writesubtitles'] = True - self.DL.params['allsubtitles'] = True - subtitles = self.getSubtitles() - self.assertEqual(set(subtitles.keys()), set(['en'])) - self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4') - - class TestDemocracynowSubtitles(BaseTestSubtitles): url = 'http://www.democracynow.org/shows/2015/7/3' IE = DemocracynowIE From b4cbdbd4b37a496c6539694bebd0ec50ef02c864 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= Date: Sat, 29 Feb 2020 23:06:36 +0700 Subject: [PATCH 07/12] [zdf:channel] Fix tests --- youtube_dl/extractor/zdf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py index 145c123a4..656864b2e 100644 --- a/youtube_dl/extractor/zdf.py +++ b/youtube_dl/extractor/zdf.py @@ -244,14 +244,14 @@ class ZDFChannelIE(ZDFBaseIE): 'id': 'das-aktuelle-sportstudio', 'title': 'das aktuelle sportstudio | ZDF', }, - 'playlist_count': 21, + 'playlist_mincount': 23, }, { 'url': 'https://www.zdf.de/dokumentation/planet-e', 'info_dict': { 'id': 'planet-e', 'title': 'planet e.', }, - 'playlist_count': 4, + 'playlist_mincount': 50, }, { 'url': 'https://www.zdf.de/filme/taunuskrimi/', 'only_matching': True, From 278355bae4b4aed9cd3bf09bdf251fcb7396e303 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 29 Feb 2020 23:09:13 +0700 Subject: [PATCH 08/12] [zapiks] Fix test --- youtube_dl/extractor/zapiks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/youtube_dl/extractor/zapiks.py b/youtube_dl/extractor/zapiks.py index bacb82eee..f6496f516 100644 --- a/youtube_dl/extractor/zapiks.py +++ b/youtube_dl/extractor/zapiks.py @@ -29,7 +29,6 @@ class ZapiksIE(InfoExtractor): 'timestamp': 1359044972, 'upload_date': '20130124', 'view_count': int, - 'comment_count': int, }, }, {
From e88b450771027e3a36e7b3721e0bc1f105331cf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 29 Feb 2020 23:51:34 +0700 Subject: [PATCH 09/12] [xtube] Fix metadata extraction (closes #21073, closes #22455) --- youtube_dl/extractor/xtube.py | 46 +++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py index c6c0b3291..994cf1d0a 100644 --- a/youtube_dl/extractor/xtube.py +++ b/youtube_dl/extractor/xtube.py @@ -47,7 +47,7 @@ class XTubeIE(InfoExtractor): 'display_id': 'A-Super-Run-Part-1-YT', 'ext': 'flv', 'title': 'A Super Run - Part 1 (YT)', - 'description': 'md5:ca0d47afff4a9b2942e4b41aa970fd93', + 'description': 'md5:4cc3af1aa1b0413289babc88f0d4f616', 'uploader': 'tshirtguy59', 'duration': 579, 'view_count': int, @@ -87,10 +87,24 @@ class XTubeIE(InfoExtractor): 'Cookie': 'age_verified=1; cookiesAccepted=1', }) - sources = self._parse_json(self._search_regex( - r'(["\'])?sources\1?\s*:\s*(?P<sources>{.+?}),', - webpage, 'sources', group='sources'), video_id, - transform_source=js_to_json) + title, thumbnail, duration = [None] * 3 + + config = self._parse_json(self._search_regex( + r'playerConf\s*=\s*({.+?})\s*,\s*\n', webpage, 'config', + default='{}'), video_id, transform_source=js_to_json, fatal=False) + if config: + config = config.get('mainRoll') + if isinstance(config, dict): + title = config.get('title') + thumbnail = config.get('poster') + duration = int_or_none(config.get('duration')) + sources = config.get('sources') + + if not isinstance(sources, dict): + sources = self._parse_json(self._search_regex( + r'(["\'])?sources\1?\s*:\s*(?P<sources>{.+?}),', + webpage, 'sources', group='sources'), video_id, + transform_source=js_to_json) formats = [] for format_id, format_url in sources.items(): @@ -102,20 +116,25 @@ class XTubeIE(InfoExtractor): self._remove_duplicate_formats(formats) self._sort_formats(formats)
- title = self._search_regex( - (r'<h1>\s*(?P<title>[^<]+?)\s*</h1>', r'videoTitle\s*:\s*(["\'])(?P<title>.+?)\1'), - webpage, 'title', group='title') - description = self._search_regex( + if not title: + title = self._search_regex( + (r'<h1>\s*(?P<title>[^<]+?)\s*</h1>', r'videoTitle\s*:\s*(["\'])(?P<title>.+?)\1'), + webpage, 'title', group='title') + description = self._og_search_description( + webpage, default=None) or self._html_search_meta( + 'twitter:description', webpage, default=None) or self._search_regex( r'</h1>\s*<p>([^<]+)', webpage, 'description', fatal=False) uploader = self._search_regex( (r'<input[^>]+name="contentOwnerId"[^>]+value="([^"]+)"', r'<span[^>]+class="nickname"[^>]*>([^<]+)'), webpage, 'uploader', fatal=False) - duration = parse_duration(self._search_regex( - r'<dt>Runtime:?</dt>\s*<dd>([^<]+)</dd>', - webpage, 'duration', fatal=False)) + if not duration: + duration = parse_duration(self._search_regex( + r'<dt>Runtime:?</dt>\s*<dd>([^<]+)</dd>', + webpage, 'duration', fatal=False)) view_count = str_to_int(self._search_regex( - r'<dt>Views:?</dt>\s*<dd>([\d,\.]+)</dd>', + (r'["\']viewsCount["\'][^>]*>(\d+)\s+views', + r'<dt>Views:?</dt>\s*<dd>([\d,\.]+)</dd>'), webpage, 'view count', fatal=False)) comment_count = str_to_int(self._html_search_regex( r'>Comments? \(([\d,\.]+)\)<', webpage, 'comment count', fatal=False)) @@ -126,6 +145,7 @@ class XTubeIE(InfoExtractor): 'display_id': display_id, 'title': title, 'description': description, + 'thumbnail': thumbnail, 'uploader': uploader, 'duration': duration, 'view_count': view_count,
From 838f051c4b9a0c974da32e79c011bbf85fa186fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sat, 29 Feb 2020 23:51:56 +0700 Subject: [PATCH 10/12] [xtube:user] Fix test --- youtube_dl/extractor/xtube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py index 994cf1d0a..47caec1de 100644 --- a/youtube_dl/extractor/xtube.py +++ b/youtube_dl/extractor/xtube.py @@ -164,7 +164,7 @@ class XTubeUserIE(InfoExtractor): 'id': 'greenshowers-4056496', 'age_limit': 18, }, - 'playlist_mincount': 155, + 'playlist_mincount': 154, } def _real_extract(self, url):
From f8cbd8c96367871db974621d5a92b324acb57938 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 1 Mar 2020 01:04:51 +0700 Subject: [PATCH 11/12] [telecinco] Fix extraction (refs #24195) --- youtube_dl/extractor/telecinco.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py index d37e1b055..d79ab1e82 100644 --- a/youtube_dl/extractor/telecinco.py +++ b/youtube_dl/extractor/telecinco.py @@ -135,7 +135,7 @@ class TelecincoIE(InfoExtractor): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) article = self._parse_json(self._search_regex( - r'window\.\$REACTBASE_STATE\.article\s*=\s*({.+})', + r'window\.\$REACTBASE_STATE\.article(?:_multisite)?\s*=\s*({.+})', webpage, 'article'), display_id)['article'] title = article.get('title') description = clean_html(article.get('leadParagraph'))
From 6d475d01d8f1fc9f5e92ac2caa986453bee95ae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergey=20M=E2=80=A4?= <dstftw@gmail.com> Date: Sun, 1 Mar 2020 03:09:19 +0700 Subject: [PATCH 12/12] [telecinco] Add support for article opening videos --- youtube_dl/extractor/telecinco.py | 44 ++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/youtube_dl/extractor/telecinco.py
b/youtube_dl/extractor/telecinco.py index d79ab1e82..9ba3da341 100644 --- a/youtube_dl/extractor/telecinco.py +++ b/youtube_dl/extractor/telecinco.py @@ -11,6 +11,7 @@ from ..utils import ( determine_ext, int_or_none, str_or_none, + try_get, urljoin, ) @@ -24,7 +25,7 @@ class TelecincoIE(InfoExtractor): 'info_dict': { 'id': '1876350223', 'title': 'Bacalao con kokotxas al pil-pil', - 'description': 'md5:1382dacd32dd4592d478cbdca458e5bb', + 'description': 'md5:716caf5601e25c3c5ab6605b1ae71529', }, 'playlist': [{ 'md5': 'adb28c37238b675dad0f042292f209a7', @@ -55,6 +56,26 @@ class TelecincoIE(InfoExtractor): 'description': 'md5:2771356ff7bfad9179c5f5cd954f1477', 'duration': 50, }, + }, { + # video in opening's content + 'url': 'https://www.telecinco.es/vivalavida/fiorella-sobrina-edmundo-arrocet-entrevista_18_2907195140.html', + 'info_dict': { + 'id': '2907195140', + 'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"', + 'description': 'md5:73f340a7320143d37ab895375b2bf13a', + }, + 'playlist': [{ + 'md5': 'adb28c37238b675dad0f042292f209a7', + 'info_dict': { + 'id': 'TpI2EttSDAReWpJ1o0NVh2', + 'ext': 'mp4', + 'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"', + 'duration': 1015, + }, + }], + 'params': { + 'skip_download': True, + }, }, { 'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html', 'only_matching': True, @@ -138,14 +159,25 @@ class TelecincoIE(InfoExtractor): r'window\.\$REACTBASE_STATE\.article(?:_multisite)?\s*=\s*({.+})', webpage, 'article'), display_id)['article'] title = article.get('title') - description = clean_html(article.get('leadParagraph')) + description = clean_html(article.get('leadParagraph')) or '' if article.get('editorialType') != 'VID': entries = [] - for p in article.get('body', []): - content = p.get('content') - if p.get('type') != 'video' or not content: + body = [article.get('opening')] + body.extend(try_get(article, lambda x: x['body'], list) or []) + for p in body: + if not isinstance(p, dict): continue - entries.append(self._parse_content(content, url)) + content = p.get('content') + if not content: + continue + type_ = p.get('type') + if type_ == 'paragraph': + content_str = str_or_none(content) + if content_str: + description += content_str + continue + if type_ == 'video' and isinstance(content, dict): + entries.append(self._parse_content(content, url)) return self.playlist_result( entries, str_or_none(article.get('id')), title, description) content = article['opening']['content']