From b7df8f90a7d749ce609e7ff344464db73329117a Mon Sep 17 00:00:00 2001
From: Remita Amine
Date: Sun, 12 May 2019 10:23:36 +0100
Subject: [PATCH 01/12] [yahoo:gyao] add X-User-Agent header to dam proxy
 requests(closes #21071)

---
 youtube_dl/extractor/yahoo.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index 6576c1d69..a3b5f00c8 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -511,6 +511,8 @@ class YahooGyaOPlayerIE(InfoExtractor):
             'https://gyao.yahoo.co.jp/dam/v1/videos/' + video_id, video_id,
             query={
                 'fields': 'longDescription,title,videoId',
+            }, headers={
+                'X-User-Agent': 'Unknown Pc GYAO!/2.0.0 Web',
             })
         return {
             '_type': 'url_transparent',

From e6a25fea23c4ee1db2461e77b457d01dc922a0a6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Sun, 12 May 2019 23:23:29 +0700
Subject: [PATCH 02/12] [svtplay] Update API URL (closes #21075)

---
 youtube_dl/extractor/svt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube_dl/extractor/svt.py b/youtube_dl/extractor/svt.py
index 7aa1b5919..0901c3163 100644
--- a/youtube_dl/extractor/svt.py
+++ b/youtube_dl/extractor/svt.py
@@ -185,7 +185,7 @@ class SVTPlayIE(SVTPlayBaseIE):
 
     def _extract_by_video_id(self, video_id, webpage=None):
         data = self._download_json(
-            'https://api.svt.se/video/%s' % video_id,
+            'https://api.svt.se/videoplayer-api/video/%s' % video_id,
             video_id, headers=self.geo_verification_headers())
         info_dict = self._extract_video(data, video_id)
         if not info_dict.get('title'):

From 0e0bfd334c59bd9a0bf4e24e88237041b7ff411d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Mon, 13 May 2019 00:11:48 +0700
Subject: [PATCH 03/12] [tele5] Relax _VALID_URL (closes #21020, closes #21063)

---
 youtube_dl/extractor/tele5.py | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/tele5.py b/youtube_dl/extractor/tele5.py
index 25573e49f..0b346d7be 100644
--- a/youtube_dl/extractor/tele5.py
+++ b/youtube_dl/extractor/tele5.py
@@ -7,7 +7,7 @@ from ..compat import compat_urlparse
 
 
 class Tele5IE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:mediathek|tv)/(?P<id>[^?#&]+)'
+    _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://www.tele5.de/mediathek/filme-online/videos?vid=1549416',
         'info_dict': {
@@ -21,10 +21,22 @@ class Tele5IE(InfoExtractor):
             'skip_download': True,
         },
     }, {
-        'url': 'https://www.tele5.de/tv/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191',
+        'url': 'https://www.tele5.de/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191',
         'only_matching': True,
     }, {
-        'url': 'https://www.tele5.de/tv/dark-matter/videos',
+        'url': 'https://www.tele5.de/video-clip/?ve_id=1609440',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/filme/schlefaz-dragon-crusaders/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/filme/making-of/avengers-endgame/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/star-trek/raumschiff-voyager/ganze-folge/das-vinculum/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.tele5.de/anders-ist-sevda/',
         'only_matching': True,
     }]

From a277dd33ebc7b3a3d4a7cf603ccdbe2d9ec5d73b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Mon, 13 May 2019 00:13:00 +0700
Subject: [PATCH 04/12] [tele5] Improve video id extraction

---
 youtube_dl/extractor/tele5.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/youtube_dl/extractor/tele5.py b/youtube_dl/extractor/tele5.py
index 0b346d7be..33a72083b 100644
--- a/youtube_dl/extractor/tele5.py
+++ b/youtube_dl/extractor/tele5.py
@@ -48,8 +48,9 @@ class Tele5IE(InfoExtractor):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         video_id = self._html_search_regex(
-            r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)',
-            webpage, 'video id')
+            (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)',
+             r'\s+id\s*=\s*["\']player_(\d{6,})',
+             r'\bdata-id\s*=\s*["\'](\d{6,})'), webpage, 'video id')
 
         return self.url_result(
            'https://api.nexx.cloud/v3/759/videos/byid/%s' % video_id,
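
The change above relies on _html_search_regex accepting a tuple of patterns and returning the capture of the first pattern that matches. A standalone sketch of that fallback idea, using plain re and a made-up HTML snippet rather than the extractor helpers:

import re

def first_match(patterns, text):
    # Try each pattern in order and return the first capture group found.
    for pattern in patterns:
        mobj = re.search(pattern, text)
        if mobj:
            return mobj.group(1)
    return None

sample = '<div id="player_1609440" class="video"></div>'
print(first_match((
    r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](\d+)',
    r'\s+id\s*=\s*["\']player_(\d{6,})',
    r'\bdata-id\s*=\s*["\'](\d{6,})'), sample))  # -> 1609440
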
From 583174284065a41a70220ecc8de0d31d3aea5070 Mon Sep 17 00:00:00 2001
From: Remita Amine
Date: Wed, 15 May 2019 10:38:33 +0100
Subject: [PATCH 05/12] [vrv] extract captions(closes #19238)

---
 youtube_dl/extractor/vrv.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/vrv.py b/youtube_dl/extractor/vrv.py
index 33530fe8a..b698bf66c 100644
--- a/youtube_dl/extractor/vrv.py
+++ b/youtube_dl/extractor/vrv.py
@@ -198,14 +198,15 @@ class VRVIE(VRVBaseIE):
         self._sort_formats(formats)
 
         subtitles = {}
-        for subtitle in streams_json.get('subtitles', {}).values():
-            subtitle_url = subtitle.get('url')
-            if not subtitle_url:
-                continue
-            subtitles.setdefault(subtitle.get('locale', 'en-US'), []).append({
-                'url': subtitle_url,
-                'ext': subtitle.get('format', 'ass'),
-            })
+        for k in ('captions', 'subtitles'):
+            for subtitle in streams_json.get(k, {}).values():
+                subtitle_url = subtitle.get('url')
+                if not subtitle_url:
+                    continue
+                subtitles.setdefault(subtitle.get('locale', 'en-US'), []).append({
+                    'url': subtitle_url,
+                    'ext': subtitle.get('format', 'ass'),
+                })
 
         thumbnails = []
         for thumbnail in video_data.get('images', {}).get('thumbnails', []):

From 170d6444406a0401fb20ca612e3d19a7f972af15 Mon Sep 17 00:00:00 2001
From: Remita Amine
Date: Wed, 15 May 2019 14:58:57 +0100
Subject: [PATCH 06/12] [canvas] add support for vrtnieuws and sporza site ids
 and extract AES HLS formats

---
 youtube_dl/extractor/canvas.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/youtube_dl/extractor/canvas.py b/youtube_dl/extractor/canvas.py
index 174fd9e2b..c506bc5dd 100644
--- a/youtube_dl/extractor/canvas.py
+++ b/youtube_dl/extractor/canvas.py
@@ -17,7 +17,7 @@ from ..utils import (
 
 
 class CanvasIE(InfoExtractor):
-    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrtvideo)/assets/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza)/assets/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
         'md5': '90139b746a0a9bd7bb631283f6e2a64e',
@@ -35,6 +35,10 @@ class CanvasIE(InfoExtractor):
         'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
         'only_matching': True,
     }]
+    _HLS_ENTRY_PROTOCOLS_MAP = {
+        'HLS': 'm3u8_native',
+        'HLS_AES': 'm3u8',
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -52,9 +56,9 @@ class CanvasIE(InfoExtractor):
             format_url, format_type = target.get('url'), target.get('type')
             if not format_url or not format_type:
                 continue
-            if format_type == 'HLS':
+            if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
                 formats.extend(self._extract_m3u8_formats(
-                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
                     m3u8_id=format_type, fatal=False))
             elif format_type == 'HDS':
                 formats.extend(self._extract_f4m_formats(
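
The new _HLS_ENTRY_PROTOCOLS_MAP ties each stream type reported by the API to a downloader entry protocol: plain HLS keeps the native m3u8 downloader, while AES-encrypted HLS is handed to the generic m3u8 protocol, and anything not in the map falls through to the other branches. A minimal standalone sketch of that lookup, with made-up target data:

HLS_ENTRY_PROTOCOLS_MAP = {
    'HLS': 'm3u8_native',
    'HLS_AES': 'm3u8',
}

targets = [
    {'type': 'HLS', 'url': 'https://example.invalid/index.m3u8'},
    {'type': 'HLS_AES', 'url': 'https://example.invalid/index_aes.m3u8'},
    {'type': 'HDS', 'url': 'https://example.invalid/manifest.f4m'},
]

for target in targets:
    format_type = target.get('type')
    if format_type in HLS_ENTRY_PROTOCOLS_MAP:
        # Known HLS variant: pick the matching entry protocol.
        print(target['url'], '->', HLS_ENTRY_PROTOCOLS_MAP[format_type])
    else:
        # Other types (e.g. HDS) are handled by separate branches.
        print(target['url'], '-> handled elsewhere')
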
From 82e91d20a0f698b13412dd7b200663c7485791bb Mon Sep 17 00:00:00 2001
From: Remita Amine
Date: Wed, 15 May 2019 15:02:51 +0100
Subject: [PATCH 07/12] [vrt] fix extraction(closes #20527)

---
 youtube_dl/extractor/vrt.py | 193 ++++++++++++------------------------
 1 file changed, 63 insertions(+), 130 deletions(-)

diff --git a/youtube_dl/extractor/vrt.py b/youtube_dl/extractor/vrt.py
index 444295d68..422025267 100644
--- a/youtube_dl/extractor/vrt.py
+++ b/youtube_dl/extractor/vrt.py
@@ -5,150 +5,83 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    extract_attributes,
     float_or_none,
+    get_element_by_class,
+    strip_or_none,
+    unified_timestamp,
 )
 
 
 class VRTIE(InfoExtractor):
-    IE_DESC = 'deredactie.be, sporza.be, cobra.be and cobra.canvas.be'
-    _VALID_URL = r'https?://(?:deredactie|sporza|cobra(?:\.canvas)?)\.be/cm/(?:[^/]+/)+(?P<id>[^/]+)/*'
-    _TESTS = [
-        # deredactie.be
-        {
-            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/programmas/journaal/EP_141025_JOL',
-            'md5': '4cebde1eb60a53782d4f3992cbd46ec8',
-            'info_dict': {
-                'id': '2129880',
-                'ext': 'flv',
-                'title': 'Het journaal L - 25/10/14',
-                'description': None,
-                'timestamp': 1414271750.949,
-                'upload_date': '20141025',
-                'duration': 929,
-            },
-            'skip': 'HTTP Error 404: Not Found',
+    IE_DESC = 'VRT NWS, Flanders News, Flandern Info and Sporza'
+    _VALID_URL = r'https?://(?:www\.)?(?P<site>vrt\.be/vrtnws|sporza\.be)/[a-z]{2}/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)'
+    _TESTS = [{
+        'url': 'https://www.vrt.be/vrtnws/nl/2019/05/15/beelden-van-binnenkant-notre-dame-een-maand-na-de-brand/',
+        'md5': 'e1663accf5cf13f375f3cd0d10476669',
+        'info_dict': {
+            'id': 'pbs-pub-7855fc7b-1448-49bc-b073-316cb60caa71$vid-2ca50305-c38a-4762-9890-65cbd098b7bd',
+            'ext': 'mp4',
+            'title': 'Beelden van binnenkant Notre-Dame, één maand na de brand',
+            'description': 'Op maandagavond 15 april ging een deel van het dakgebinte van de Parijse kathedraal in vlammen op.',
+            'timestamp': 1557924660,
+            'upload_date': '20190515',
+            'duration': 31.2,
         },
-        # sporza.be
-        {
-            'url': 'http://sporza.be/cm/sporza/videozone/programmas/extratime/EP_141020_Extra_time',
-            'md5': '11f53088da9bf8e7cfc42456697953ff',
-            'info_dict': {
-                'id': '2124639',
-                'ext': 'flv',
-                'title': 'Bekijk Extra Time van 20 oktober',
-                'description': 'md5:83ac5415a4f1816c6a93f8138aef2426',
-                'timestamp': 1413835980.560,
-                'upload_date': '20141020',
-                'duration': 3238,
-            },
-            'skip': 'HTTP Error 404: Not Found',
+    }, {
+        'url': 'https://sporza.be/nl/2019/05/15/de-belgian-cats-zijn-klaar-voor-het-ek/',
+        'md5': '910bba927566e9ab992278f647eb4b75',
+        'info_dict': {
+            'id': 'pbs-pub-f2c86a46-8138-413a-a4b9-a0015a16ce2c$vid-1f112b31-e58e-4379-908d-aca6d80f8818',
+            'ext': 'mp4',
+            'title': 'De Belgian Cats zijn klaar voor het EK mét Ann Wauters',
+            'timestamp': 1557923760,
+            'upload_date': '20190515',
+            'duration': 115.17,
         },
-        # cobra.be
-        {
-            'url': 'http://cobra.be/cm/cobra/videozone/rubriek/film-videozone/141022-mv-ellis-cafecorsari',
-            'md5': '78a2b060a5083c4f055449a72477409d',
-            'info_dict': {
-                'id': '2126050',
-                'ext': 'flv',
-                'title': 'Bret Easton Ellis in Café Corsari',
-                'description': 'md5:f699986e823f32fd6036c1855a724ee9',
-                'timestamp': 1413967500.494,
-                'upload_date': '20141022',
-                'duration': 661,
-            },
-            'skip': 'HTTP Error 404: Not Found',
-        },
-        {
-            # YouTube video
-            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/nieuws/cultuurenmedia/1.2622957',
-            'md5': 'b8b93da1df1cea6c8556255a796b7d61',
-            'info_dict': {
-                'id': 'Wji-BZ0oCwg',
-                'ext': 'mp4',
-                'title': 'ROGUE ONE: A STAR WARS STORY Official Teaser Trailer',
-                'description': 'md5:8e468944dce15567a786a67f74262583',
-                'uploader': 'Star Wars',
-                'uploader_id': 'starwars',
-                'upload_date': '20160407',
-            },
-            'add_ie': ['Youtube'],
-        },
-        {
-            'url': 'http://cobra.canvas.be/cm/cobra/videozone/rubriek/film-videozone/1.2377055',
-            'info_dict': {
-                'id': '2377055',
-                'ext': 'mp4',
-                'title': 'Cafe Derby',
-                'description': 'Lenny Van Wesemael debuteert met de langspeelfilm Café Derby. Een waar gebeurd maar ook verzonnen verhaal.',
-                'upload_date': '20150626',
-                'timestamp': 1435305240.769,
-            },
-            'params': {
-                # m3u8 download
-                'skip_download': True,
-            }
-        }
-    ]
+    }, {
+        'url': 'https://www.vrt.be/vrtnws/en/2019/05/15/belgium_s-eurovision-entry-falls-at-the-first-hurdle/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.vrt.be/vrtnws/de/2019/05/15/aus-fuer-eliott-im-halbfinale-des-eurosongfestivals/',
+        'only_matching': True,
+    }]
+    _CLIENT_MAP = {
+        'vrt.be/vrtnws': 'vrtnieuws',
+        'sporza.be': 'sporza',
+    }
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
+        site, display_id = re.match(self._VALID_URL, url).groups()
+        webpage = self._download_webpage(url, display_id)
+        attrs = extract_attributes(self._search_regex(
+            r'(<[^>]+class="vrtvideo"[^>]*>)', webpage, 'vrt video'))
 
-        webpage = self._download_webpage(url, video_id)
+        asset_id = attrs['data-videoid']
+        publication_id = attrs.get('data-publicationid')
+        if publication_id:
+            asset_id = publication_id + '$' + asset_id
+        client = attrs.get('data-client') or self._CLIENT_MAP[site]
 
-        video_id = self._search_regex(
-            r'data-video-id="([^"]+)_[^"]+"', webpage, 'video id', fatal=False)
-
-        src = self._search_regex(
-            r'data-video-src="([^"]+)"', webpage, 'video src', default=None)
-
-        video_type = self._search_regex(
-            r'data-video-type="([^"]+)"', webpage, 'video type', default=None)
-
-        if video_type == 'YouTubeVideo':
-            return self.url_result(src, 'Youtube')
-
-        formats = []
-
-        mobj = re.search(
-            r'data-video-iphone-server="(?P<server>[^"]+)"\s+data-video-iphone-path="(?P<path>[^"]+)"',
-            webpage)
-        if mobj:
-            formats.extend(self._extract_m3u8_formats(
-                '%s/%s' % (mobj.group('server'), mobj.group('path')),
-                video_id, 'mp4', m3u8_id='hls', fatal=False))
-
-        if src:
-            formats = self._extract_wowza_formats(src, video_id)
-            if 'data-video-geoblocking="true"' not in webpage:
-                for f in formats:
-                    if f['url'].startswith('rtsp://'):
-                        http_format = f.copy()
-                        http_format.update({
-                            'url': f['url'].replace('rtsp://', 'http://').replace('vod.', 'download.').replace('/_definst_/', '/').replace('mp4:', ''),
-                            'format_id': f['format_id'].replace('rtsp', 'http'),
-                            'protocol': 'http',
-                        })
-                        formats.append(http_format)
-
-        if not formats and 'data-video-geoblocking="true"' in webpage:
-            self.raise_geo_restricted('This video is only available in Belgium')
-
-        self._sort_formats(formats)
-
-        title = self._og_search_title(webpage)
-        description = self._og_search_description(webpage, default=None)
-        thumbnail = self._og_search_thumbnail(webpage)
-        timestamp = float_or_none(self._search_regex(
-            r'data-video-sitestat-pubdate="(\d+)"', webpage, 'timestamp', fatal=False), 1000)
-        duration = float_or_none(self._search_regex(
-            r'data-video-duration="(\d+)"', webpage, 'duration', fatal=False), 1000)
+        title = strip_or_none(get_element_by_class(
+            'vrt-title', webpage) or self._html_search_meta(
+            ['og:title', 'twitter:title', 'name'], webpage))
+        description = self._html_search_meta(
+            ['og:description', 'twitter:description', 'description'], webpage)
+        if description == '…':
+            description = None
+        timestamp = unified_timestamp(self._html_search_meta(
+            'article:published_time', webpage))
 
         return {
-            'id': video_id,
+            '_type': 'url_transparent',
+            'id': asset_id,
+            'display_id': display_id,
             'title': title,
             'description': description,
-            'thumbnail': thumbnail,
+            'thumbnail': attrs.get('data-posterimage'),
             'timestamp': timestamp,
-            'duration': duration,
-            'formats': formats,
+            'duration': float_or_none(attrs.get('data-duration'), 1000),
+            'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (client, asset_id),
+            'ie_key': 'Canvas',
         }
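
The rewritten VRT extractor no longer builds formats itself: it reads the data-* attributes of the page's vrtvideo element and hands a mediazone asset URL to the Canvas extractor via url_transparent. A standalone sketch of that attribute-to-URL step, using a made-up HTML snippet and the standard html.parser module instead of extract_attributes:

from html.parser import HTMLParser

SAMPLE = ('<div class="vrtvideo" data-videoid="vid-123" '
          'data-publicationid="pbs-pub-456" data-client="vrtnieuws"></div>')

class VrtVideoTagParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.attrs = {}

    def handle_starttag(self, tag, attrs):
        # Keep the attributes of the first tag carrying the "vrtvideo" class.
        attrs = dict(attrs)
        if 'vrtvideo' in (attrs.get('class') or '') and not self.attrs:
            self.attrs = attrs

parser = VrtVideoTagParser()
parser.feed(SAMPLE)
attrs = parser.attrs

asset_id = attrs['data-videoid']
publication_id = attrs.get('data-publicationid')
if publication_id:
    asset_id = publication_id + '$' + asset_id
client = attrs.get('data-client') or 'vrtnieuws'

print('https://mediazone.vrt.be/api/v1/%s/assets/%s' % (client, asset_id))
# -> https://mediazone.vrt.be/api/v1/vrtnieuws/assets/pbs-pub-456$vid-123
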
From e3c1266f492d710e2acbf0d80f44f7f805eb5187 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Sat, 18 May 2019 03:17:15 +0700
Subject: [PATCH 08/12] [extractor/common] Move workaround for applying first
 Set-Cookie header into a separate method

---
 youtube_dl/extractor/common.py | 23 +++++++++++++++++++++++
 youtube_dl/extractor/vk.py     | 22 +++-------------------
 2 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 69c3bc755..f994953bc 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2817,6 +2817,29 @@ class InfoExtractor(object):
         self._downloader.cookiejar.add_cookie_header(req)
         return compat_cookies.SimpleCookie(req.get_header('Cookie'))
 
+    def _apply_first_set_cookie_header(self, url_handle, cookie):
+        # Some sites (e.g. [1-3]) may serve two cookies under the same name
+        # in Set-Cookie header and expect the first (old) one to be set rather
+        # than second (new). However, as of RFC6265 the newer one cookie
+        # should be set into cookie store what actually happens.
+        # We will workaround this issue by resetting the cookie to
+        # the first one manually.
+        # 1. https://new.vk.com/
+        # 2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
+        # 3. https://learning.oreilly.com/
+        for header, cookies in url_handle.headers.items():
+            if header.lower() != 'set-cookie':
+                continue
+            if sys.version_info[0] >= 3:
+                cookies = cookies.encode('iso-8859-1')
+            cookies = cookies.decode('utf-8')
+            cookie_value = re.search(
+                r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
+            if cookie_value:
+                value, domain = cookie_value.groups()
+                self._set_cookie(domain, cookie, value)
+                break
+
     def get_testcases(self, include_onlymatching=False):
         t = getattr(self, '_TEST', None)
         if t:
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index b7ce2fb97..f57ed2288 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
 
 import collections
 import re
-import sys
 
 from .common import InfoExtractor
 from ..compat import compat_urlparse
@@ -45,24 +44,9 @@ class VKBaseIE(InfoExtractor):
             'pass': password.encode('cp1251'),
         })
 
-        # https://new.vk.com/ serves two same remixlhk cookies in Set-Cookie header
-        # and expects the first one to be set rather than second (see
-        # https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201).
-        # As of RFC6265 the newer one cookie should be set into cookie store
-        # what actually happens.
-        # We will workaround this VK issue by resetting the remixlhk cookie to
-        # the first one manually.
-        for header, cookies in url_handle.headers.items():
-            if header.lower() != 'set-cookie':
-                continue
-            if sys.version_info[0] >= 3:
-                cookies = cookies.encode('iso-8859-1')
-            cookies = cookies.decode('utf-8')
-            remixlhk = re.search(r'remixlhk=(.+?);.*?\bdomain=(.+?)(?:[,;]|$)', cookies)
-            if remixlhk:
-                value, domain = remixlhk.groups()
-                self._set_cookie(domain, 'remixlhk', value)
-                break
+        # vk serves two same remixlhk cookies in Set-Cookie header and expects
+        # first one to be actually set
+        self._apply_first_set_cookie_header(url_handle, 'remixlhk')
 
         login_page = self._download_webpage(
             'https://login.vk.com/?act=login', None,
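
A standalone sketch of the situation _apply_first_set_cookie_header works around: the response carries two Set-Cookie values for the same name, the cookie store ends up with the last one (per RFC 6265), and the first one has to be re-applied by hand. The header string below is made up; the regex mirrors the one in the patch:

import re

set_cookie = (
    'remixlhk=first; path=/; domain=.vk.com, '
    'remixlhk=second; path=/; domain=.vk.com'
)

def first_cookie(cookie_name, cookies):
    # Grab the first "name=value ... domain=..." pair so it can be written
    # back into the cookie jar, overriding the later value.
    mobj = re.search(
        r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie_name, cookies)
    return mobj.groups() if mobj else None

print(first_cookie('remixlhk', set_cookie))  # -> ('first', '.vk.com')
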
From a9e03736dfb2038d6a569e6305f4ac727a8ac71b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Sat, 18 May 2019 03:23:40 +0700
Subject: [PATCH 09/12] [safari] Fix authentication (closes #21090)

---
 youtube_dl/extractor/safari.py | 87 ++++++++++++++++++++--------------
 1 file changed, 52 insertions(+), 35 deletions(-)

diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py
index c0d32a1b9..8d4806794 100644
--- a/youtube_dl/extractor/safari.py
+++ b/youtube_dl/extractor/safari.py
@@ -1,15 +1,18 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import json
 import re
 
 from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+    compat_str,
+    compat_urlparse,
+)
 from ..utils import (
     ExtractorError,
-    sanitized_Request,
-    std_headers,
-    urlencode_postdata,
     update_url_query,
 )
 
@@ -31,44 +34,52 @@ class SafariBaseIE(InfoExtractor):
         if username is None:
             return
 
-        headers = std_headers.copy()
-        if 'Referer' not in headers:
-            headers['Referer'] = self._LOGIN_URL
+        _, urlh = self._download_webpage_handle(
+            'https://learning.oreilly.com/accounts/login-check/', None,
+            'Downloading login page')
 
-        login_page = self._download_webpage(
-            self._LOGIN_URL, None, 'Downloading login form', headers=headers)
+        def is_logged(urlh):
+            return 'learning.oreilly.com/home/' in compat_str(urlh.geturl())
 
-        def is_logged(webpage):
-            return any(re.search(p, webpage) for p in (
-                r'href=["\']/accounts/logout/', r'>Sign Out<'))
-
-        if is_logged(login_page):
+        if is_logged(urlh):
             self.LOGGED_IN = True
             return
 
-        csrf = self._html_search_regex(
-            r"name='csrfmiddlewaretoken'\s+value='([^']+)'",
-            login_page, 'csrf token')
+        redirect_url = compat_str(urlh.geturl())
+        parsed_url = compat_urlparse.urlparse(redirect_url)
+        qs = compat_parse_qs(parsed_url.query)
+        next_uri = compat_urlparse.urljoin(
+            'https://api.oreilly.com', qs['next'][0])
 
-        login_form = {
-            'csrfmiddlewaretoken': csrf,
-            'email': username,
-            'password1': password,
-            'login': 'Sign In',
-            'next': '',
-        }
+        auth, urlh = self._download_json_handle(
+            'https://www.oreilly.com/member/auth/login/', None, 'Logging in',
+            data=json.dumps({
+                'email': username,
+                'password': password,
+                'redirect_uri': next_uri,
+            }).encode(), headers={
+                'Content-Type': 'application/json',
+                'Referer': redirect_url,
+            }, expected_status=400)
 
-        request = sanitized_Request(
-            self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
-        login_page = self._download_webpage(
-            request, None, 'Logging in')
-
-        if not is_logged(login_page):
+        credentials = auth.get('credentials')
+        if (not auth.get('logged_in') and not auth.get('redirect_uri')
+                and credentials):
             raise ExtractorError(
-                'Login failed; make sure your credentials are correct and try again.',
-                expected=True)
+                'Unable to login: %s' % credentials, expected=True)
 
-        self.LOGGED_IN = True
+        # oreilly serves two same groot_sessionid cookies in Set-Cookie header
+        # and expects first one to be actually set
+        self._apply_first_set_cookie_header(urlh, 'groot_sessionid')
+
+        _, urlh = self._download_webpage_handle(
+            auth.get('redirect_uri') or next_uri, None, 'Completing login',)
+
+        if is_logged(urlh):
+            self.LOGGED_IN = True
+            return
+
+        raise ExtractorError('Unable to log in')
@@ -76,7 +87,7 @@ class SafariIE(SafariBaseIE):
     IE_DESC = 'safaribooksonline.com online video'
     _VALID_URL = r'''(?x)
                     https?://
-                        (?:www\.)?(?:safaribooksonline|learning\.oreilly)\.com/
+                        (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                         (?:
                             library/view/[^/]+/(?P<course_id>[^/]+)/(?P<part>[^/?\#&]+)\.html|
                             videos/[^/]+/[^/]+/(?P<reference_id>[^-]+-[^/?\#&]+)
@@ -107,6 +118,9 @@ class SafariIE(SafariBaseIE):
     }, {
        'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838/9780133392838-00_SeriesIntro',
        'only_matching': True,
+    }, {
+        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/00_SeriesIntro.html',
+        'only_matching': True,
     }]
 
     _PARTNER_ID = '1926081'
@@ -163,7 +177,7 @@
 
 class SafariApiIE(SafariBaseIE):
     IE_NAME = 'safari:api'
-    _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|learning\.oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'
+    _VALID_URL = r'https?://(?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/api/v1/book/(?P<course_id>[^/]+)/chapter(?:-content)?/(?P<part>[^/?#&]+)\.html'
 
     _TESTS = [{
         'url': 'https://www.safaribooksonline.com/api/v1/book/9780133392838/chapter/part00.html',
@@ -188,7 +202,7 @@ class SafariCourseIE(SafariBaseIE):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
-                            (?:www\.)?(?:safaribooksonline|learning\.oreilly)\.com/
+                            (?:www\.)?(?:safaribooksonline|(?:learning\.)?oreilly)\.com/
                             (?:
                                 library/view/[^/]+|
                                 api/v1/book|
@@ -219,6 +233,9 @@ class SafariCourseIE(SafariBaseIE):
     }, {
         'url': 'https://learning.oreilly.com/videos/hadoop-fundamentals-livelessons/9780133392838',
         'only_matching': True,
+    }, {
+        'url': 'https://www.oreilly.com/library/view/hadoop-fundamentals-livelessons/9780133392838/',
+        'only_matching': True,
     }]
 
     @classmethod
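
The new login flow above posts JSON credentials and resolves the "next" query parameter of the login-check redirect against api.oreilly.com. A standalone sketch of just that URL handling; the redirect URL and credentials are placeholders and no request is actually sent:

import json
from urllib.parse import parse_qs, urljoin, urlparse

redirect_url = 'https://www.oreilly.com/member/login/?next=%2Fmember%2Fprofile%2F'

# Resolve the "next" parameter the same way the patch does.
qs = parse_qs(urlparse(redirect_url).query)
next_uri = urljoin('https://api.oreilly.com', qs['next'][0])
print(next_uri)  # -> https://api.oreilly.com/member/profile/

# Shape of the JSON body sent to the login endpoint (values are placeholders).
payload = json.dumps({
    'email': 'user@example.com',
    'password': 'hunter2',
    'redirect_uri': next_uri,
}).encode()
print(payload)
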
From ce2fe4c01cceef4b636995275b573baf51587fa8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Mon, 20 May 2019 23:23:18 +0700
Subject: [PATCH 10/12] [extractor/common] Add doc string for
 _apply_first_set_cookie_header

---
 youtube_dl/extractor/common.py | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index f994953bc..937237b3f 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -2818,15 +2818,19 @@ class InfoExtractor(object):
         return compat_cookies.SimpleCookie(req.get_header('Cookie'))
 
     def _apply_first_set_cookie_header(self, url_handle, cookie):
-        # Some sites (e.g. [1-3]) may serve two cookies under the same name
-        # in Set-Cookie header and expect the first (old) one to be set rather
-        # than second (new). However, as of RFC6265 the newer one cookie
-        # should be set into cookie store what actually happens.
-        # We will workaround this issue by resetting the cookie to
-        # the first one manually.
-        # 1. https://new.vk.com/
-        # 2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
-        # 3. https://learning.oreilly.com/
+        """
+        Apply first Set-Cookie header instead of the last. Experimental.
+
+        Some sites (e.g. [1-3]) may serve two cookies under the same name
+        in Set-Cookie header and expect the first (old) one to be set rather
+        than second (new). However, as of RFC6265 the newer one cookie
+        should be set into cookie store what actually happens.
+        We will workaround this issue by resetting the cookie to
+        the first one manually.
+        1. https://new.vk.com/
+        2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
+        3. https://learning.oreilly.com/
+        """
         for header, cookies in url_handle.headers.items():
             if header.lower() != 'set-cookie':
                 continue

From 42c971341b804b758d12b7a85547be05160f1b3d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Mon, 20 May 2019 23:24:27 +0700
Subject: [PATCH 11/12] [ChangeLog] Actualize

[ci skip]
---
 ChangeLog | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 13cb6288d..eba7202dd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,22 @@
+version <unreleased>
+
+Core
++ [extractor/common] Move workaround for applying first Set-Cookie header
+  into a separate _apply_first_set_cookie_header method
+
+Extractors
+* [safari] Fix authentication (#21090)
+* [vk] Use _apply_first_set_cookie_header
+* [vrt] Fix extraction (#20527)
++ [canvas] Add support for vrtnieuws and sporza site ids and extract
+  AES HLS formats
++ [vrv] Extract captions (#19238)
+* [tele5] Improve video id extraction
+* [tele5] Relax URL regular expression (#21020, #21063)
+* [svtplay] Update API URL (#21075)
++ [yahoo:gyao] Add X-User-Agent header to dam proxy requests (#21071)
+
+
 version 2019.05.11
 
 Core

From 6ab30ff50bf6bd0585927cb73c7421bef184f87a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergey=20M=E2=80=A4?=
Date: Mon, 20 May 2019 23:29:49 +0700
Subject: [PATCH 12/12] release 2019.05.20

---
 .github/ISSUE_TEMPLATE/1_broken_site.md          | 6 +++---
 .github/ISSUE_TEMPLATE/2_site_support_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/3_site_feature_request.md | 4 ++--
 .github/ISSUE_TEMPLATE/4_bug_report.md           | 6 +++---
 .github/ISSUE_TEMPLATE/5_feature_request.md      | 4 ++--
 ChangeLog                                        | 2 +-
 docs/supportedsites.md                           | 2 +-
 youtube_dl/version.py                            | 2 +-
 8 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/1_broken_site.md b/.github/ISSUE_TEMPLATE/1_broken_site.md
index 6b931b3cf..dc303946e 100644
--- a/.github/ISSUE_TEMPLATE/1_broken_site.md
+++ b/.github/ISSUE_TEMPLATE/1_broken_site.md
@@ -18,7 +18,7 @@ title: ''
 
 - [ ] I'm reporting a broken site support
-- [ ] I've verified that I'm running youtube-dl version **2019.05.11**
+- [ ] I've verified that I'm running youtube-dl version **2019.05.20**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar issues including closed ones
@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2019.05.11
+[debug] youtube-dl version 2019.05.20
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/2_site_support_request.md b/.github/ISSUE_TEMPLATE/2_site_support_request.md
index f2dc784a2..46e143c8a 100644
--- a/.github/ISSUE_TEMPLATE/2_site_support_request.md
+++ b/.github/ISSUE_TEMPLATE/2_site_support_request.md
@@ -19,7 +19,7 @@ labels: 'site-support-request'
 
 - [ ] I'm reporting a new site support request
-- [ ] I've verified that I'm running youtube-dl version **2019.05.11**
+- [ ] I've verified that I'm running youtube-dl version **2019.05.20**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that none of provided URLs violate any copyrights
 - [ ] I've searched the bugtracker for similar site support requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/3_site_feature_request.md b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
index 39a0af13f..bc6c4694b 100644
--- a/.github/ISSUE_TEMPLATE/3_site_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/3_site_feature_request.md
@@ -18,13 +18,13 @@ title: ''
 
 - [ ] I'm reporting a site feature request
-- [ ] I've verified that I'm running youtube-dl version **2019.05.11**
+- [ ] I've verified that I'm running youtube-dl version **2019.05.20**
 - [ ] I've searched the bugtracker for similar site feature requests including closed ones
diff --git a/.github/ISSUE_TEMPLATE/4_bug_report.md b/.github/ISSUE_TEMPLATE/4_bug_report.md
index 139f36ab8..bcc51f986 100644
--- a/.github/ISSUE_TEMPLATE/4_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/4_bug_report.md
@@ -18,7 +18,7 @@ title: ''
 
 - [ ] I'm reporting a broken site support issue
-- [ ] I've verified that I'm running youtube-dl version **2019.05.11**
+- [ ] I've verified that I'm running youtube-dl version **2019.05.20**
 - [ ] I've checked that all provided URLs are alive and playable in a browser
 - [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
 - [ ] I've searched the bugtracker for similar bug reports including closed ones
@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2019.05.11
+[debug] youtube-dl version 2019.05.20
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
diff --git a/.github/ISSUE_TEMPLATE/5_feature_request.md b/.github/ISSUE_TEMPLATE/5_feature_request.md
index d60da6db9..c8d16960e 100644
--- a/.github/ISSUE_TEMPLATE/5_feature_request.md
+++ b/.github/ISSUE_TEMPLATE/5_feature_request.md
@@ -19,13 +19,13 @@ labels: 'request'
 
 - [ ] I'm reporting a feature request
-- [ ] I've verified that I'm running youtube-dl version **2019.05.11**
+- [ ] I've verified that I'm running youtube-dl version **2019.05.20**
 - [ ] I've searched the bugtracker for similar feature requests including closed ones
diff --git a/ChangeLog b/ChangeLog
index eba7202dd..3babb6f48 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,4 @@
-version <unreleased>
+version 2019.05.20
 
 Core
 + [extractor/common] Move workaround for applying first Set-Cookie header
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index a8a9224cb..404a2f0a4 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -1071,7 +1071,7 @@
  - **VoxMediaVolume**
  - **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
  - **Vrak**
- - **VRT**: deredactie.be, sporza.be, cobra.be and cobra.canvas.be
+ - **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
  - **VrtNU**: VrtNU.be
  - **vrv**
  - **vrv:series**
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index e63527dbb..8df77378b 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2019.05.11'
+__version__ = '2019.05.20'