| 
									
										
										
										
											2016-10-02 13:39:18 +02:00
										 |  |  |  | # coding: utf-8 | 
					
						
							| 
									
										
										
										
											2014-01-30 05:23:44 +07:00
										 |  |  |  | from __future__ import unicode_literals | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-02-25 20:26:11 +07:00
										 |  |  |  | import re | 
					
						
							| 
									
										
										
										
											2014-02-25 20:29:16 +07:00
										 |  |  |  | import json | 
					
						
							| 
									
										
										
										
											2014-02-25 20:26:11 +07:00
										 |  |  |  | import base64 | 
					
						
							|  |  |  |  | import zlib | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-11-04 03:08:17 +01:00
										 |  |  |  | from hashlib import sha1 | 
					
						
							|  |  |  |  | from math import pow, sqrt, floor | 
					
						
							| 
									
										
										
										
											2015-02-15 18:21:42 +01:00
										 |  |  |  | from .common import InfoExtractor | 
					
						
							| 
									
										
										
										
											2014-12-13 12:24:42 +01:00
										 |  |  |  | from ..compat import ( | 
					
						
							| 
									
										
										
										
											2015-10-25 20:04:55 +01:00
										 |  |  |  |     compat_etree_fromstring, | 
					
						
							| 
									
										
										
										
											2016-03-26 01:46:57 +06:00
										 |  |  |  |     compat_urllib_parse_urlencode, | 
					
						
							| 
									
										
										
										
											2013-11-04 03:08:17 +01:00
										 |  |  |  |     compat_urllib_request, | 
					
						
							| 
									
										
										
										
											2015-08-18 23:02:57 +06:00
										 |  |  |  |     compat_urlparse, | 
					
						
							| 
									
										
										
										
											2014-12-13 12:24:42 +01:00
										 |  |  |  | ) | 
					
						
							|  |  |  |  | from ..utils import ( | 
					
						
							|  |  |  |  |     ExtractorError, | 
					
						
							| 
									
										
										
										
											2013-11-04 03:08:17 +01:00
										 |  |  |  |     bytes_to_intlist, | 
					
						
							|  |  |  |  |     intlist_to_bytes, | 
					
						
							| 
									
										
										
										
											2015-09-03 13:15:02 +02:00
										 |  |  |  |     int_or_none, | 
					
						
							| 
									
										
										
										
											2015-11-07 20:02:39 +06:00
										 |  |  |  |     lowercase_escape, | 
					
						
							| 
									
										
										
										
											2015-08-18 23:02:57 +06:00
										 |  |  |  |     remove_end, | 
					
						
							| 
									
										
										
										
											2015-11-21 22:18:17 +06:00
										 |  |  |  |     sanitized_Request, | 
					
						
							| 
									
										
										
										
											2013-11-04 03:08:17 +01:00
										 |  |  |  |     unified_strdate, | 
					
						
							| 
									
										
										
										
											2014-08-29 22:32:03 +01:00
										 |  |  |  |     urlencode_postdata, | 
					
						
							| 
									
										
										
										
											2015-09-03 13:15:02 +02:00
										 |  |  |  |     xpath_text, | 
					
						
							| 
									
										
										
										
											2016-04-29 11:46:42 +01:00
										 |  |  |  |     extract_attributes, | 
					
						
							| 
									
										
										
										
											2013-11-04 03:08:17 +01:00
										 |  |  |  | ) | 
					
						
							|  |  |  |  | from ..aes import ( | 
					
						
							|  |  |  |  |     aes_cbc_decrypt, | 
					
						
							|  |  |  |  | ) | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-02-25 20:26:11 +07:00
										 |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-09-08 14:11:20 +06:00
class CrunchyrollBaseIE(InfoExtractor):
    """Shared base for Crunchyroll extractors.

    Handles account login and wraps ``_download_webpage`` to force an
    Accept-Language header that works around Crunchyroll's
    language-based georestriction heuristics.
    """

    _LOGIN_URL = 'https://www.crunchyroll.com/login'
    _LOGIN_FORM = 'login_form'  # id attribute of the login <form> on _LOGIN_URL
    _NETRC_MACHINE = 'crunchyroll'

    def _login(self):
        """Authenticate the session with the configured credentials.

        Returns silently when no credentials are configured or when the
        session is already logged in; raises ExtractorError when the
        login attempt is rejected.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        def is_logged(webpage):
            # An authenticated request to the login URL is answered with a
            # "Redirecting" interstitial instead of the login form.
            return '<title>Redirecting' in webpage

        # Already logged in
        if is_logged(login_page):
            return

        login_form_str = self._search_regex(
            r'(?P<form><form[^>]+?id=(["\'])%s\2[^>]*>)' % self._LOGIN_FORM,
            login_page, 'login form', group='form')

        # POST target comes from the form's action attribute; fall back to
        # the login URL itself, resolving relative targets against it.
        post_url = extract_attributes(login_form_str).get('action')
        if not post_url:
            post_url = self._LOGIN_URL
        elif not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        # Preserve hidden form inputs (e.g. anti-CSRF tokens) and add the
        # user-supplied credentials on top.
        login_form = self._form_hidden_inputs(self._LOGIN_FORM, login_page)

        login_form.update({
            'login_form[name]': username,
            'login_form[password]': password,
        })

        response = self._download_webpage(
            post_url, None, 'Logging in', 'Wrong login info',
            data=urlencode_postdata(login_form),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        # Successful login
        if is_logged(response):
            return

        # Surface the site-provided error message when one is shown.
        error = self._html_search_regex(
            '(?s)<ul[^>]+class=["\']messages["\'][^>]*>(.+?)</ul>',
            response, 'error message', default=None)
        if error:
            raise ExtractorError('Unable to login: %s' % error, expected=True)

        raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        # Authenticate (when credentials are available) before any extraction.
        self._login()

    def _download_webpage(self, url_or_request, *args, **kwargs):
        # Wrap every request made by this extractor so the header below is
        # always present, whether callers pass a URL or a Request object.
        request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
                   else sanitized_Request(url_or_request))
        # Accept-Language must be set explicitly to accept any language to avoid issues
        # similar to https://github.com/rg3/youtube-dl/issues/6797.
        # Along with IP address Crunchyroll uses Accept-Language to guess whether georestriction
        # should be imposed or not (from what I can see it just takes the first language
        # ignoring the priority and requires it to correspond the IP). By the way this causes
        # Crunchyroll to not work in georestriction cases in some browsers that don't place
        # the locale lang first in header. However allowing any language seems to workaround the issue.
        request.add_header('Accept-Language', '*')
        return super(CrunchyrollBaseIE, self)._download_webpage(request, *args, **kwargs)

    @staticmethod
    def _add_skip_wall(url):
        """Return *url* with ``skip_wall=1`` forced into its query string."""
        parsed_url = compat_urlparse.urlparse(url)
        qs = compat_urlparse.parse_qs(parsed_url.query)
        # Always force skip_wall to bypass maturity wall, namely 18+ confirmation message:
        # > This content may be inappropriate for some people.
        # > Are you sure you want to continue?
        # since it's not disabled by default in crunchyroll account's settings.
        # See https://github.com/rg3/youtube-dl/issues/7202.
        qs['skip_wall'] = ['1']
        return compat_urlparse.urlunparse(
            parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
					
						
							| 
									
										
										
										
											2015-09-08 14:11:20 +06:00
										 |  |  |  | 
 | 
					
						
class CrunchyrollIE(CrunchyrollBaseIE):
    # Matches www/m subdomains of crunchyroll.com/.fr, both "media-<id>"
    # style and slug-style episode URLs; the numeric id is captured as
    # the video_id group.
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TESTS = [{
        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        'info_dict': {
            'id': '645513',
            'ext': 'flv',
            'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
            'description': 'md5:2d17137920c64f2f49981a7797d275ef',
            'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
            'upload_date': '20131013',
            'url': 're:(?!.*&)',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
        'info_dict': {
            'id': '589804',
            'ext': 'flv',
            'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
            'description': 'md5:2fbc01f90b87e8e9137296f37b461c12',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Danny Choo Network',
            'upload_date': '20120213',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
        'skip': 'Video gone',
    }, {
        'url': 'http://www.crunchyroll.com/rezero-starting-life-in-another-world-/episode-5-the-morning-of-our-promise-is-still-distant-702409',
        'info_dict': {
            'id': '702409',
            'ext': 'mp4',
            'title': 'Re:ZERO -Starting Life in Another World- Episode 5 – The Morning of Our Promise Is Still Distant',
            'description': 'md5:97664de1ab24bbf77a9c01918cb7dca9',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'TV TOKYO',
            'upload_date': '20160508',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/konosuba-gods-blessing-on-this-wonderful-world/episode-1-give-me-deliverance-from-this-judicial-injustice-727589',
        'info_dict': {
            'id': '727589',
            'ext': 'mp4',
            'title': "KONOSUBA -God's blessing on this wonderful world! 2 Episode 1 – Give Me Deliverance from this Judicial Injustice!",
            'description': 'md5:cbcf05e528124b0f3a0a419fc805ea7d',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Kadokawa Pictures Inc.',
            'upload_date': '20170118',
            'series': "KONOSUBA -God's blessing on this wonderful world!",
            'season_number': 2,
            'episode': 'Give Me Deliverance from this Judicial Injustice!',
            'episode_number': 1,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
        'only_matching': True,
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium available
        'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
        'only_matching': True,
    }]

    # Maps a requested vertical resolution to a pair of numeric ids —
    # presumably (video_format, video_quality) parameters for the media
    # API; verify against the request-building code below. TODO confirm.
    _FORMAT_IDS = {
        '360': ('60', '106'),
        '480': ('61', '106'),
        '720': ('62', '106'),
        '1080': ('80', '108'),
    }
					
						
							|  |  |  |  | 
 | 
					
						
    def _decrypt_subtitles(self, data, iv, id):
        """Decrypt a base64/AES-CBC/zlib-encoded subtitle payload.

        ``data`` and ``iv`` are base64-encoded strings; ``id`` is the
        numeric subtitle script id, from which the AES key is derived.
        Returns the decompressed subtitle XML as bytes.
        """
        data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
        iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
        id = int(id)

        def obfuscate_key_aux(count, modulo, start):
            # Fibonacci-style sequence seeded by ``start``; each term is
            # reduced mod ``modulo`` and shifted into printable ASCII (+33).
            output = list(start)
            for _ in range(count):
                output.append(output[-1] + output[-2])
            # cut off start values
            output = output[2:]
            output = list(map(lambda x: x % modulo + 33, output))
            return output

        def obfuscate_key(key):
            # Key schedule mirroring the site's player: mixes the script id
            # with a magic constant, then hashes with SHA-1. The exact
            # arithmetic must not be altered.
            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
            num2 = (num1 ^ key) << 5
            num3 = key ^ num1
            num4 = num3 ^ (num3 >> 3) ^ num2
            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        # AES-256-CBC decrypt, then the plaintext is zlib-compressed XML.
        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-10-29 21:19:20 +01:00
										 |  |  |  |     def _convert_subtitles_to_srt(self, sub_root): | 
					
						
							| 
									
										
										
										
											2014-01-30 05:23:44 +07:00
										 |  |  |  |         output = '' | 
					
						
							| 
									
										
										
										
											2014-10-29 21:19:20 +01:00
										 |  |  |  | 
 | 
					
						
							|  |  |  |  |         for i, event in enumerate(sub_root.findall('./events/event'), 1): | 
					
						
							|  |  |  |  |             start = event.attrib['start'].replace('.', ',') | 
					
						
							|  |  |  |  |             end = event.attrib['end'].replace('.', ',') | 
					
						
							|  |  |  |  |             text = event.attrib['text'].replace('\\N', '\n') | 
					
						
							| 
									
										
										
										
											2014-01-30 05:23:44 +07:00
										 |  |  |  |             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text) | 
					
						
							| 
									
										
										
										
											2013-11-04 03:08:17 +01:00
										 |  |  |  |         return output | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-10-29 21:19:20 +01:00
										 |  |  |  |     def _convert_subtitles_to_ass(self, sub_root): | 
					
						
							| 
									
										
										
										
											2014-08-30 12:48:56 +01:00
										 |  |  |  |         output = '' | 
					
						
							|  |  |  |  | 
 | 
					
						
							|  |  |  |  |         def ass_bool(strvalue): | 
					
						
							|  |  |  |  |             assvalue = '0' | 
					
						
							|  |  |  |  |             if strvalue == '1': | 
					
						
							|  |  |  |  |                 assvalue = '-1' | 
					
						
							|  |  |  |  |             return assvalue | 
					
						
							|  |  |  |  | 
 | 
					
						
							|  |  |  |  |         output = '[Script Info]\n' | 
					
						
							| 
									
										
										
										
											2016-02-14 15:37:17 +06:00
										 |  |  |  |         output += 'Title: %s\n' % sub_root.attrib['title'] | 
					
						
							| 
									
										
										
										
											2014-08-30 12:48:56 +01:00
										 |  |  |  |         output += 'ScriptType: v4.00+\n' | 
					
						
							| 
									
										
										
										
											2016-02-14 15:37:17 +06:00
										 |  |  |  |         output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style'] | 
					
						
							|  |  |  |  |         output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x'] | 
					
						
							|  |  |  |  |         output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y'] | 
					
						
							| 
									
										
										
										
											2016-03-29 14:26:24 -03:00
										 |  |  |  |         output += """ScaledBorderAndShadow: no
 | 
					
						
							| 
									
										
										
										
											2014-08-30 12:48:56 +01:00
										 |  |  |  | 
 | 
					
						
							|  |  |  |  | [V4+ Styles] | 
					
						
							|  |  |  |  | Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding | 
					
						
							|  |  |  |  | """
 | 
					
						
							|  |  |  |  |         for style in sub_root.findall('./styles/style'): | 
					
						
							| 
									
										
										
										
											2016-02-14 15:37:17 +06:00
										 |  |  |  |             output += 'Style: ' + style.attrib['name'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['font_name'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['font_size'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['primary_colour'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['secondary_colour'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['outline_colour'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['back_colour'] | 
					
						
							|  |  |  |  |             output += ',' + ass_bool(style.attrib['bold']) | 
					
						
							|  |  |  |  |             output += ',' + ass_bool(style.attrib['italic']) | 
					
						
							|  |  |  |  |             output += ',' + ass_bool(style.attrib['underline']) | 
					
						
							|  |  |  |  |             output += ',' + ass_bool(style.attrib['strikeout']) | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['scale_x'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['scale_y'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['spacing'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['angle'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['border_style'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['outline'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['shadow'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['alignment'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['margin_l'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['margin_r'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['margin_v'] | 
					
						
							|  |  |  |  |             output += ',' + style.attrib['encoding'] | 
					
						
							| 
									
										
										
										
											2014-08-30 12:48:56 +01:00
										 |  |  |  |             output += '\n' | 
					
						
							|  |  |  |  | 
 | 
					
						
							|  |  |  |  |         output += """
 | 
					
						
							|  |  |  |  | [Events] | 
					
						
							|  |  |  |  | Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text | 
					
						
							|  |  |  |  | """
 | 
					
						
							|  |  |  |  |         for event in sub_root.findall('./events/event'): | 
					
						
							|  |  |  |  |             output += 'Dialogue: 0' | 
					
						
							| 
									
										
										
										
											2016-02-14 15:37:17 +06:00
										 |  |  |  |             output += ',' + event.attrib['start'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['end'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['style'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['name'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['margin_l'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['margin_r'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['margin_v'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['effect'] | 
					
						
							|  |  |  |  |             output += ',' + event.attrib['text'] | 
					
						
							| 
									
										
										
										
											2014-08-30 12:48:56 +01:00
										 |  |  |  |             output += '\n' | 
					
						
							|  |  |  |  | 
 | 
					
						
							|  |  |  |  |         return output | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-05-30 14:12:58 +06:00
										 |  |  |  |     def _extract_subtitles(self, subtitle): | 
					
						
							| 
									
										
										
										
											2015-10-25 20:04:55 +01:00
										 |  |  |  |         sub_root = compat_etree_fromstring(subtitle) | 
					
						
							| 
									
										
										
										
											2015-05-30 14:12:58 +06:00
										 |  |  |  |         return [{ | 
					
						
							|  |  |  |  |             'ext': 'srt', | 
					
						
							|  |  |  |  |             'data': self._convert_subtitles_to_srt(sub_root), | 
					
						
							|  |  |  |  |         }, { | 
					
						
							|  |  |  |  |             'ext': 'ass', | 
					
						
							|  |  |  |  |             'data': self._convert_subtitles_to_ass(sub_root), | 
					
						
							|  |  |  |  |         }] | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-02-15 18:21:42 +01:00
										 |  |  |  |     def _get_subtitles(self, video_id, webpage): | 
					
						
							|  |  |  |  |         subtitles = {} | 
					
						
							| 
									
										
										
										
											2015-10-22 20:34:11 +06:00
										 |  |  |  |         for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage): | 
					
						
							| 
									
										
										
										
											2015-02-15 18:21:42 +01:00
										 |  |  |  |             sub_page = self._download_webpage( | 
					
						
							|  |  |  |  |                 'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id, | 
					
						
							|  |  |  |  |                 video_id, note='Downloading subtitles for ' + sub_name) | 
					
						
							|  |  |  |  |             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False) | 
					
						
							|  |  |  |  |             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False) | 
					
						
							|  |  |  |  |             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False) | 
					
						
							|  |  |  |  |             if not id or not iv or not data: | 
					
						
							|  |  |  |  |                 continue | 
					
						
							|  |  |  |  |             subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8') | 
					
						
							|  |  |  |  |             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False) | 
					
						
							|  |  |  |  |             if not lang_code: | 
					
						
							|  |  |  |  |                 continue | 
					
						
							| 
									
										
										
										
											2015-05-30 14:12:58 +06:00
										 |  |  |  |             subtitles[lang_code] = self._extract_subtitles(subtitle) | 
					
						
							| 
									
										
										
										
											2015-02-15 18:21:42 +01:00
										 |  |  |  |         return subtitles | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-11-23 20:41:03 +01:00
    def _real_extract(self, url):
        """Extract a single Crunchyroll video: resolve the canonical page,
        probe every advertised quality for stream data, and return the
        standard info dict (formats, subtitles, series/episode metadata)."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        # Mobile pages ('m.' prefix) only expose a canonical link back to the
        # desktop page, which is the one actually scraped below.
        if mobj.group('prefix') == 'm':
            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
        else:
            webpage_url = 'http://www.' + mobj.group('url')

        # _add_skip_wall bypasses the maturity wall (defined on the base class)
        webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage')
        # A trailer notice means only a trailer is available - treat as an error
        note_m = self._html_search_regex(
            r'<div class="showmedia-trailer-notice">(.+?)</div>',
            webpage, 'trailer-notice', default='')
        if note_m:
            raise ExtractorError(note_m)

        # Site-level error messages (e.g. geo restriction) are delivered via a
        # JSON blob passed to the page's messaging box controller.
        mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
        if mobj:
            msg = json.loads(mobj.group('msg'))
            if msg.get('type') == 'error':
                raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

        if 'To view this, please log in to verify you are 18 or older.' in webpage:
            self.raise_login_required()

        video_title = self._html_search_regex(
            r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>',
            webpage, 'video_title')
        # collapse runs of spaces left over from the HTML markup
        video_title = re.sub(r' {2,}', ' ', video_title)
        # description lives inside an inline script keyed by the media id
        video_description = self._html_search_regex(
            r'<script[^>]*>\s*.+?\[media_id=%s\].+?"description"\s*:\s*"([^"]+)' % video_id,
            webpage, 'description', default=None)
        if video_description:
            # un-escape JS string escapes; literal '\r\n' becomes a newline
            video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
        video_upload_date = self._html_search_regex(
            [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
            webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
        if video_upload_date:
            video_upload_date = unified_strdate(video_upload_date)
        video_uploader = self._html_search_regex(
            r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage,
            'video_uploader', fatal=False)

        # Gather the advertised qualities (e.g. '480', '1080'), skipping
        # anchors that lead to the free-trial page (premium-only qualities).
        available_fmts = []
        for a, fmt in re.findall(r'(<a[^>]+token=["\']showmedia\.([0-9]{3,4})p["\'][^>]+>)', webpage):
            attrs = extract_attributes(a)
            href = attrs.get('href')
            if href and '/freetrial' in href:
                continue
            available_fmts.append(fmt)
        if not available_fmts:
            # fall back to progressively looser patterns over the raw page
            for p in (r'token=["\']showmedia\.([0-9]{3,4})p"', r'showmedia\.([0-9]{3,4})p'):
                available_fmts = re.findall(p, webpage)
                if available_fmts:
                    break
        # several page qualities can resolve to the same encode - dedupe by id
        video_encode_ids = []
        formats = []
        for fmt in available_fmts:
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + 'p'
            # POST the current page URL to the RPC endpoint for stream config
            streamdata_req = sanitized_Request(
                'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
                % (video_id, stream_format, stream_quality),
                compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8'))
            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            streamdata = self._download_xml(
                streamdata_req, video_id,
                note='Downloading media info for %s' % video_format)
            stream_info = streamdata.find('./{default}preload/stream_info')
            video_encode_id = xpath_text(stream_info, './video_encode_id')
            if video_encode_id in video_encode_ids:
                continue
            video_encode_ids.append(video_encode_id)

            video_file = xpath_text(stream_info, './file')
            if not video_file:
                continue
            # an http(s) file is an HLS master playlist, not an RTMP play path
            if video_file.startswith('http'):
                formats.extend(self._extract_m3u8_formats(
                    video_file, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
                continue

            video_url = xpath_text(stream_info, './host')
            if not video_url:
                continue
            metadata = stream_info.find('./metadata')
            format_info = {
                'format': video_format,
                'format_id': video_format,
                'height': int_or_none(xpath_text(metadata, './height')),
                'width': int_or_none(xpath_text(metadata, './width')),
            }

            # fplive.net RTMP hosts also serve the file over plain HTTP from a
            # CDN host; prefer that direct URL when it validates.
            if '.fplive.net/' in video_url:
                video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
                parsed_video_url = compat_urlparse.urlparse(video_url)
                direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
                    netloc='v.lvlt.crcdn.net',
                    # drop the RTMP type prefix (e.g. 'mp4:') from the play path
                    path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_file.split(':')[-1])))
                if self._is_valid_url(direct_video_url, video_id, video_format):
                    format_info.update({
                        'url': direct_video_url,
                    })
                    formats.append(format_info)
                    continue

            # fallback: plain RTMP with host + play path
            format_info.update({
                'url': video_url,
                'play_path': video_file,
                'ext': 'flv',
            })
            formats.append(format_info)
        self._sort_formats(formats)

        metadata = self._download_xml(
            'http://www.crunchyroll.com/xml', video_id,
            note='Downloading media info', query={
                'req': 'RpcApiVideoPlayer_GetMediaMetadata',
                'media_id': video_id,
            })

        subtitles = self.extract_subtitles(video_id, webpage)

        # webpage provide more accurate data than series_title from XML
        series = self._html_search_regex(
            r'id=["\']showmedia_about_episode_num[^>]+>\s*<a[^>]+>([^<]+)',
            webpage, 'series', default=xpath_text(metadata, 'series_title'))

        episode = xpath_text(metadata, 'episode_title')
        episode_number = int_or_none(xpath_text(metadata, 'episode_number'))

        season_number = int_or_none(self._search_regex(
            r'(?s)<h4[^>]+id=["\']showmedia_about_episode_num[^>]+>.+?</h4>\s*<h4>\s*Season (\d+)',
            webpage, 'season number', default=None))

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'thumbnail': xpath_text(metadata, 'episode_image_url'),
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'series': series,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'subtitles': subtitles,
            'formats': formats,
        }
					
						
							| 
									
										
										
										
											2014-10-19 22:47:05 -07:00
										 |  |  |  | 
 | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-09-08 14:11:20 +06:00
										 |  |  |  | class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE): | 
					
						
							| 
									
										
										
										
											2016-02-14 15:37:17 +06:00
										 |  |  |  |     IE_NAME = 'crunchyroll:playlist' | 
					
						
							| 
									
										
										
										
											2015-10-18 06:57:57 +06:00
										 |  |  |  |     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)' | 
					
						
							| 
									
										
										
										
											2014-10-19 22:47:05 -07:00
										 |  |  |  | 
 | 
					
						
							|  |  |  |  |     _TESTS = [{ | 
					
						
							| 
									
										
										
										
											2014-10-26 17:28:09 +01:00
										 |  |  |  |         'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi', | 
					
						
							|  |  |  |  |         'info_dict': { | 
					
						
							|  |  |  |  |             'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi', | 
					
						
							|  |  |  |  |             'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi' | 
					
						
							| 
									
										
										
										
											2014-10-19 22:47:05 -07:00
										 |  |  |  |         }, | 
					
						
							| 
									
										
										
										
											2014-10-26 17:28:09 +01:00
										 |  |  |  |         'playlist_count': 13, | 
					
						
							| 
									
										
										
										
											2015-10-18 07:06:47 +06:00
										 |  |  |  |     }, { | 
					
						
							|  |  |  |  |         # geo-restricted (US), 18+ maturity wall, non-premium available | 
					
						
							|  |  |  |  |         'url': 'http://www.crunchyroll.com/cosplay-complex-ova', | 
					
						
							|  |  |  |  |         'info_dict': { | 
					
						
							|  |  |  |  |             'id': 'cosplay-complex-ova', | 
					
						
							|  |  |  |  |             'title': 'Cosplay Complex OVA' | 
					
						
							|  |  |  |  |         }, | 
					
						
							|  |  |  |  |         'playlist_count': 3, | 
					
						
							|  |  |  |  |         'skip': 'Georestricted', | 
					
						
							|  |  |  |  |     }, { | 
					
						
							|  |  |  |  |         # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14 | 
					
						
							|  |  |  |  |         'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1', | 
					
						
							|  |  |  |  |         'only_matching': True, | 
					
						
							| 
									
										
										
										
											2014-10-19 22:47:05 -07:00
										 |  |  |  |     }] | 
					
						
							|  |  |  |  | 
 | 
					
						
							|  |  |  |  |     def _real_extract(self, url): | 
					
						
							| 
									
										
										
										
											2014-10-26 17:28:09 +01:00
										 |  |  |  |         show_id = self._match_id(url) | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-10-18 06:57:57 +06:00
										 |  |  |  |         webpage = self._download_webpage(self._add_skip_wall(url), show_id) | 
					
						
							| 
									
										
										
										
											2014-10-26 17:28:09 +01:00
										 |  |  |  |         title = self._html_search_regex( | 
					
						
							|  |  |  |  |             r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>', | 
					
						
							|  |  |  |  |             webpage, 'title') | 
					
						
							|  |  |  |  |         episode_paths = re.findall( | 
					
						
							|  |  |  |  |             r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"', | 
					
						
							|  |  |  |  |             webpage) | 
					
						
							|  |  |  |  |         entries = [ | 
					
						
							|  |  |  |  |             self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll') | 
					
						
							|  |  |  |  |             for ep in episode_paths | 
					
						
							|  |  |  |  |         ] | 
					
						
							|  |  |  |  |         entries.reverse() | 
					
						
							|  |  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-10-19 22:47:05 -07:00
										 |  |  |  |         return { | 
					
						
							| 
									
										
										
										
											2014-10-26 17:28:09 +01:00
										 |  |  |  |             '_type': 'playlist', | 
					
						
							|  |  |  |  |             'id': show_id, | 
					
						
							|  |  |  |  |             'title': title, | 
					
						
							|  |  |  |  |             'entries': entries, | 
					
						
							|  |  |  |  |         } |