# encoding: utf-8
from __future__ import unicode_literals

import re
import json
import base64
import zlib

from hashlib import sha1
from math import pow, sqrt, floor
from .common import InfoExtractor
from ..compat import (
    compat_etree_fromstring,
    compat_urllib_parse,
    compat_urllib_parse_unquote,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    bytes_to_intlist,
    intlist_to_bytes,
    int_or_none,
    lowercase_escape,
    remove_end,
    sanitized_Request,
    unified_strdate,
    urlencode_postdata,
    xpath_text,
)
from ..aes import (
    aes_cbc_decrypt,
)


class CrunchyrollBaseIE(InfoExtractor):
    _NETRC_MACHINE = 'crunchyroll'

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return
        self.report_login()
        login_url = 'https://www.crunchyroll.com/?a=formhandler'
        data = urlencode_postdata({
            'formname': 'RpcApiUser_Login',
            'name': username,
            'password': password,
        })
        login_request = sanitized_Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self._download_webpage(login_request, None, False, 'Wrong login info')

    def _real_initialize(self):
        self._login()

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
        request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
                   else sanitized_Request(url_or_request))
        # Accept-Language must be set explicitly to accept any language to avoid issues
        # similar to https://github.com/rg3/youtube-dl/issues/6797.
        # Along with the IP address, Crunchyroll uses Accept-Language to guess whether georestriction
        # should be imposed or not (from what I can see it just takes the first language,
        # ignoring the priority, and requires it to correspond to the IP). This causes
        # Crunchyroll to not work in georestriction cases in some browsers that don't place
        # the locale lang first in the header. However, allowing any language seems to work around the issue.
        request.add_header('Accept-Language', '*')
        return super(CrunchyrollBaseIE, self)._download_webpage(
            request, video_id, note, errnote, fatal, tries, timeout, encoding)

    @staticmethod
    def _add_skip_wall(url):
        parsed_url = compat_urlparse.urlparse(url)
        qs = compat_urlparse.parse_qs(parsed_url.query)
        # Always force skip_wall to bypass the maturity wall, namely the 18+ confirmation message:
        # > This content may be inappropriate for some people.
        # > Are you sure you want to continue?
        # since it's not disabled by default in the Crunchyroll account's settings.
        # See https://github.com/rg3/youtube-dl/issues/7202.
        qs['skip_wall'] = ['1']
        return compat_urlparse.urlunparse(
            parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))


class CrunchyrollIE(CrunchyrollBaseIE):
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TESTS = [{
        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        'info_dict': {
            'id': '645513',
            'ext': 'flv',
            'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
            'description': 'md5:2d17137920c64f2f49981a7797d275ef',
            'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
            'upload_date': '20131013',
            'url': 're:(?!.*&)',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.com/media-589804/culture-japan-1',
        'info_dict': {
            'id': '589804',
            'ext': 'flv',
            'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
            'description': 'md5:2fbc01f90b87e8e9137296f37b461c12',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Danny Choo Network',
            'upload_date': '20120213',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
    }, {
        'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
        'only_matching': True,
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium available
        'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
        'only_matching': True,
    }]

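    # Maps the resolution advertised on the watch page (showmedia.<res>p) to the
    # (video_quality, video_format) identifiers sent to the
    # RpcApiVideoPlayer_GetStandardConfig endpoint in _real_extract below.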
    _FORMAT_IDS = {
        '360': ('60', '106'),
        '480': ('61', '106'),
        '720': ('62', '106'),
        '1080': ('80', '108'),
    }

    def _decrypt_subtitles(self, data, iv, id):
        data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
        iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
        id = int(id)

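        # The AES key is derived from the subtitle id rather than passed in:
        # obfuscate_key() mixes the id with a fixed constant, prefixes a
        # Fibonacci-style salt from obfuscate_key_aux(), hashes the result with
        # SHA-1 and zero-pads the 160-bit digest to the 256 bits expected by
        # aes_cbc_decrypt().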
        def obfuscate_key_aux(count, modulo, start):
            output = list(start)
            for _ in range(count):
                output.append(output[-1] + output[-2])
            # cut off start values
            output = output[2:]
            output = list(map(lambda x: x % modulo + 33, output))
            return output

        def obfuscate_key(key):
            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
            num2 = (num1 ^ key) << 5
            num3 = key ^ num1
            num4 = num3 ^ (num3 >> 3) ^ num2
            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)

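    # The decrypted subtitle document is XML whose <style> and <event> elements
    # carry timing, styling and text in attributes; the two helpers below
    # serialize that tree to SRT (events only) and ASS (styles and events).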
    def _convert_subtitles_to_srt(self, sub_root):
        output = ''

        for i, event in enumerate(sub_root.findall('./events/event'), 1):
            start = event.attrib['start'].replace('.', ',')
            end = event.attrib['end'].replace('.', ',')
            text = event.attrib['text'].replace('\\N', '\n')
            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
        return output

    def _convert_subtitles_to_ass(self, sub_root):
        output = ''

        def ass_bool(strvalue):
            assvalue = '0'
            if strvalue == '1':
                assvalue = '-1'
            return assvalue

        output = '[Script Info]\n'
        output += 'Title: %s\n' % sub_root.attrib["title"]
        output += 'ScriptType: v4.00+\n'
        output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
        output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
        output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
        output += """ScaledBorderAndShadow: yes

[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
        for style in sub_root.findall('./styles/style'):
            output += 'Style: ' + style.attrib["name"]
            output += ',' + style.attrib["font_name"]
            output += ',' + style.attrib["font_size"]
            output += ',' + style.attrib["primary_colour"]
            output += ',' + style.attrib["secondary_colour"]
            output += ',' + style.attrib["outline_colour"]
            output += ',' + style.attrib["back_colour"]
            output += ',' + ass_bool(style.attrib["bold"])
            output += ',' + ass_bool(style.attrib["italic"])
            output += ',' + ass_bool(style.attrib["underline"])
            output += ',' + ass_bool(style.attrib["strikeout"])
            output += ',' + style.attrib["scale_x"]
            output += ',' + style.attrib["scale_y"]
            output += ',' + style.attrib["spacing"]
            output += ',' + style.attrib["angle"]
            output += ',' + style.attrib["border_style"]
            output += ',' + style.attrib["outline"]
            output += ',' + style.attrib["shadow"]
            output += ',' + style.attrib["alignment"]
            output += ',' + style.attrib["margin_l"]
            output += ',' + style.attrib["margin_r"]
            output += ',' + style.attrib["margin_v"]
            output += ',' + style.attrib["encoding"]
            output += '\n'

        output += """
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
        for event in sub_root.findall('./events/event'):
            output += 'Dialogue: 0'
            output += ',' + event.attrib["start"]
            output += ',' + event.attrib["end"]
            output += ',' + event.attrib["style"]
            output += ',' + event.attrib["name"]
            output += ',' + event.attrib["margin_l"]
            output += ',' + event.attrib["margin_r"]
            output += ',' + event.attrib["margin_v"]
            output += ',' + event.attrib["effect"]
            output += ',' + event.attrib["text"]
            output += '\n'

        return output

    def _extract_subtitles(self, subtitle):
        sub_root = compat_etree_fromstring(subtitle)
        return [{
            'ext': 'srt',
            'data': self._convert_subtitles_to_srt(sub_root),
        }, {
            'ext': 'ass',
            'data': self._convert_subtitles_to_ass(sub_root),
        }]

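    # Subtitle tracks are referenced in the player markup as ssid/title pairs.
    # Each track is fetched as encrypted XML from RpcApiSubtitle_GetXml,
    # decrypted with _decrypt_subtitles() and exposed in both srt and ass
    # variants keyed by its lang_code.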
    def _get_subtitles(self, video_id, webpage):
        subtitles = {}
        for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
            sub_page = self._download_webpage(
                'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
                video_id, note='Downloading subtitles for ' + sub_name)
            id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
            iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
            data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
            if not id or not iv or not data:
                continue
            subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
            lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            subtitles[lang_code] = self._extract_subtitles(subtitle)
        return subtitles

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        if mobj.group('prefix') == 'm':
            mobile_webpage = self._download_webpage(url, video_id, 'Downloading mobile webpage')
            webpage_url = self._search_regex(r'<link rel="canonical" href="([^"]+)" />', mobile_webpage, 'webpage_url')
        else:
            webpage_url = 'http://www.' + mobj.group('url')

        webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage')
        note_m = self._html_search_regex(
            r'<div class="showmedia-trailer-notice">(.+?)</div>',
            webpage, 'trailer-notice', default='')
        if note_m:
            raise ExtractorError(note_m)

        mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
        if mobj:
            msg = json.loads(mobj.group('msg'))
            if msg.get('type') == 'error':
                raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

        if 'To view this, please log in to verify you are 18 or older.' in webpage:
            self.raise_login_required()

        video_title = self._html_search_regex(
            r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>',
            webpage, 'video_title')
        video_title = re.sub(r' {2,}', ' ', video_title)
        video_description = self._html_search_regex(
            r'<script[^>]*>\s*.+?\[media_id=%s\].+?"description"\s*:\s*"([^"]+)' % video_id,
            webpage, 'description', default=None)
        if video_description:
            video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
        video_upload_date = self._html_search_regex(
            [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
            webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
        if video_upload_date:
            video_upload_date = unified_strdate(video_upload_date)
        video_uploader = self._html_search_regex(
            r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage,
            'video_uploader', fatal=False)

        playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
        playerdata_req = sanitized_Request(playerdata_url)
        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
        playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

        stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
        video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)

        formats = []
        for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage):
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + 'p'
            streamdata_req = sanitized_Request(
                'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
                % (stream_id, stream_format, stream_quality),
                compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            streamdata = self._download_xml(
                streamdata_req, video_id,
                note='Downloading media info for %s' % video_format)
            stream_info = streamdata.find('./{default}preload/stream_info')
            video_url = xpath_text(stream_info, './host')
            video_play_path = xpath_text(stream_info, './file')
            if not video_url or not video_play_path:
                continue
            metadata = stream_info.find('./metadata')
            format_info = {
                'format': video_format,
                'format_id': video_format,
                'height': int_or_none(xpath_text(metadata, './height')),
                'width': int_or_none(xpath_text(metadata, './width')),
            }

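            # For streams hosted on fplive.net, try to build a direct HTTP URL on
            # v.lvlt.crcdn.net first; fall back to the original RTMP URL below if
            # the direct URL is not valid.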
            if '.fplive.net/' in video_url:
                video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
                parsed_video_url = compat_urlparse.urlparse(video_url)
                direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
                    netloc='v.lvlt.crcdn.net',
                    path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_play_path.split(':')[-1])))
                if self._is_valid_url(direct_video_url, video_id, video_format):
                    format_info.update({
                        'url': direct_video_url,
                    })
                    formats.append(format_info)
                    continue

            format_info.update({
                'url': video_url,
                'play_path': video_play_path,
                'ext': 'flv',
            })
            formats.append(format_info)

        subtitles = self.extract_subtitles(video_id, webpage)

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'thumbnail': video_thumbnail,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'subtitles': subtitles,
            'formats': formats,
        }


class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
    IE_NAME = 'crunchyroll:playlist'
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'

    _TESTS = [{
        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
        'info_dict': {
            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
        },
        'playlist_count': 13,
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium available
        'url': 'http://www.crunchyroll.com/cosplay-complex-ova',
        'info_dict': {
            'id': 'cosplay-complex-ova',
            'title': 'Cosplay Complex OVA'
        },
        'playlist_count': 3,
        'skip': 'Georestricted',
    }, {
        # geo-restricted (US), 18+ maturity wall, non-premium available since 2015.11.14
        'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)

        webpage = self._download_webpage(self._add_skip_wall(url), show_id)
        title = self._html_search_regex(
            r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
            webpage, 'title')
        episode_paths = re.findall(
            r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
            webpage)
        entries = [
            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
            for ep in episode_paths
        ]
        entries.reverse()

        return {
            '_type': 'playlist',
            'id': show_id,
            'title': title,
            'entries': entries,
        }