Fixed PEP8 issues except E501
parent ccb079ee67
commit 3b5ee5c51e
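The hunks below are mechanical cleanups flagged by the pep8 checker; long lines (E501) are deliberately left alone. The recurring patterns: one import per line (E401), two spaces before an inline comment and a space after the '#' (E261/E262), no padding inside braces (E201/E202), a space after commas (E231), compound one-line statements split onto separate lines (E701), isinstance() instead of type comparisons (E721), blank lines around class and function definitions (E301/E302), and continuation lines aligned with the opening delimiter (E127/E128). A minimal before/after sketch of those patterns (illustrative only: the identifiers are made up, and the rule codes are the standard pep8 names, not labels taken from the commit itself):

    # before
    import sys, os
    opts = { 'build': 1,'download': 2 } #they're the same
    if sys.version_info >= (2,7): run()
    assert(type(msg) == type(''))

    # after
    import sys
    import os
    opts = {'build': 1, 'download': 2}  # they're the same
    if sys.version_info >= (2, 7):
        run()
    assert(isinstance(msg, type('')))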
@@ -9,16 +9,17 @@ import youtube_dl
 
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
 
+
 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)
 
@@ -233,7 +233,9 @@ def rmtree(path):
 
 #==============================================================================
 
+
 class BuildError(Exception):
+
     def __init__(self, output, code=500):
         self.output = output
         self.code = code
@@ -247,6 +249,7 @@ class HTTPError(BuildError):
 
 
 class PythonBuilder(object):
+
     def __init__(self, **kwargs):
         pythonVersion = kwargs.pop('python', '2.7')
         try:
@@ -262,6 +265,7 @@ class PythonBuilder(object):
 
 
 class GITInfoBuilder(object):
+
     def __init__(self, **kwargs):
         try:
             self.user, self.repoName = kwargs['path'][:2]
@@ -281,6 +285,7 @@ class GITInfoBuilder(object):
 
 
 class GITBuilder(GITInfoBuilder):
+
     def build(self):
         try:
             subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
@@ -313,6 +318,7 @@ class YoutubeDLBuilder(object):
 
 
 class DownloadBuilder(object):
+
     def __init__(self, **kwargs):
         self.handler = kwargs.pop('handler')
         self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
@@ -341,6 +347,7 @@ class DownloadBuilder(object):
 
 
 class CleanupTempDir(object):
+
     def build(self):
         try:
             rmtree(self.basePath)
@@ -351,6 +358,7 @@ class CleanupTempDir(object):
 
 
 class Null(object):
+
     def __init__(self, **kwargs):
         pass
 
@@ -369,7 +377,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 
 
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder}  # They're the same, no more caching.
 
     def do_GET(self):
         path = urlparse.urlparse(self.path)
@@ -11,18 +11,18 @@ except NameError:
 
 versions_info = json.load(open('update/versions.json'))
 if 'signature' in versions_info:
-	del versions_info['signature']
+    del versions_info['signature']
 
 print('Enter the PKCS1 private key, followed by a blank line:')
 privkey = b''
 while True:
-	try:
-		line = input()
-	except EOFError:
-		break
-	if line == '':
-		break
-	privkey += line.encode('ascii') + b'\n'
+    try:
+        line = input()
+    except EOFError:
+        break
+    if line == '':
+        break
+    privkey += line.encode('ascii') + b'\n'
 privkey = rsa.PrivateKey.load_pkcs1(privkey)
 
 signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
@@ -5,7 +5,7 @@ from __future__ import with_statement
 
 import datetime
 import glob
-import io # For Python 2 compatibilty
+import io  # For Python 2 compatibilty
 import os
 import re
 
@@ -39,8 +39,7 @@ now_iso = now.isoformat() + 'Z'
 atom_template = atom_template.replace('@TIMESTAMP@', now_iso)
 
 versions_info = json.load(open('update/versions.json'))
-versions = list(versions_info['versions'].keys())
-versions.sort()
+versions = sorted(versions_info['versions'].keys())
 
 entries = []
 for v in versions:
@@ -73,4 +72,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
-
@@ -9,6 +9,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
 
 import youtube_dl
 
+
 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()
@@ -21,7 +22,7 @@ def main():
             continue
         elif ie_desc is not None:
             ie_html += ': {}'.format(ie.IE_DESC)
-        if ie.working() == False:
+        if not ie.working():
             ie_html += ' (Currently broken)'
         ie_htmls.append('<li>{}</li>'.format(ie_html))
 
@@ -1,10 +1,11 @@
 #!/usr/bin/env python
 
-import sys, os
+import sys
+import os
 
 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request
 
 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -12,9 +13,9 @@ sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorr
 sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
 
 try:
-	raw_input()
-except NameError: # Python 3
-	input()
+    raw_input()
+except NameError:  # Python 3
+    input()
 
 filename = sys.argv[0]
 
@@ -9,4 +9,4 @@ py2exe_options = {
     "dll_excludes": ['w9xpopen.exe']
 }
 
-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
+setup(console=['youtube-dl.py'], options={"py2exe": py2exe_options}, zipfile=None)
@@ -1,17 +1,23 @@
 #!/usr/bin/env python
 
-import sys, os
+import sys
+import os
 import urllib2
-import json, hashlib
+import json
+import hashlib
 
+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info
+
     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
+    assert(isinstance(message, type(b(''))))
     block_size = 0
     n = key[0]
     while n:
@@ -23,13 +29,17 @@ def rsa_verify(message, signature, key):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not b('\x00') in signature:
+        return False
+    signature = signature[signature.index(b('\x00')) + 1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
 
 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -92,7 +102,7 @@ echo Updating youtube-dl...
 ping 127.0.0.1 -n 5 -w 1000 > NUL
 move /Y "%s.new" "%s"
 del "%s"
-\n""" %(exe, exe, bat))
+\n""" % (exe, exe, bat))
 b.close()
 
 os.startfile(bat)
@@ -47,6 +47,7 @@ def report_warning(message):
 
 
 class FakeYDL(YoutubeDL):
+
     def __init__(self, override=None):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
@@ -66,11 +67,14 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning
+
         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)
 
+
 def get_testcases():
     for ie in youtube_dl.extractor.gen_extractors():
         t = getattr(ie, '_TEST', None)
@@ -14,6 +14,7 @@ from youtube_dl.extractor import YoutubeIE
 
 
 class YDL(FakeYDL):
+
     def __init__(self, *args, **kwargs):
         super(YDL, self).__init__(*args, **kwargs)
         self.downloaded_info_dicts = []
@@ -27,13 +28,14 @@ class YDL(FakeYDL):
 
 
 class TestFormatSelection(unittest.TestCase):
+
     def test_prefer_free_formats(self):
         # Same resolution => download webm
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
             {'ext': 'webm', 'height': 460},
-            {'ext': 'mp4', 'height': 460},
+            {'ext': 'mp4', 'height': 460},
         ]
         info_dict = {'formats': formats, 'extractor': 'test'}
         yie = YoutubeIE(ydl)
@@ -236,6 +238,7 @@ class TestFormatSelection(unittest.TestCase):
             'ext': 'mp4',
             'width': None,
         }
+
         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)
@@ -32,6 +32,7 @@ def _download_restricted(url, filename, age):
 
 
 class TestAgeRestriction(unittest.TestCase):
+
     def _assert_restricted(self, url, filename, age, old_age=None):
         self.assertTrue(_download_restricted(url, filename, old_age))
         self.assertFalse(_download_restricted(url, filename, age))
@@ -21,6 +21,7 @@ from youtube_dl.extractor import (
 
 
 class TestAllURLsMatching(unittest.TestCase):
+
     def setUp(self):
         self.ies = gen_extractors()
 
@@ -33,19 +34,19 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@@ -34,18 +34,23 @@ from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+
 class YoutubeDL(youtube_dl.YoutubeDL):
+
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)
+
     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)
+
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)
 
+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -55,15 +60,19 @@ defs = get_testcases()
 
 class TestDownload(unittest.TestCase):
     maxDiff = None
+
     def setUp(self):
         self.defs = defs
 
-### Dynamically generate tests
+# Dynamically generate tests
 
+
 def generator(test_case):
+
     def test_template(self):
         ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
         other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
+
         def print_skipping(reason):
             print('Skipping %s: %s' % (test_case['name'], reason))
         if not ie.working():
@@ -73,7 +82,7 @@ def generator(test_case):
         info_dict = test_case.get('info_dict', {})
         if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
             print_skipping('The output file cannot be know, the "file" '
-                'key is missing or the info_dict is incomplete')
+                           'key is missing or the info_dict is incomplete')
             return
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
@@ -88,6 +97,7 @@ def generator(test_case):
         ydl = YoutubeDL(params)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
+
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
@@ -97,6 +107,7 @@ def generator(test_case):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
 
         test_cases = test_case.get('playlist', [test_case])
+
         def try_rm_tcs_files():
             for tc in test_cases:
                 tc_filename = get_tc_filename(tc)
@@ -142,12 +153,12 @@ def generator(test_case):
                 else:
                     got = info_dict.get(info_field)
                 self.assertEqual(expected, got,
-                    u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+                                 u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
             # If checkable fields are missing from the test case, print the info_dict
             test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
-                for key, value in info_dict.items()
-                if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
+                                  for key, value in info_dict.items()
+                                  if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
             if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
                 sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
 
@@ -162,13 +173,13 @@ def generator(test_case):
 
     return test_template
 
-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
     i = 1
     while hasattr(TestDownload, tname):
-        tname = 'test_' + str(test_case['name']) + '_' + str(i)
+        tname = 'test_' + str(test_case['name']) + '_' + str(i)
         i += 1
     test_method.__name__ = tname
     setattr(TestDownload, test_method.__name__, test_method)
@@ -11,12 +11,14 @@ try:
 except AttributeError:
     _DEV_NULL = open(os.devnull, 'wb')
 
+
 class TestExecution(unittest.TestCase):
+
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 
     def test_module_exec(self):
-        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
+        if sys.version_info >= (2, 7):  # Python 2.6 doesn't support package execution
             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
     def test_main_exec(self):
@@ -40,6 +40,7 @@ from youtube_dl.extractor import (
 
 
 class TestPlaylists(unittest.TestCase):
+
     def assertIsPlaylist(self, info):
         """Make sure the info has '_type' set to 'playlist'"""
         self.assertEqual(info['_type'], 'playlist')
@@ -21,6 +21,7 @@ from youtube_dl.extractor import (
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
+
     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)
@@ -13,6 +13,7 @@ IGNORED_FILES = [
 
 
 class TestUnicodeLiterals(unittest.TestCase):
+
     def test_all_files(self):
         print('Skipping this test (not yet fully implemented)')
         return
@@ -41,6 +41,7 @@ else:
 
 
 class TestUtil(unittest.TestCase):
+
     def test_timeconvert(self):
         self.assertTrue(timeconvert('') is None)
         self.assertTrue(timeconvert('bougrg') is None)
@@ -109,14 +110,14 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
         self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
+        # keep the list ordered
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
 
     def test_unescape_html(self):
         self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
 
     def test_daterange(self):
-        _20century = DateRange("19000101","20000101")
+        _20century = DateRange("19000101", "20000101")
         self.assertFalse("17890714" in _20century)
         _ac = DateRange("00010101")
         self.assertTrue("19690721" in _ac)
@@ -19,6 +19,7 @@ import youtube_dl.extractor
 
 
 class YoutubeDL(youtube_dl.YoutubeDL):
+
     def __init__(self, *args, **kwargs):
         super(YoutubeDL, self).__init__(*args, **kwargs)
         self.to_stderr = self.to_screen
@@ -31,19 +32,19 @@ params = get_params({
 })
 
 
-
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
 
+
 class TestAnnotations(unittest.TestCase):
+
     def setUp(self):
         # Clear old files
         self.tearDown()
 
-
     def test_info_json(self):
-        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+        expected = list(EXPECTED_ANNOTATIONS)  # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
         ydl = YoutubeDL(params)
         ydl.add_info_extractor(ie)
@@ -59,19 +60,18 @@ class TestAnnotations(unittest.TestCase):
         self.assertEqual(annotationsTag.tag, 'annotations')
         annotations = annotationsTag.findall('annotation')
 
-        #Not all the annotations have TEXT children and the annotations are returned unsorted.
+        # Not all the annotations have TEXT children and the annotations are returned unsorted.
         for a in annotations:
             self.assertEqual(a.tag, 'annotation')
             if a.get('type') == 'text':
                 textTag = a.find('TEXT')
                 text = textTag.text
-                self.assertTrue(text in expected) #assertIn only added in python 2.7
-                #remove the first occurance, there could be more than one annotation with the same text
+                self.assertTrue(text in expected)  # assertIn only added in python 2.7
+                # remove the first occurance, there could be more than one annotation with the same text
                 expected.remove(text)
-        #We should have seen (and removed) all the expected annotation texts.
+        # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
 
-
     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)
 
@@ -18,6 +18,7 @@ import youtube_dl.extractor
 
 
 class YoutubeDL(youtube_dl.YoutubeDL):
+
     def __init__(self, *args, **kwargs):
         super(YoutubeDL, self).__init__(*args, **kwargs)
         self.to_stderr = self.to_screen
@@ -41,6 +42,7 @@ For more information, contact phihag@phihag.de .'''
 
 
 class TestInfoJSON(unittest.TestCase):
+
     def setUp(self):
         # Clear old files
         self.tearDown()
@@ -20,6 +20,7 @@ from youtube_dl.extractor import (
 
 
 class TestYoutubeLists(unittest.TestCase):
+
     def assertIsPlaylist(self, info):
         """Make sure the info has '_type' set to 'playlist'"""
         self.assertEqual(info['_type'], 'playlist')
@@ -31,7 +32,7 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['title'], 'ytdl test PL')
         ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
-        self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
+        self.assertEqual(ytie_results, ['bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
 
     def test_youtube_playlist_noplaylist(self):
         dl = FakeYDL()
@@ -55,7 +56,7 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertTrue(len(result['entries']) >= 799)
 
     def test_youtube_playlist_with_deleted(self):
-        #651
+        # 651
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
         result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
@@ -83,10 +84,10 @@ class TestYoutubeLists(unittest.TestCase):
     def test_youtube_channel(self):
         dl = FakeYDL()
         ie = YoutubeChannelIE(dl)
-        #test paginated channel
+        # test paginated channel
         result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
         self.assertTrue(len(result['entries']) > 90)
-        #test autogenerated channel
+        # test autogenerated channel
         result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
         self.assertTrue(len(result['entries']) >= 18)
 
@@ -37,6 +37,7 @@ _TESTS = [
 
 
 class TestSignature(unittest.TestCase):
+
     def setUp(self):
         TEST_DIR = os.path.dirname(os.path.abspath(__file__))
         self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
youtube-dl (38 changed lines)
@@ -1,21 +1,27 @@
 #!/usr/bin/env python
 
-import sys, os
-import json, hashlib
+import sys
+import os
+import json
+import hashlib
 
 try:
     import urllib.request as compat_urllib_request
-except ImportError: # Python 2
+except ImportError:  # Python 2
     import urllib2 as compat_urllib_request
 
+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info
+
     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
+    assert(isinstance(message, type(b(''))))
     block_size = 0
     n = key[0]
     while n:
@@ -27,13 +33,17 @@ def rsa_verify(message, signature, key):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not b('\x00') in signature:
+        return False
+    signature = signature[signature.index(b('\x00')) + 1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
 
 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -41,9 +51,9 @@ sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorr
 sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')
 
 try:
-	raw_input()
-except NameError: # Python 3
-	input()
+    raw_input()
+except NameError:  # Python 3
+    input()
 
 filename = sys.argv[0]
 
@@ -5,6 +5,7 @@ from .downloader import get_suitable_downloader
 
 # This class reproduces the old behaviour of FileDownloader
 class FileDownloader(RealFileDownloader):
+
     def _do_download(self, filename, info_dict):
         real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params)
         for ph in self._progress_hooks:
@@ -61,6 +61,7 @@ from .version import __version__
 
 
 class YoutubeDL(object):
+
     """YoutubeDL class.
 
     YoutubeDL objects are the ones responsible of downloading the
@@ -268,12 +269,12 @@ class YoutubeDL(object):
             return message
 
         assert hasattr(self, '_output_process')
-        assert type(message) == type('')
+        assert isinstance(message, type(''))
         line_count = message.count('\n') + 1
         self._output_process.stdin.write((message + '\n').encode('utf-8'))
         self._output_process.stdin.flush()
         res = ''.join(self._output_channel.readline().decode('utf-8')
-            for _ in range(line_count))
+                      for _ in range(line_count))
         return res[:-len('\n')]
 
     def to_screen(self, message, skip_eol=False):
@@ -293,7 +294,7 @@ class YoutubeDL(object):
 
     def to_stderr(self, message):
         """Print message to stderr."""
-        assert type(message) == type('')
+        assert isinstance(message, type(''))
         if self.params.get('logger'):
             self.params['logger'].error(message)
         else:
@@ -491,7 +492,7 @@ class YoutubeDL(object):
 
         try:
             ie_result = ie.extract(url)
-            if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+            if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                 break
             if isinstance(ie_result, list):
                 # Backwards compatibility: old IE result format
@@ -500,17 +501,17 @@ class YoutubeDL(object):
                     'entries': ie_result,
                 }
                 self.add_extra_info(ie_result,
-                    {
-                        'extractor': ie.IE_NAME,
-                        'webpage_url': url,
-                        'webpage_url_basename': url_basename(url),
-                        'extractor_key': ie.ie_key(),
-                    })
+                                    {
+                                        'extractor': ie.IE_NAME,
+                                        'webpage_url': url,
+                                        'webpage_url_basename': url_basename(url),
+                                        'extractor_key': ie.ie_key(),
+                                    })
                 if process:
                     return self.process_ie_result(ie_result, download, extra_info)
                 else:
                     return ie_result
-            except ExtractorError as de: # An error we somewhat expected
+            except ExtractorError as de:  # An error we somewhat expected
                 self.report_error(compat_str(de), de.format_traceback())
                 break
             except MaxDownloadsReached:
@@ -533,7 +534,7 @@ class YoutubeDL(object):
         Returns the resolved ie_result.
         """
 
-        result_type = ie_result.get('_type', 'video') # If not given we suppose it's a video, support the default old system
+        result_type = ie_result.get('_type', 'video')  # If not given we suppose it's a video, support the default old system
         if result_type == 'video':
             self.add_extra_info(ie_result, extra_info)
             return self.process_video_result(ie_result, download=download)
@@ -624,12 +625,12 @@ class YoutubeDL(object):
         elif result_type == 'compat_list':
             def _fixup(r):
                 self.add_extra_info(r,
-                    {
-                        'extractor': ie_result['extractor'],
-                        'webpage_url': ie_result['webpage_url'],
-                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
-                        'extractor_key': ie_result['extractor_key'],
-                    })
+                                    {
+                                        'extractor': ie_result['extractor'],
+                                        'webpage_url': ie_result['webpage_url'],
+                                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
+                                        'extractor_key': ie_result['extractor_key'],
+                                    })
                 return r
             ie_result['entries'] = [
                 self.process_ie_result(_fixup(r), download, extra_info)
@@ -736,7 +737,7 @@ class YoutubeDL(object):
             # Two formats have been requested like '137+139'
             format_1, format_2 = rf.split('+')
             formats_info = (self.select_format(format_1, formats),
-                self.select_format(format_2, formats))
+                            self.select_format(format_2, formats))
             if all(formats_info):
                 selected_format = {
                     'requested_formats': formats_info,
@@ -912,10 +913,10 @@ class YoutubeDL(object):
                 with open(thumb_filename, 'wb') as thumbf:
                     shutil.copyfileobj(uf, thumbf)
                 self.to_screen('[%s] %s: Writing thumbnail to: %s' %
-                    (info_dict['extractor'], info_dict['id'], thumb_filename))
+                               (info_dict['extractor'], info_dict['id'], thumb_filename))
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self.report_warning('Unable to download thumbnail "%s": %s' %
-                    (info_dict['thumbnail'], compat_str(err)))
+                                    (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@@ -934,8 +935,8 @@ class YoutubeDL(object):
                 if not merger._get_executable():
                     postprocessors = []
                     self.report_warning('You have requested multiple '
-                        'formats but ffmpeg or avconv are not installed.'
-                        ' The formats won\'t be merged')
+                                        'formats but ffmpeg or avconv are not installed.'
+                                        ' The formats won\'t be merged')
                 else:
                     postprocessors = [merger]
                 for f in info_dict['requested_formats']:
@@ -978,7 +979,7 @@ class YoutubeDL(object):
 
         for url in url_list:
             try:
-                #It also downloads the videos
+                # It also downloads the videos
                 self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-__authors__  = (
+__authors__ = (
     'Ricardo Garcia Gonzalez',
     'Danny Colligan',
     'Benjamin Johnson',
@@ -154,7 +154,8 @@ def parseOpts(overrideArguments=None):
         if len(opts) > 1:
             opts.insert(1, ', ')
 
-        if option.takes_value(): opts.append(' %s' % option.metavar)
+        if option.takes_value():
+            opts.append(' %s' % option.metavar)
 
         return "".join(opts)
 
@@ -166,7 +167,7 @@ def parseOpts(overrideArguments=None):
         for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
             try:
                 i = opts.index(private_opt)
-                opts[i+1] = '<PRIVATE>'
+                opts[i + 1] = '<PRIVATE>'
             except ValueError:
                 pass
         return opts
@@ -176,56 +177,57 @@ def parseOpts(overrideArguments=None):
 
     # No need to wrap help messages if we're on a wide console
     columns = get_term_width()
-    if columns: max_width = columns
+    if columns:
+        max_width = columns
 
     fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
     fmt.format_option_strings = _format_option_string
 
     kw = {
-        'version' : __version__,
-        'formatter' : fmt,
-        'usage' : '%prog [options] url [url...]',
-        'conflict_handler' : 'resolve',
+        'version': __version__,
+        'formatter': fmt,
+        'usage': '%prog [options] url [url...]',
+        'conflict_handler': 'resolve',
     }
 
     parser = optparse.OptionParser(**kw)
 
     # option groups
-    general        = optparse.OptionGroup(parser, 'General Options')
-    selection      = optparse.OptionGroup(parser, 'Video Selection')
+    general = optparse.OptionGroup(parser, 'General Options')
+    selection = optparse.OptionGroup(parser, 'Video Selection')
     authentication = optparse.OptionGroup(parser, 'Authentication Options')
-    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
-    subtitles      = optparse.OptionGroup(parser, 'Subtitle Options')
-    downloader     = optparse.OptionGroup(parser, 'Download Options')
-    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
-    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
-    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+    video_format = optparse.OptionGroup(parser, 'Video Format Options')
+    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
+    downloader = optparse.OptionGroup(parser, 'Download Options')
+    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
+    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
+    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
 
     general.add_option('-h', '--help',
-            action='help', help='print this help text and exit')
+                       action='help', help='print this help text and exit')
     general.add_option('-v', '--version',
-            action='version', help='print program version and exit')
+                       action='version', help='print program version and exit')
     general.add_option('-U', '--update',
-            action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
+                       action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
     general.add_option('-i', '--ignore-errors',
-            action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
+                       action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
     general.add_option('--abort-on-error',
-            action='store_false', dest='ignoreerrors',
-            help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
+                       action='store_false', dest='ignoreerrors',
+                       help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
     general.add_option('--dump-user-agent',
-            action='store_true', dest='dump_user_agent',
-            help='display the current browser identification', default=False)
+                       action='store_true', dest='dump_user_agent',
+                       help='display the current browser identification', default=False)
     general.add_option('--user-agent',
-            dest='user_agent', help='specify a custom user agent', metavar='UA')
+                       dest='user_agent', help='specify a custom user agent', metavar='UA')
     general.add_option('--referer',
-            dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
-            metavar='REF', default=None)
+                       dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
+                       metavar='REF', default=None)
     general.add_option('--list-extractors',
-            action='store_true', dest='list_extractors',
-            help='List all supported extractors and the URLs they would handle', default=False)
+                       action='store_true', dest='list_extractors',
+                       help='List all supported extractors and the URLs they would handle', default=False)
     general.add_option('--extractor-descriptions',
-            action='store_true', dest='list_extractor_descriptions',
-            help='Output descriptions of all supported extractors', default=False)
+                       action='store_true', dest='list_extractor_descriptions',
+                       help='Output descriptions of all supported extractors', default=False)
     general.add_option(
         '--proxy', dest='proxy', default=None, metavar='URL',
         help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
@@ -243,14 +245,13 @@ def parseOpts(overrideArguments=None):
         '--bidi-workaround', dest='bidi_workaround', action='store_true',
         help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
     general.add_option('--default-search',
-            dest='default_search', metavar='PREFIX',
-            help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
+                       dest='default_search', metavar='PREFIX',
+                       help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
     general.add_option(
         '--ignore-config',
         action='store_true',
         help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
 
-
     selection.add_option(
         '--playlist-start',
         dest='playliststart', metavar='NUMBER', default=1, type=int,
@@ -259,8 +260,8 @@ def parseOpts(overrideArguments=None):
         '--playlist-end',
         dest='playlistend', metavar='NUMBER', default=None, type=int,
         help='playlist video to end at (default is last)')
-    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
-    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
+    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX', help='download only matching titles (regex or caseless sub-string)')
+    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX', help='skip download for matching titles (regex or caseless sub-string)')
     selection.add_option('--max-downloads', metavar='NUMBER',
            dest='max_downloads', type=int, default=None,
            help='Abort after downloading NUMBER files')
@@ -298,196 +299,192 @@ def parseOpts(overrideArguments=None):
         help='Try to download the DASH manifest on YouTube videos (experimental)')
 
     authentication.add_option('-u', '--username',
-            dest='username', metavar='USERNAME', help='account username')
+                              dest='username', metavar='USERNAME', help='account username')
     authentication.add_option('-p', '--password',
-            dest='password', metavar='PASSWORD', help='account password')
+                              dest='password', metavar='PASSWORD', help='account password')
     authentication.add_option('-n', '--netrc',
-            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
+                              action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
     authentication.add_option('--video-password',
-            dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
-
+                              dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
+
     video_format.add_option('-f', '--format',
-            action='store', dest='format', metavar='FORMAT', default=None,
-            help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestaudio", "worst", and "worstaudio". By default, youtube-dl will pick the best quality.')
+                            action='store', dest='format', metavar='FORMAT', default=None,
+                            help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestaudio", "worst", and "worstaudio". By default, youtube-dl will pick the best quality.')
     video_format.add_option('--all-formats',
-            action='store_const', dest='format', help='download all available video formats', const='all')
+                            action='store_const', dest='format', help='download all available video formats', const='all')
     video_format.add_option('--prefer-free-formats',
-            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
+                            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
     video_format.add_option('--max-quality',
-            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
+                            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
     video_format.add_option('-F', '--list-formats',
-            action='store_true', dest='listformats', help='list all available formats')
+                            action='store_true', dest='listformats', help='list all available formats')
 
     subtitles.add_option('--write-sub', '--write-srt',
-            action='store_true', dest='writesubtitles',
-            help='write subtitle file', default=False)
+                         action='store_true', dest='writesubtitles',
+                         help='write subtitle file', default=False)
     subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
-            action='store_true', dest='writeautomaticsub',
-            help='write automatic subtitle file (youtube only)', default=False)
+                         action='store_true', dest='writeautomaticsub',
+                         help='write automatic subtitle file (youtube only)', default=False)
     subtitles.add_option('--all-subs',
-            action='store_true', dest='allsubtitles',
-            help='downloads all the available subtitles of the video', default=False)
+                         action='store_true', dest='allsubtitles',
+                         help='downloads all the available subtitles of the video', default=False)
     subtitles.add_option('--list-subs',
-            action='store_true', dest='listsubtitles',
-            help='lists all available subtitles for the video', default=False)
+                         action='store_true', dest='listsubtitles',
+                         help='lists all available subtitles for the video', default=False)
     subtitles.add_option('--sub-format',
-            action='store', dest='subtitlesformat', metavar='FORMAT',
-            help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
+                         action='store', dest='subtitlesformat', metavar='FORMAT',
+                         help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
     subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
-            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
-            default=[], callback=_comma_separated_values_options_callback,
-            help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
+                         action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
+                         default=[], callback=_comma_separated_values_options_callback,
+                         help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
 
     downloader.add_option('-r', '--rate-limit',
-            dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
+                          dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
     downloader.add_option('-R', '--retries',
-            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
+                          dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
     downloader.add_option('--buffer-size',
-            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
+                          dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
     downloader.add_option('--no-resize-buffer',
-            action='store_true', dest='noresizebuffer',
-            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
+                          action='store_true', dest='noresizebuffer',
+                          help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
     downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
 
     verbosity.add_option('-q', '--quiet',
-            action='store_true', dest='quiet', help='activates quiet mode', default=False)
+                         action='store_true', dest='quiet', help='activates quiet mode', default=False)
     verbosity.add_option('-s', '--simulate',
-            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
+                         action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
     verbosity.add_option('--skip-download',
-            action='store_true', dest='skip_download', help='do not download the video', default=False)
+                         action='store_true', dest='skip_download', help='do not download the video', default=False)
     verbosity.add_option('-g', '--get-url',
-            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
+                         action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
     verbosity.add_option('-e', '--get-title',
-            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
+                         action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
     verbosity.add_option('--get-id',
-            action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
+                         action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
     verbosity.add_option('--get-thumbnail',
-            action='store_true', dest='getthumbnail',
-            help='simulate, quiet but print thumbnail URL', default=False)
+                         action='store_true', dest='getthumbnail',
+                         help='simulate, quiet but print thumbnail URL', default=False)
     verbosity.add_option('--get-description',
-            action='store_true', dest='getdescription',
-            help='simulate, quiet but print video description', default=False)
+                         action='store_true', dest='getdescription',
+                         help='simulate, quiet but print video description', default=False)
     verbosity.add_option('--get-duration',
-            action='store_true', dest='getduration',
-            help='simulate, quiet but print video length', default=False)
+                         action='store_true', dest='getduration',
+                         help='simulate, quiet but print video length', default=False)
     verbosity.add_option('--get-filename',
-            action='store_true', dest='getfilename',
-            help='simulate, quiet but print output filename', default=False)
+                         action='store_true', dest='getfilename',
+                         help='simulate, quiet but print output filename', default=False)
     verbosity.add_option('--get-format',
-            action='store_true', dest='getformat',
-            help='simulate, quiet but print output format', default=False)
+                         action='store_true', dest='getformat',
+                         help='simulate, quiet but print output format', default=False)
     verbosity.add_option('-j', '--dump-json',
-            action='store_true', dest='dumpjson',
-            help='simulate, quiet but print JSON information', default=False)
+                         action='store_true', dest='dumpjson',
+                         help='simulate, quiet but print JSON information', default=False)
     verbosity.add_option('--newline',
-            action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
+                         action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
     verbosity.add_option('--no-progress',
-            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
+                         action='store_true', dest='noprogress', help='do not print progress bar', default=False)
     verbosity.add_option('--console-title',
-            action='store_true', dest='consoletitle',
-            help='display progress in console titlebar', default=False)
+                         action='store_true', dest='consoletitle',
+                         help='display progress in console titlebar', default=False)
     verbosity.add_option('-v', '--verbose',
-            action='store_true', dest='verbose', help='print various debugging information', default=False)
+                         action='store_true', dest='verbose', help='print various debugging information', default=False)
     verbosity.add_option('--dump-intermediate-pages',
-            action='store_true', dest='dump_intermediate_pages', default=False,
-            help='print downloaded pages to debug problems (very verbose)')
+                         action='store_true', dest='dump_intermediate_pages', default=False,
+                         help='print downloaded pages to debug problems (very verbose)')
     verbosity.add_option('--write-pages',
-            action='store_true', dest='write_pages', default=False,
-            help='Write downloaded intermediary pages to files in the current directory to debug problems')
+                         action='store_true', dest='write_pages', default=False,
+                         help='Write downloaded intermediary pages to files in the current directory to debug problems')
     verbosity.add_option('--youtube-print-sig-code',
-            action='store_true', dest='youtube_print_sig_code', default=False,
-            help=optparse.SUPPRESS_HELP)
+                         action='store_true', dest='youtube_print_sig_code', default=False,
+                         help=optparse.SUPPRESS_HELP)
     verbosity.add_option('--print-traffic',
-            dest='debug_printtraffic', action='store_true', default=False,
-            help='Display sent and read HTTP traffic')
-
+                         dest='debug_printtraffic', action='store_true', default=False,
+                         help='Display sent and read HTTP traffic')
+
     filesystem.add_option('-t', '--title',
-            action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
+                          action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
     filesystem.add_option('--id',
-            action='store_true', dest='useid', help='use only video ID in file name', default=False)
+                          action='store_true', dest='useid', help='use only video ID in file name', default=False)
     filesystem.add_option('-l', '--literal',
-            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
+                          action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
     filesystem.add_option('-A', '--auto-number',
-            action='store_true', dest='autonumber',
-            help='number downloaded files starting from 00000', default=False)
+                          action='store_true', dest='autonumber',
+                          help='number downloaded files starting from 00000', default=False)
     filesystem.add_option('-o', '--output',
-            dest='outtmpl', metavar='TEMPLATE',
-            help=('output filename template. Use %(title)s to get the title, '
-                  '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
-                  '%(autonumber)s to get an automatically incremented number, '
-                  '%(ext)s for the filename extension, '
-                  '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
-                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
-                  '%(upload_date)s for the upload date (YYYYMMDD), '
-                  '%(extractor)s for the provider (youtube, metacafe, etc), '
-                  '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
-                  '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
-                  'Use - to output to stdout. Can also be used to download to a different directory, '
-                  'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
+                          dest='outtmpl', metavar='TEMPLATE',
+                          help=('output filename template. Use %(title)s to get the title, '
+                                '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
+                                '%(autonumber)s to get an automatically incremented number, '
+                                '%(ext)s for the filename extension, '
+                                '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
+                                '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
+                                '%(upload_date)s for the upload date (YYYYMMDD), '
+                                '%(extractor)s for the provider (youtube, metacafe, etc), '
+                                '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
+                                '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
+                                'Use - to output to stdout. Can also be used to download to a different directory, '
+                                'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
     filesystem.add_option('--autonumber-size',
-            dest='autonumber_size', metavar='NUMBER',
-            help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
+                          dest='autonumber_size', metavar='NUMBER',
+                          help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
     filesystem.add_option('--restrict-filenames',
-            action='store_true', dest='restrictfilenames',
-            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
+                          action='store_true', dest='restrictfilenames',
+                          help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
     filesystem.add_option('-a', '--batch-file',
-            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
+                          dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
     filesystem.add_option('--load-info',
-            dest='load_info_filename', metavar='FILE',
-            help='json file containing the video information (created with the "--write-json" option)')
+                          dest='load_info_filename', metavar='FILE',
+                          help='json file containing the video information (created with the "--write-json" option)')
     filesystem.add_option('-w', '--no-overwrites',
-            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
+                          action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
     filesystem.add_option('-c', '--continue',
-            action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
+                          action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
     filesystem.add_option('--no-continue',
-            action='store_false', dest='continue_dl',
-            help='do not resume partially downloaded files (restart from beginning)')
+                          action='store_false', dest='continue_dl',
+                          help='do not resume partially downloaded files (restart from beginning)')
     filesystem.add_option('--cookies',
-            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
+                          dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
     filesystem.add_option('--no-part',
-            action='store_true', dest='nopart', help='do not use .part files', default=False)
+                          action='store_true', dest='nopart', help='do not use .part files', default=False)
     filesystem.add_option('--no-mtime',
-            action='store_false', dest='updatetime',
-            help='do not use the Last-modified header to set the file modification time', default=True)
+                          action='store_false', dest='updatetime',
+                          help='do not use the Last-modified header to set the file modification time', default=True)
     filesystem.add_option('--write-description',
-            action='store_true', dest='writedescription',
-            help='write video description to a .description file', default=False)
+                          action='store_true', dest='writedescription',
+                          help='write video description to a .description file', default=False)
     filesystem.add_option('--write-info-json',
-            action='store_true', dest='writeinfojson',
-            help='write video metadata to a .info.json file', default=False)
+                          action='store_true', dest='writeinfojson',
+                          help='write video metadata to a .info.json file', default=False)
     filesystem.add_option('--write-annotations',
-            action='store_true', dest='writeannotations',
-            help='write video annotations to a .annotation file', default=False)
+                          action='store_true', dest='writeannotations',
+                          help='write video annotations to a .annotation file', default=False)
     filesystem.add_option('--write-thumbnail',
-            action='store_true', dest='writethumbnail',
-            help='write thumbnail image to disk', default=False)
-
+                          action='store_true', dest='writethumbnail',
+                          help='write thumbnail image to disk', default=False)
+
     postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
-            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
+                        help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
     postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
-            help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
|
||||
postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
|
||||
help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
|
||||
help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
|
||||
postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
|
||||
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm)')
|
||||
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm)')
|
||||
postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
|
||||
help='keeps the video file on disk after the post-processing; the video is erased by default')
|
||||
help='keeps the video file on disk after the post-processing; the video is erased by default')
|
||||
postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
|
||||
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
|
||||
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
|
||||
postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
|
||||
help='embed subtitles in the video (only for mp4 videos)')
|
||||
help='embed subtitles in the video (only for mp4 videos)')
|
||||
postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
|
||||
help='write metadata to the video file')
|
||||
help='write metadata to the video file')
|
||||
postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
|
||||
help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
|
||||
help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
|
||||
postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
|
||||
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
|
||||
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
|
||||
postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
|
||||
help='Prefer ffmpeg over avconv for running the postprocessors')
|
||||
|
||||
help='Prefer ffmpeg over avconv for running the postprocessors')
|
||||
|
||||
parser.add_option_group(general)
|
||||
parser.add_option_group(selection)
|
||||
@ -593,7 +590,6 @@ def _real_main(argv=None):
|
||||
compat_print(desc)
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
# Conflicting, missing and erroneous options
|
||||
if opts.usenetrc and (opts.username is not None or opts.password is not None):
|
||||
parser.error(u'using .netrc conflicts with giving username/password')
|
||||
@ -657,21 +653,21 @@ def _real_main(argv=None):
|
||||
|
||||
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
|
||||
# this was the old behaviour if only --all-sub was given.
|
||||
if opts.allsubtitles and (opts.writeautomaticsub == False):
|
||||
if opts.allsubtitles and not opts.writeautomaticsub:
|
||||
opts.writesubtitles = True
|
||||
|
||||
if sys.version_info < (3,):
|
||||
# In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
|
||||
if opts.outtmpl is not None:
|
||||
opts.outtmpl = opts.outtmpl.decode(preferredencoding())
|
||||
outtmpl =((opts.outtmpl is not None and opts.outtmpl)
|
||||
or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
|
||||
or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
|
||||
or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
|
||||
or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
|
||||
or (opts.useid and u'%(id)s.%(ext)s')
|
||||
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
|
||||
or u'%(title)s-%(id)s.%(ext)s')
|
||||
outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
|
||||
or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
|
||||
or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
|
||||
or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
|
||||
or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
|
||||
or (opts.useid and u'%(id)s.%(ext)s')
|
||||
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
|
||||
or u'%(title)s-%(id)s.%(ext)s')
|
||||
if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
|
||||
parser.error(u'Cannot download a video and extract audio into the same'
|
||||
u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
|
||||
|
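The outtmpl chain above picks the first applicable template via Python's short-circuit and/or idiom. A minimal runnable sketch of that idiom, with a made-up stand-in for the parsed options (not the real optparse values):

    # Hypothetical opts object; only these three fields matter for the sketch.
    class Opts(object):
        outtmpl = None
        usetitle = True
        autonumber = False

    opts = Opts()
    # Each parenthesised clause yields its template string when its flags are
    # truthy, so `or` returns the first applicable template.
    template = ((opts.outtmpl is not None and opts.outtmpl)
                or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
                or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
                or u'%(title)s-%(id)s.%(ext)s')
    print(template)  # -> %(title)s-%(id)s.%(ext)s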
@ -7,6 +7,7 @@ from .utils import bytes_to_intlist, intlist_to_bytes

BLOCK_SIZE_BYTES = 16


def aes_ctr_decrypt(data, key, counter):
"""
Decrypt with aes in counter mode

@ -20,11 +21,11 @@ def aes_ctr_decrypt(data, key, counter):
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

decrypted_data=[]
decrypted_data = []
for i in range(block_count):
counter_block = counter.next_value()
block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
block += [0]*(BLOCK_SIZE_BYTES - len(block))
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
block += [0] * (BLOCK_SIZE_BYTES - len(block))

cipher_counter_block = aes_encrypt(counter_block, expanded_key)
decrypted_data += xor(block, cipher_counter_block)

@ -32,6 +33,7 @@ def aes_ctr_decrypt(data, key, counter):

return decrypted_data


def aes_cbc_decrypt(data, key, iv):
"""
Decrypt with aes in CBC mode

@ -44,11 +46,11 @@ def aes_cbc_decrypt(data, key, iv):
expanded_key = key_expansion(key)
block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

decrypted_data=[]
decrypted_data = []
previous_cipher_block = iv
for i in range(block_count):
block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
block += [0]*(BLOCK_SIZE_BYTES - len(block))
block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
block += [0] * (BLOCK_SIZE_BYTES - len(block))

decrypted_block = aes_decrypt(block, expanded_key)
decrypted_data += xor(decrypted_block, previous_cipher_block)

@ -57,6 +59,7 @@ def aes_cbc_decrypt(data, key, iv):

return decrypted_data


def key_expansion(data):
"""
Generate key schedule

@ -64,7 +67,7 @@ def key_expansion(data):
@param {int[]} data 16/24/32-Byte cipher key
@returns {int[]} 176/208/240-Byte expanded key
"""
data = data[:]  # copy
rcon_iteration = 1
key_size_bytes = len(data)
expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

@ -73,24 +76,25 @@ def key_expansion(data):
temp = data[-4:]
temp = key_schedule_core(temp, rcon_iteration)
rcon_iteration += 1
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

for _ in range(3):
temp = data[-4:]
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

if key_size_bytes == 32:
temp = data[-4:]
temp = sub_bytes(temp)
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
temp = data[-4:]
data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
data = data[:expanded_key_size_bytes]

return data


def aes_encrypt(data, expanded_key):
"""
Encrypt one block with aes

@ -102,15 +106,16 @@ def aes_encrypt(data, expanded_key):
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
for i in range(1, rounds+1):
for i in range(1, rounds + 1):
data = sub_bytes(data)
data = shift_rows(data)
if i != rounds:
data = mix_columns(data)
data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])

return data


def aes_decrypt(data, expanded_key):
"""
Decrypt one block with aes

@ -122,7 +127,7 @@ def aes_decrypt(data, expanded_key):
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

for i in range(rounds, 0, -1):
data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
if i != rounds:
data = mix_columns_inv(data)
data = shift_rows_inv(data)

@ -131,6 +136,7 @@ def aes_decrypt(data, expanded_key):

return data


def aes_decrypt_text(data, password, key_size_bytes):
"""
Decrypt text

@ -149,14 +155,15 @@ def aes_decrypt_text(data, password, key_size_bytes):
data = bytes_to_intlist(base64.b64decode(data))
password = bytes_to_intlist(password.encode('utf-8'))

key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)

nonce = data[:NONCE_LENGTH_BYTES]
cipher = data[NONCE_LENGTH_BYTES:]

class Counter:
__value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)

def next_value(self):
temp = self.__value
self.__value = inc(self.__value)

@ -200,14 +207,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
(0x1,0x2,0x3,0x1),
(0x1,0x1,0x2,0x3),
(0x3,0x1,0x1,0x2))
MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
(0x9,0xE,0xB,0xD),
(0xD,0x9,0xE,0xB),
(0xB,0xD,0x9,0xE))
MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
(0x1, 0x2, 0x3, 0x1),
(0x1, 0x1, 0x2, 0x3),
(0x3, 0x1, 0x1, 0x2))
MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
(0x9, 0xE, 0xB, 0xD),
(0xD, 0x9, 0xE, 0xB),
(0xB, 0xD, 0x9, 0xE))
RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,

@ -241,15 +248,19 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)


def sub_bytes(data):
return [SBOX[x] for x in data]


def sub_bytes_inv(data):
return [SBOX_INV[x] for x in data]


def rotate(data):
return data[1:] + [data[0]]


def key_schedule_core(data, rcon_iteration):
data = rotate(data)
data = sub_bytes(data)

@ -257,14 +268,17 @@ def key_schedule_core(data, rcon_iteration):

return data


def xor(data1, data2):
return [x^y for x, y in zip(data1, data2)]
return [x ^ y for x, y in zip(data1, data2)]


def rijndael_mul(a, b):
if(a==0 or b==0):
if(a == 0 or b == 0):
return 0
return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]


def mix_column(data, matrix):
data_mixed = []
for row in range(4):

@ -275,33 +289,38 @@ def mix_column(data, matrix):
data_mixed.append(mixed)
return data_mixed


def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
data_mixed = []
for i in range(4):
column = data[i*4 : (i+1)*4]
column = data[i * 4: (i + 1) * 4]
data_mixed += mix_column(column, matrix)
return data_mixed


def mix_columns_inv(data):
return mix_columns(data, MIX_COLUMN_MATRIX_INV)


def shift_rows(data):
data_shifted = []
for column in range(4):
for row in range(4):
data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
data_shifted.append(data[((column + row) & 0b11) * 4 + row])
return data_shifted


def shift_rows_inv(data):
data_shifted = []
for column in range(4):
for row in range(4):
data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
data_shifted.append(data[((column - row) & 0b11) * 4 + row])
return data_shifted


def inc(data):
data = data[:]  # copy
for i in range(len(data)-1,-1,-1):
for i in range(len(data) - 1, -1, -1):
if data[i] == 255:
data[i] = 0
else:
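For reference, a self-contained sketch of the nonce-padded counter that aes_decrypt_text builds above. inc() is restated here (its body is truncated in the hunk, so the completion is an assumption) purely so the snippet runs on its own:

    BLOCK_SIZE_BYTES = 16

    def inc(data):
        # byte-wise big-endian increment, as inc() above does
        data = data[:]
        for i in range(len(data) - 1, -1, -1):
            if data[i] == 255:
                data[i] = 0
            else:
                data[i] = data[i] + 1
                break
        return data

    class Counter(object):
        # mirrors the Counter used inside aes_decrypt_text above
        def __init__(self, nonce):
            self.value = nonce + [0] * (BLOCK_SIZE_BYTES - len(nonce))

        def next_value(self):
            temp = self.value
            self.value = inc(self.value)
            return temp

    c = Counter([0] * 8)
    assert c.next_value() == [0] * 16
    assert c.next_value()[-1] == 1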
@ -11,6 +11,7 @@ from ..utils import (


class FileDownloader(object):

"""File Downloader class.

File downloader objects are the ones responsible of downloading the

@ -77,7 +78,7 @@ class FileDownloader(object):
if total is None:
return None
dif = now - start
if current == 0 or dif < 0.001:  # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)

@ -91,7 +92,7 @@ class FileDownloader(object):
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001:  # One millisecond
return None
return float(bytes) / dif

@ -104,7 +105,7 @@ class FileDownloader(object):
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
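The ETA in calc_eta above is just remaining bytes divided by the observed rate. A worked example with invented numbers:

    start, now = 0.0, 4.0                 # four seconds elapsed
    current, total = 1048576, 5242880     # 1 MiB done out of 5 MiB
    dif = now - start
    rate = float(current) / dif                         # 262144 bytes/s
    eta = int((float(total) - float(current)) / rate)   # 4 MiB / 256 KiB/s
    print(eta)  # -> 16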
@ -21,6 +21,7 @@ from ..utils import (


class FlvReader(io.BytesIO):

"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html

@ -55,7 +56,7 @@ class FlvReader(io.BytesIO):
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size-header_end)
return real_size, box_type, self.read(real_size - header_end)

def read_asrt(self):
# version

@ -180,7 +181,7 @@ def build_fragments_list(boot_info):
n_frags = segment_run_entry[1]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
res.append((1, frag_number))
return res

@ -210,11 +211,13 @@ def _add_ns(prop):


class HttpQuietDownloader(HttpFD):

def to_screen(self, *args, **kargs):
pass


class F4mFD(FileDownloader):

"""
A downloader for f4m manifests or AdobeHDS.
"""

@ -225,12 +228,12 @@ class F4mFD(FileDownloader):
manifest = self.ydl.urlopen(man_url).read()
self.report_destination(filename)
http_dl = HttpQuietDownloader(self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'test': self.params.get('test', False),
})

doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]

@ -261,7 +264,7 @@ class F4mFD(FileDownloader):
def frag_progress_hook(status):
frag_total_bytes = status.get('total_bytes', 0)
estimated_size = (state['downloaded_bytes'] +
(total_frags - state['frag_counter']) * frag_total_bytes)
if status['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_counter'] += 1

@ -271,13 +274,13 @@ class F4mFD(FileDownloader):
frag_downloaded_bytes = status['downloaded_bytes']
byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_counter'], total_frags)
progress += frag_progress / float(total_frags)

eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
self.report_progress(progress, format_bytes(estimated_size),
status.get('speed'), eta)
http_dl.add_progress_hook(frag_progress_hook)

frags_filenames = []
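estimated_size above extrapolates the final size from the bytes already downloaded plus the remaining fragment count times the current fragment's size. Worked through with made-up numbers:

    # 3 of 10 fragments done; the current fragment reports 1 MB total.
    state = {'downloaded_bytes': 3000000, 'frag_counter': 3}
    total_frags = 10
    frag_total_bytes = 1000000
    estimated_size = (state['downloaded_bytes'] +
                      (total_frags - state['frag_counter']) * frag_total_bytes)
    print(estimated_size)  # -> 10000000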
@ -8,13 +8,14 @@ from ..utils import (


class HlsFD(FileDownloader):

def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)

args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
'-bsf:a', 'aac_adtstoasc', tmpfilename]

for program in ['avconv', 'ffmpeg']:
try:
@ -14,6 +14,7 @@ from ..utils import (


class HttpFD(FileDownloader):

def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
@ -8,6 +8,7 @@ from ..utils import (


class MplayerFD(FileDownloader):

def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
@ -12,6 +12,7 @@ from ..utils import (


class RtmpFD(FileDownloader):

def real_download(self, filename, info_dict):
def run_rtmpdump(args):
start = time.time()

@ -36,13 +37,13 @@ class RtmpFD(FileDownloader):
continue
mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
if mobj:
downloaded_data_len = int(float(mobj.group(1))*1024)
downloaded_data_len = int(float(mobj.group(1)) * 1024)
percent = float(mobj.group(2))
if not resume_percent:
resume_percent = percent
resume_downloaded_data_len = downloaded_data_len
eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
data_len = None
if percent > 0:
data_len = int(downloaded_data_len * 100 / percent)

@ -62,7 +63,7 @@ class RtmpFD(FileDownloader):
# no percent for live streams
mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
if mobj:
downloaded_data_len = int(float(mobj.group(1))*1024)
downloaded_data_len = int(float(mobj.group(1)) * 1024)
time_now = time.time()
speed = self.calc_speed(start, time_now, downloaded_data_len)
self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)

@ -78,7 +79,7 @@ class RtmpFD(FileDownloader):
if not cursor_in_new_line:
self.to_screen(u'')
cursor_in_new_line = True
self.to_screen(u'[rtmpdump] '+line)
self.to_screen(u'[rtmpdump] ' + line)
proc.wait()
if not cursor_in_new_line:
self.to_screen(u'')

@ -157,7 +158,7 @@ class RtmpFD(FileDownloader):
while (retval == 2 or retval == 1) and not test:
prevsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
time.sleep(5.0)  # This seems to be needed
retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
cursize = os.path.getsize(encodeFilename(tmpfilename))
if prevsize == cursize and retval == 1:
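A runnable sketch of the progress regex above against a sample line (the line itself is invented, shaped like rtmpdump's usual output):

    import re

    line = '123.456 kB / 10.50 sec (35.1%)'
    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
    if mobj:
        downloaded_data_len = int(float(mobj.group(1)) * 1024)  # 126418 bytes
        percent = float(mobj.group(2))                          # 35.1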
@ -306,4 +306,4 @@ def gen_extractors():

def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name+'IE']
return globals()[ie_name + 'IE']
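get_info_extractor leans on the FooIE naming convention: the module namespace doubles as the registry. A self-contained sketch with a dummy class standing in for the real extractors:

    class YoutubeIE(object):
        pass

    def get_info_extractor(ie_name):
        # look the class up by name in the module globals
        return globals()[ie_name + 'IE']

    assert get_info_extractor('Youtube') is YoutubeIE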
@ -79,7 +79,7 @@ class AddAnimeIE(InfoExtractor):

return {
'_type': 'video',
'id': video_id,
'formats': formats,
'title': video_title,
'description': video_description
@ -28,7 +28,7 @@ class AnitubeIE(InfoExtractor):
webpage, u'key')

config_xml = self._download_xml('http://www.anitube.se/nuevo/econfig.php?key=%s' % key,
key)

video_title = config_xml.find('title').text
@ -1,4 +1,4 @@
#coding: utf-8
# coding: utf-8

import re

@ -19,7 +19,7 @@ class AparatIE(InfoExtractor):
u'info_dict': {
u"title": u"تیم گلکسی 11 - زومیت",
},
#u'skip': u'Extremely unreliable',
# u'skip': u'Extremely unreliable',
}

def _real_extract(self, url):
@ -66,11 +66,13 @@ class AppleTrailersIE(InfoExtractor):
uploader_id = mobj.group('company')

playlist_url = compat_urlparse.urljoin(url, u'includes/playlists/itunes.inc')

def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', u'', s)
s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
# The ' in the onClick attributes are not escaped, it couldn't be parsed
# like: http://trailers.apple.com/trailers/wb/gravity/

def _clean_json(m):
return u'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
s = re.sub(self._JSON_RE, _clean_json, s)

@ -82,7 +84,7 @@ class AppleTrailersIE(InfoExtractor):
for li in doc.findall('./div/ul/li'):
on_click = li.find('.//a').attrib['onClick']
trailer_info_json = self._search_regex(self._JSON_RE,
on_click, u'trailer info')
trailer_info = json.loads(trailer_info_json)
title = trailer_info['title']
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
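fix_html above uses re.sub with a function replacement (_clean_json). The same idiom in isolation, with an invented pattern and input:

    import re

    def _upper(m):
        # the callable receives the match object and returns the replacement
        return m.group(1).upper()

    print(re.sub(r"name='([^']+)'", _upper, "name='gravity'"))  # -> GRAVITY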
@ -19,6 +19,7 @@ from ..utils import (
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.


class ArteTvIE(InfoExtractor):
_VIDEOS_URL = r'(?:http://)?videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
_LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'

@ -86,6 +87,7 @@ class ArteTvIE(InfoExtractor):
config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))

def _key(m):
quality = m.group('quality')
if quality == 'hd':

@ -111,7 +113,7 @@ class ArteTvIE(InfoExtractor):
webpage = self._download_webpage(url, name)
video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, 'event id')
config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
video_id, 'Downloading information')
event_doc = config_doc.find('event')
url_node = event_doc.find('video').find('urlHd')
if url_node is None:

@ -164,6 +166,7 @@ class ArteTVPlus7IE(InfoExtractor):
all_formats = player_info['VSR'].values()
# Some formats use the m3u8 protocol
all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))

def _match_lang(f):
if f.get('versionCode') is None:
return True

@ -176,7 +179,7 @@ class ArteTVPlus7IE(InfoExtractor):
return any(re.match(r, f['versionCode']) for r in regexes)
# Some formats may not be in the same language as the url
formats = filter(_match_lang, all_formats)
formats = list(formats)  # in python3 filter returns an iterator
if not formats:
# Some videos are only available in the 'Originalversion'
# they aren't tagged as being in French or German

@ -192,14 +195,15 @@ class ArteTVPlus7IE(InfoExtractor):
def sort_key(f):
return (
# Sort first by quality
int(f.get('height',-1)),
int(f.get('bitrate',-1)),
int(f.get('height', -1)),
int(f.get('bitrate', -1)),
# The original version with subtitles has lower relevance
re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
# The version with sourds/mal subtitles has also lower relevance
re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
)
formats = sorted(formats, key=sort_key)

def _format(format_info):
quality = ''
height = format_info.get('height')
@ -26,7 +26,7 @@ class AUEngineIE(InfoExtractor):
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
webpage, 'title')
title = title.strip()
links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
links = map(compat_urllib_parse.unquote, links)

@ -45,8 +45,8 @@ class AUEngineIE(InfoExtractor):
title = title[:-len(ext)]

return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
}
@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
_TEST = {
'url': 'http://bambuser.com/v/4050584',
# MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
#u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
# u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
'info_dict': {
'id': '4050584',
'ext': 'flv',

@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
'&api_key=%s&vid=%s' % (self._API_KEY, video_id))
info_json = self._download_webpage(info_url, video_id)
info = json.loads(info_json)['result']

@ -67,14 +67,14 @@ class BambuserChannelIE(InfoExtractor):
last_id = ''
for i in itertools.count(1):
req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
'&sort=created&access_mode=0%2C1%2C2&limit={count}'
'&method=broadcast&format=json&vid_older_than={last}'
).format(user=user, count=self._STEP, last=last_id)
req = compat_urllib_request.Request(req_url)
# Without setting this header, we wouldn't get any result
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
info_json = self._download_webpage(req, user,
'Downloading page %d' % i)
results = json.loads(info_json)['result']
if len(results) == 0:
break
@ -79,12 +79,12 @@ class BandcampIE(InfoExtractor):
initial_url = mp3_info['url']
re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
m_url = re.match(re_url, initial_url)
#We build the url we will use to get the final track url
# We build the url we will use to get the final track url
# This url is build in Bandcamp in the script download_bunde_*.js
request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
# If we could correctly generate the .rand field the url would be
#in the "download_url" key
# in the "download_url" key
final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

return {
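The named groups in re_url above let the final track URL be rebuilt piecewise. A runnable sketch with a shortened, made-up URL (the real fsig/id/ts values are opaque tokens):

    import re

    re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
    initial_url = 'http://artist.bandcamp.com/download/track?enc=mp3-320&fsig=abc&id=42&ts=123'
    m_url = re.match(re_url, initial_url)
    print(m_url.group('server'), m_url.group('id'), m_url.group('ts'))
    # -> http://artist.bandcamp.com 42 123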
@ -163,7 +163,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
group_id = mobj.group('id')

playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
'Downloading playlist XML')

no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
if no_items is not None:

@ -190,7 +190,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
duration = int(item.get('duration'))

media_selection = self._download_xml(
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
programme_id, 'Downloading media selection XML')

for media in self._extract_medias(media_selection):
@ -14,6 +14,7 @@ from ..utils import (


class BlipTVIE(SubtitlesInfoExtractor):

"""Information extractor for blip.tv"""

_VALID_URL = r'https?://(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(?P<presumptive_id>.+)$'
@ -30,15 +30,15 @@ class CanalplusIE(InfoExtractor):
webpage = self._download_webpage(url, mobj.group('path'))
video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id')
info_url = self._VIDEO_INFO_TEMPLATE % video_id
doc = self._download_xml(info_url,video_id,
doc = self._download_xml(info_url, video_id,
u'Downloading video info')

self.report_extraction(video_id)
video_info = [video for video in doc if video.find('ID').text == video_id][0]
infos = video_info.find('INFOS')
media = video_info.find('MEDIA')
formats = [media.find('VIDEOS/%s' % format)
for format in ['BAS_DEBIT', 'HAUT_DEBIT', 'HD']]
video_url = [format.text for format in formats if format is not None][-1]

return {'id': video_id,
@ -5,7 +5,9 @@ import re
from .common import InfoExtractor
from ..utils import ExtractorError


class Channel9IE(InfoExtractor):

'''
Common extractor for channel9.msdn.com.

@ -31,7 +33,7 @@ class Channel9IE(InfoExtractor):
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
},
},
{

@ -44,7 +46,7 @@ class Channel9IE(InfoExtractor):
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
'authors': [ 'Mike Wilmot' ],
'authors': ['Mike Wilmot'],
},
}
]

@ -83,7 +85,7 @@ class Channel9IE(InfoExtractor):
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
'preference': self._known_formats.index(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
} for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]

@ -196,23 +198,23 @@ class Channel9IE(InfoExtractor):
'rating_count': rating_count,
'view_count': view_count,
'comment_count': comment_count,
}

result = []

if slides is not None:
d = common.copy()
d.update({ 'title': title + '-Slides', 'url': slides })
d.update({'title': title + '-Slides', 'url': slides})
result.append(d)

if zip_ is not None:
d = common.copy()
d.update({ 'title': title + '-Zip', 'url': zip_ })
d.update({'title': title + '-Zip', 'url': zip_})
result.append(d)

if len(formats) > 0:
d = common.copy()
d.update({ 'title': title, 'formats': formats })
d.update({'title': title, 'formats': formats})
result.append(d)

return result

@ -265,7 +267,7 @@ class Channel9IE(InfoExtractor):
page_type = page_type_m.group('pagetype')
if page_type == 'List': # List page, may contain list of 'item'-like objects
return self._extract_list(content_path)
elif page_type == 'Entry.Item':  # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
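The copy()/update() pattern above derives each result entry from shared metadata without mutating it. In isolation, with illustrative values:

    common = {'_type': 'video', 'uploader': 'example'}

    d = common.copy()
    d.update({'title': 'Talk-Slides', 'url': 'http://example.com/slides'})
    print(d)       # shared fields plus the overrides
    print(common)  # the shared dict itself is untouched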
@ -22,24 +22,24 @@ class CinemassacreIE(InfoExtractor):
u'skip_download': True,
},
},
{
u'url': u'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
u'file': u'521be8ef82b16.flv',
u'info_dict': {
u'upload_date': u'20131002',
u'title': u'The Mummy’s Hand (1940)',
},
u'params': {
# rtmp download
u'skip_download': True,
},
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)

webpage_url = u'http://' + mobj.group('url')
webpage = self._download_webpage(webpage_url, None)  # Don't know video id yet
video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
if not mobj:

@ -48,9 +48,9 @@ class CinemassacreIE(InfoExtractor):
video_id = mobj.group(u'video_id')

video_title = self._html_search_regex(r'<title>(?P<title>.+?)\|',
webpage, u'title')
video_description = self._html_search_regex(r'<div class="entry-content">(?P<description>.+?)</div>',
webpage, u'description', flags=re.DOTALL, fatal=False)
if len(video_description) == 0:
video_description = None

@ -65,7 +65,7 @@ class CinemassacreIE(InfoExtractor):
{
'url': url,
'play_path': 'mp4:' + sd_file,
'rtmp_live': True,  # workaround
'ext': 'flv',
'format': 'sd',
'format_id': 'sd',

@ -73,7 +73,7 @@ class CinemassacreIE(InfoExtractor):
{
'url': url,
'play_path': 'mp4:' + hd_file,
'rtmp_live': True,  # workaround
'ext': 'flv',
'format': 'hd',
'format_id': 'hd',
@ -36,6 +36,7 @@ class ClipsyndicateIE(InfoExtractor):
transform_source=fix_xml_ampersands)

track_doc = pdoc.find('trackList/track')

def find_param(name):
node = find_xpath_attr(track_doc, './/param', 'name', name)
if node is not None:
@ -1,5 +1,6 @@
from .mtv import MTVIE


class CMTIE(MTVIE):
IE_NAME = u'cmt.com'
_VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml'
@ -25,16 +25,16 @@ class CNNIE(InfoExtractor):
'upload_date': '20130609',
},
},
{
"url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
"file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
"md5": "b5cc60c60a3477d185af8f19a2a26f4e",
"info_dict": {
"title": "Student's epic speech stuns new freshmen",
"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
"upload_date": "20130821",
}
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)

@ -79,7 +79,7 @@ class CNNIE(InfoExtractor):

self._sort_formats(formats)

thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
thumbnails = sorted([((int(t.attrib['height']), int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]

metas_el = info.find('metas')
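Sorting (height, width) tuples orders the thumbnails smallest to largest lexicographically, which is all the thumbnails expression above relies on. With made-up sizes:

    thumbnails = sorted([((480, 640), 'big.jpg'), ((90, 120), 'small.jpg')])
    thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]
    print(thumbs_dict[0]['url'])  # -> small.jpg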
@ -21,35 +21,35 @@ class CollegeHumorIE(InfoExtractor):
'age_limit': 13,
},
},
{
'url': 'http://www.collegehumor.com/video/3505939/font-conference',
'md5': '72fa701d8ef38664a4dbb9e2ab721816',
'info_dict': {
'id': '3505939',
'ext': 'mp4',
'title': 'Font Conference',
'description': 'This video wasn\'t long enough,',
'age_limit': 10,
'duration': 179,
},
},
# embedded youtube video
{
'url': 'http://www.collegehumor.com/embed/6950457',
'info_dict': {
'id': 'W5gMp3ZjYg4',
'ext': 'mp4',
'title': 'Funny Dogs Protecting Babies Compilation 2014 [NEW HD]',
'uploader': 'Funnyplox TV',
'uploader_id': 'funnyploxtv',
'description': 'md5:7ded37421526d54afdf005e25bc2b7a3',
'upload_date': '20140128',
},
'params': {
'skip_download': True,
},
'add_ie': ['Youtube'],
},
]

def _real_extract(self, url):
@ -122,7 +122,7 @@ class ComedyCentralShowsIE(InfoExtractor):
epTitle = mobj.group('episode')

self.report_extraction(epTitle)
webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)

@ -148,13 +148,13 @@ class ComedyCentralShowsIE(InfoExtractor):
uri = mMovieParams[0][1]
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
idoc = self._download_xml(indexUrl, epTitle,
'Downloading show index',
'unable to download episode index')

results = []

itemEls = idoc.findall('.//item')
for partNum,itemEl in enumerate(itemEls):
for partNum, itemEl in enumerate(itemEls):
mediaId = itemEl.findall('./guid')[0].text
shortMediaId = mediaId.split(':')[-1]
showId = mediaId.split(':')[-2].replace('.com', '')

@ -162,9 +162,9 @@ class ComedyCentralShowsIE(InfoExtractor):
officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)

configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
compat_urllib_parse.urlencode({'uri': mediaId}))
cdoc = self._download_xml(configUrl, epTitle,
'Downloading configuration for %s' % shortMediaId)

turls = []
for rendition in cdoc.findall('.//rendition'):

@ -186,7 +186,7 @@ class ComedyCentralShowsIE(InfoExtractor):
'width': w,
})

effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum+1)
effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum + 1)
results.append({
'id': shortMediaId,
'formats': formats,
@ -25,6 +25,7 @@ _NO_DEFAULT = object()


class InfoExtractor(object):

"""Information Extractor class.

Information extractors are the classes that, given a URL, extract

@ -306,17 +307,18 @@ class InfoExtractor(object):
"""Report attempt to log in."""
self.to_screen(u'Logging in')

#Methods for following #608
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None):
"""Returns a url that points to a page that should be processed"""
#TODO: ie should be the class used for getting the info
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
return video_info

@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None):
"""Returns a playlist"""

@ -340,7 +342,8 @@ class InfoExtractor(object):
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj: break
if mobj:
break

if os.name != 'nt' and sys.stderr.isatty():
_name = u'\033[0;34m%s\033[0m' % name

@ -356,7 +359,7 @@ class InfoExtractor(object):
raise RegexNotFoundError(u'Unable to extract %s' % _name)
else:
self._downloader.report_warning(u'unable to extract %s; '
u'please report this issue on http://yt-dl.org/bug' % _name)
return None

def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):

@ -429,7 +432,8 @@ class InfoExtractor(object):

def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video')
if secure: regexes = self._og_regexes('video:secure_url') + regexes
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)

def _html_search_meta(self, name, html, display_name=None):

@ -470,7 +474,7 @@ class InfoExtractor(object):

def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')

def _sort_formats(self, formats):
if not formats:

@ -530,6 +534,7 @@ class InfoExtractor(object):


class SearchInfoExtractor(InfoExtractor):

"""
Base class for paged search queries extractors.
They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
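What url_result() above hands back, restated as a standalone function so the shape is easy to see (the values are illustrative only):

    def url_result(url, ie=None, video_id=None):
        # a url-type result delegates extraction of `url` to extractor `ie`
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        return video_info

    print(url_result('http://example.com/video', ie='Generic'))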
@ -14,6 +14,7 @@ from ..utils import (


class CondeNastIE(InfoExtractor):

"""
Condé Nast is a media group, some of its sites use a custom HTML5 player
that works the same in all of them.
@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import determine_ext


class CriterionIE(InfoExtractor):
_VALID_URL = r'https?://www\.criterion\.com/films/(\d*)-.+'
_TEST = {

@ -13,7 +14,7 @@ class CriterionIE(InfoExtractor):
u'md5': u'bc51beba55685509883a9a7830919ec3',
u'info_dict': {
u"title": u"Le Samouraï",
u"description" : u'md5:a2b4b116326558149bef81f76dcbb93f',
u"description": u'md5:a2b4b116326558149bef81f76dcbb93f',
}
}

@ -23,16 +24,16 @@ class CriterionIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)

final_url = self._search_regex(r'so.addVariable\("videoURL", "(.+?)"\)\;',
webpage, 'video url')
title = self._html_search_regex(r'<meta content="(.+?)" property="og:title" />',
webpage, 'video title')
description = self._html_search_regex(r'<meta name="description" content="(.+?)" />',
webpage, 'video description')
thumbnail = self._search_regex(r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')

return {'id': video_id,
'url' : final_url,
'url': final_url,
'title': title,
'ext': determine_ext(final_url),
'description': description,
@ -1,7 +1,9 @@
# encoding: utf-8
from __future__ import unicode_literals

import re, base64, zlib
import re
import base64
import zlib
from hashlib import sha1
from math import pow, sqrt, floor
from .common import InfoExtractor

@ -19,6 +21,7 @@ from ..aes import (
inc,
)


class CrunchyrollIE(InfoExtractor):
_VALID_URL = r'(?:https?://)?(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TESTS = [{

@ -70,8 +73,10 @@ class CrunchyrollIE(InfoExtractor):
return shaHash + [0] * 12

key = obfuscate_key(id)

class Counter:
__value = iv

def next_value(self):
temp = self.__value
self.__value = inc(self.__value)

@ -80,7 +85,7 @@ class CrunchyrollIE(InfoExtractor):
return zlib.decompress(decrypted_data)

def _convert_subtitles_to_srt(self, subtitles):
i=1
i = 1
output = ''
for start, end, text in re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles):
start = start.replace('.', ',')

@ -90,10 +95,10 @@ class CrunchyrollIE(InfoExtractor):
if not text:
continue
output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
i+=1
i += 1
return output

def _real_extract(self,url):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')

@ -130,18 +135,18 @@ class CrunchyrollIE(InfoExtractor):
formats = []
for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
stream_quality, stream_format = self._FORMAT_IDS[fmt]
video_format = fmt+'p'
video_format = fmt + 'p'
streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
# urlencode doesn't work!
streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for ' + video_format)
video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
formats.append({
'url': video_url,
'play_path': video_play_path,
'ext': 'flv',
'format': video_format,
'format_id': video_format,

@ -149,8 +154,8 @@ class CrunchyrollIE(InfoExtractor):

subtitles = {}
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
video_id, note='Downloading subtitles for '+sub_name)
sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
video_id, note='Downloading subtitles for ' + sub_name)
id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)

@ -167,12 +172,12 @@ class CrunchyrollIE(InfoExtractor):
subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)

return {
'id': video_id,
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
'upload_date': video_upload_date,
'subtitles': subtitles,
'formats': formats,
}
@ -46,7 +46,7 @@ class CSpanIE(InfoExtractor):
|
||||
url = unescapeHTML(data['video']['files'][0]['path']['#text'])
|
||||
|
||||
doc = self._download_xml('http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
|
||||
video_id)
|
||||
video_id)
|
||||
|
||||
def find_string(s):
|
||||
return find_xpath_attr(doc, './/string', 'name', s).text
|
||||
|
@ -16,7 +16,9 @@ from ..utils import (
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
|
||||
class DailymotionBaseInfoExtractor(InfoExtractor):
|
||||
|
||||
@staticmethod
|
||||
def _build_request(url):
|
||||
"""Build a request with the family filter disabled"""
|
||||
@ -25,7 +27,9 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
|
||||
request.add_header('Cookie', 'ff=off')
|
||||
return request
|
||||
|
||||
|
||||
class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
|
||||
|
||||
"""Information Extractor for Dailymotion"""
|
||||
|
||||
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
|
||||
@ -112,7 +116,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
|
||||
embed_page = self._download_webpage(embed_url, video_id,
|
||||
u'Downloading embed page')
|
||||
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
|
||||
'video info', flags=re.MULTILINE)
|
||||
'video info', flags=re.MULTILINE)
|
||||
info = json.loads(info)
|
||||
if info.get('error') is not None:
|
||||
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
|
||||
@ -149,12 +153,12 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
|
||||
view_count = str_to_int(view_count)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'id': video_id,
|
||||
'formats': formats,
|
||||
'uploader': info['owner_screenname'],
|
||||
'upload_date': video_upload_date,
|
||||
'title': self._og_search_title(webpage),
|
||||
'subtitles': video_subtitles,
|
||||
'upload_date': video_upload_date,
|
||||
'title': self._og_search_title(webpage),
|
||||
'subtitles': video_subtitles,
|
||||
'thumbnail': info['thumbnail_url'],
|
||||
'age_limit': age_limit,
|
||||
'view_count': view_count,
|
||||
@ -195,7 +199,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
|
||||
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
|
||||
break
|
||||
return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
|
||||
for video_id in orderedSet(video_ids)]
|
||||
for video_id in orderedSet(video_ids)]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
|
@ -9,7 +9,7 @@ from .common import InfoExtractor
|
||||
class DefenseGouvFrIE(InfoExtractor):
|
||||
IE_NAME = 'defense.gouv.fr'
|
||||
_VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
|
||||
r'ligthboxvideo/base-de-medias/webtv/(.*)')
|
||||
r'ligthboxvideo/base-de-medias/webtv/(.*)')
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
|
||||
@ -28,9 +28,9 @@ class DefenseGouvFrIE(InfoExtractor):
|
||||
webpage, 'ID')
|
||||
|
||||
json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
|
||||
+ video_id)
|
||||
+ video_id)
|
||||
info = self._download_webpage(json_url, title,
|
||||
'Downloading JSON config')
|
||||
'Downloading JSON config')
|
||||
video_url = json.loads(info)['renditions'][0]['url']
|
||||
|
||||
return {'id': video_id,
|
||||
|
@ -15,6 +15,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class DepositFilesIE(InfoExtractor):
|
||||
|
||||
"""Information extractor for depositfiles.com"""
|
||||
|
||||
_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
|
||||
@ -25,7 +26,7 @@ class DepositFilesIE(InfoExtractor):
|
||||
url = 'http://depositfiles.com/en/files/' + file_id
|
||||
|
||||
# Retrieve file webpage with 'Free download' button pressed
|
||||
free_download_indication = {'gateway_result' : '1'}
|
||||
free_download_indication = {'gateway_result': '1'}
|
||||
request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
|
||||
try:
|
||||
self.report_download_webpage(file_id)
|
||||
@ -51,10 +52,10 @@ class DepositFilesIE(InfoExtractor):
|
||||
file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title')
|
||||
|
||||
return [{
|
||||
'id': file_id.decode('utf-8'),
|
||||
'url': file_url.decode('utf-8'),
|
||||
'id': file_id.decode('utf-8'),
|
||||
'url': file_url.decode('utf-8'),
|
||||
'uploader': None,
|
||||
'upload_date': None,
|
||||
'title': file_title,
|
||||
'ext': file_extension.decode('utf-8'),
|
||||
'upload_date': None,
|
||||
'title': file_title,
|
||||
'ext': file_extension.decode('utf-8'),
|
||||
}]
|
||||
|
@ -28,7 +28,7 @@ class DiscoveryIE(InfoExtractor):
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
|
||||
webpage, 'video list', flags=re.DOTALL)
|
||||
webpage, 'video list', flags=re.DOTALL)
|
||||
video_list = json.loads(video_list_json)
|
||||
info = video_list['clips'][0]
|
||||
formats = []
|
||||
|
@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
|
||||
video_id = mobj.group('id')
|
||||
info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
|
||||
info = self._download_json(info_url, video_id)
|
||||
date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds
|
||||
date = time.gmtime(info['dateCreated'] / 1000) # The timestamp is in miliseconds
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
|
@ -23,7 +23,6 @@ class DreiSatIE(InfoExtractor):
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
|
@ -17,7 +17,7 @@ class EHowIE(InfoExtractor):
|
||||
u'info_dict': {
|
||||
u"title": u"Hardwood Flooring Basics",
|
||||
u"description": u"Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...",
|
||||
u"uploader": u"Erick Nathan"
|
||||
u"uploader": u"Erick Nathan"
|
||||
}
|
||||
}
|
||||
|
||||
@ -26,21 +26,20 @@ class EHowIE(InfoExtractor):
|
||||
video_id = mobj.group('id')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
|
||||
webpage, u'video URL')
|
||||
webpage, u'video URL')
|
||||
final_url = compat_urllib_parse.unquote(video_url)
|
||||
uploader = self._search_regex(r'<meta name="uploader" content="(.+?)" />',
|
||||
webpage, u'uploader')
|
||||
webpage, u'uploader')
|
||||
title = self._og_search_title(webpage).replace(' | eHow', '')
|
||||
ext = determine_ext(final_url)
|
||||
|
||||
return {
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'url': final_url,
|
||||
'ext': ext,
|
||||
'title': title,
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'url': final_url,
|
||||
'ext': ext,
|
||||
'title': title,
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'description': self._og_search_description(webpage),
|
||||
'uploader': uploader,
|
||||
'uploader': uploader,
|
||||
}
|
||||
|
||||
|
@ -82,7 +82,6 @@ class EightTracksIE(InfoExtractor):
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
if mobj is None:
|
||||
@ -102,8 +101,8 @@ class EightTracksIE(InfoExtractor):
|
||||
res = []
|
||||
for i in range(track_count):
|
||||
api_json = self._download_webpage(next_url, playlist_id,
|
||||
note=u'Downloading song information %s/%s' % (str(i+1), track_count),
|
||||
errnote=u'Failed to download song information')
|
||||
note=u'Downloading song information %s/%s' % (str(i + 1), track_count),
|
||||
errnote=u'Failed to download song information')
|
||||
api_data = json.loads(api_json)
|
||||
track_data = api_data[u'set']['track']
|
||||
info = {
|
||||
|
@ -8,6 +8,7 @@ from ..utils import (
|
||||
compat_urllib_parse,
|
||||
)
|
||||
|
||||
|
||||
class ExtremeTubeIE(InfoExtractor):
|
||||
_VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
|
||||
_TEST = {
|
||||
|
@ -15,6 +15,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class FacebookIE(InfoExtractor):
|
||||
|
||||
"""Information Extractor for Facebook"""
|
||||
|
||||
_VALID_URL = r'''(?x)
|
||||
@ -50,7 +51,7 @@ class FacebookIE(InfoExtractor):
|
||||
login_page_req.add_header('Cookie', 'locale=en_US')
|
||||
self.report_login()
|
||||
login_page = self._download_webpage(login_page_req, None, note=False,
|
||||
errnote=u'Unable to download login page')
|
||||
errnote=u'Unable to download login page')
|
||||
lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd')
|
||||
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd')
|
||||
|
||||
@ -64,7 +65,7 @@ class FacebookIE(InfoExtractor):
|
||||
'legacy_return': '1',
|
||||
'timezone': '-60',
|
||||
'trynum': '1',
|
||||
}
|
||||
}
|
||||
request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
|
||||
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
try:
|
||||
|
@ -26,9 +26,9 @@ class FazIE(InfoExtractor):
|
||||
self.to_screen(video_id)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
|
||||
u'config xml url')
|
||||
u'config xml url')
|
||||
config = self._download_xml(config_xml_url, video_id,
|
||||
u'Downloading config xml')
|
||||
u'Downloading config xml')
|
||||
|
||||
encodings = config.find('ENCODINGS')
|
||||
formats = []
|
||||
|
@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor):
|
||||
duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
|
||||
|
||||
like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
|
||||
webpage, 'like count', fatal=False)
|
||||
webpage, 'like count', fatal=False)
|
||||
dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
|
||||
webpage, 'dislike count', fatal=False)
|
||||
webpage, 'dislike count', fatal=False)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
|
@ -30,9 +30,9 @@ class FKTVIE(InfoExtractor):
|
||||
server = random.randint(2, 4)
|
||||
video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
|
||||
start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
|
||||
episode)
|
||||
episode)
|
||||
playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
|
||||
u'playlist', flags=re.DOTALL)
|
||||
u'playlist', flags=re.DOTALL)
|
||||
files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
|
||||
# TODO: return a single multipart video
|
||||
videos = []
|
||||
@ -71,8 +71,8 @@ class FKTVPosteckeIE(InfoExtractor):
|
||||
video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
|
||||
video_title = 'Postecke %d' % episode
|
||||
return {
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'ext': determine_ext(video_url),
|
||||
'title': video_title,
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'ext': determine_ext(video_url),
|
||||
'title': video_title,
|
||||
}
|
||||
|
@ -10,6 +10,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class FlickrIE(InfoExtractor):
|
||||
|
||||
"""Information Extractor for Flickr videos"""
|
||||
_VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
|
||||
_TEST = {
|
||||
@ -37,7 +38,7 @@ class FlickrIE(InfoExtractor):
|
||||
first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')
|
||||
|
||||
node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
|
||||
first_xml, 'node_id')
|
||||
first_xml, 'node_id')
|
||||
|
||||
second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
|
||||
second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
|
||||
@ -50,11 +51,11 @@ class FlickrIE(InfoExtractor):
|
||||
video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
|
||||
|
||||
return [{
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'ext': 'mp4',
|
||||
'title': self._og_search_title(webpage),
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'ext': 'mp4',
|
||||
'title': self._og_search_title(webpage),
|
||||
'description': self._og_search_description(webpage),
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'uploader_id': video_uploader_id,
|
||||
}]
|
||||
|
@ -55,7 +55,7 @@ class FourTubeIE(InfoExtractor):
|
||||
description = self._html_search_meta('description', webpage, 'description')
|
||||
if description:
|
||||
upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
|
||||
fatal=False)
|
||||
fatal=False)
|
||||
if upload_date:
|
||||
upload_date = unified_strdate(upload_date)
|
||||
view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
|
||||
@ -65,9 +65,9 @@ class FourTubeIE(InfoExtractor):
|
||||
|
||||
token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
|
||||
headers = {
|
||||
b'Content-Type': b'application/x-www-form-urlencoded',
|
||||
b'Origin': b'http://www.4tube.com',
|
||||
}
|
||||
b'Content-Type': b'application/x-www-form-urlencoded',
|
||||
b'Origin': b'http://www.4tube.com',
|
||||
}
|
||||
token_req = compat_urllib_request.Request(token_url, b'{}', headers)
|
||||
tokens = self._download_json(token_req, video_id)
|
||||
|
||||
@ -76,7 +76,7 @@ class FourTubeIE(InfoExtractor):
|
||||
'format_id': format + 'p',
|
||||
'resolution': format + 'p',
|
||||
'quality': int(format),
|
||||
} for format in sources]
|
||||
} for format in sources]
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
|
@ -12,6 +12,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class FranceTVBaseInfoExtractor(InfoExtractor):
|
||||
|
||||
def _extract_video(self, video_id):
|
||||
info = self._download_xml(
|
||||
'http://www.francetvinfo.fr/appftv/webservices/video/'
|
||||
@ -194,7 +195,7 @@ class GenerationQuoiIE(InfoExtractor):
|
||||
info_json = self._download_webpage(info_url, name)
|
||||
info = json.loads(info_json)
|
||||
return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
|
||||
ie='Dailymotion')
|
||||
ie='Dailymotion')
|
||||
|
||||
|
||||
class CultureboxIE(FranceTVBaseInfoExtractor):
|
||||
|
@ -9,7 +9,7 @@ class GamekingsIE(InfoExtractor):
|
||||
u"url": u"http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/",
|
||||
u'file': u'20130811.mp4',
|
||||
# MD5 is flaky, seems to change regularly
|
||||
#u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3',
|
||||
# u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3',
|
||||
u'info_dict': {
|
||||
u"title": u"Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review",
|
||||
u"description": u"Melle en Steven hebben voor de review een week in de rechtbank doorbracht met Phoenix Wright: Ace Attorney - Dual Destinies.",
|
||||
|
@ -116,14 +116,16 @@ class GenericIE(InfoExtractor):
|
||||
"""Check if it is a redirect, like url shorteners, in case return the new url."""
|
||||
|
||||
class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
|
||||
|
||||
"""
|
||||
Subclass the HTTPRedirectHandler to make it use our
|
||||
HEADRequest also on the redirected URL
|
||||
"""
|
||||
|
||||
def redirect_request(self, req, fp, code, msg, headers, newurl):
|
||||
if code in (301, 302, 303, 307):
|
||||
newurl = newurl.replace(' ', '%20')
|
||||
newheaders = dict((k,v) for k,v in req.headers.items()
|
||||
newheaders = dict((k, v) for k, v in req.headers.items()
|
||||
if k.lower() not in ("content-length", "content-type"))
|
||||
return HEADRequest(newurl,
|
||||
headers=newheaders,
|
||||
@ -133,19 +135,21 @@ class GenericIE(InfoExtractor):
|
||||
raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
|
||||
|
||||
class HTTPMethodFallback(compat_urllib_request.BaseHandler):
|
||||
|
||||
"""
|
||||
Fallback to GET if HEAD is not allowed (405 HTTP error)
|
||||
"""
|
||||
|
||||
def http_error_405(self, req, fp, code, msg, headers):
|
||||
fp.read()
|
||||
fp.close()
|
||||
|
||||
newheaders = dict((k,v) for k,v in req.headers.items()
|
||||
newheaders = dict((k, v) for k, v in req.headers.items()
|
||||
if k.lower() not in ("content-length", "content-type"))
|
||||
return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
|
||||
headers=newheaders,
|
||||
origin_req_host=req.get_origin_req_host(),
|
||||
unverifiable=True))
|
||||
headers=newheaders,
|
||||
origin_req_host=req.get_origin_req_host(),
|
||||
unverifiable=True))
|
||||
|
||||
# Build our opener
|
||||
opener = compat_urllib_request.OpenerDirector()
|
||||
@ -301,7 +305,7 @@ class GenericIE(InfoExtractor):
|
||||
# Look for embedded blip.tv player
|
||||
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
|
||||
if mobj:
|
||||
return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
|
||||
return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
|
||||
mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
|
||||
if mobj:
|
||||
return self.url_result(mobj.group(1), 'BlipTV')
|
||||
|
@ -48,17 +48,17 @@ class GooglePlusIE(InfoExtractor):
|
||||
|
||||
# Extract uploader
|
||||
uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
|
||||
webpage, 'uploader', fatal=False)
|
||||
webpage, 'uploader', fatal=False)
|
||||
|
||||
# Extract title
|
||||
# Get the first line for title
|
||||
video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
|
||||
webpage, 'title', default='NA')
|
||||
webpage, 'title', default='NA')
|
||||
|
||||
# Step 2, Simulate clicking the image box to launch video
|
||||
DOMAIN = 'https://plus.google.com/'
|
||||
video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
|
||||
webpage, 'video page URL')
|
||||
webpage, 'video page URL')
|
||||
if not video_page.startswith(DOMAIN):
|
||||
video_page = DOMAIN + video_page
|
||||
|
||||
@ -80,7 +80,7 @@ class GooglePlusIE(InfoExtractor):
|
||||
# Treat escaped \u0026 style hex
|
||||
try:
|
||||
video_url = video_url.decode("unicode_escape")
|
||||
except AttributeError: # Python 3
|
||||
except AttributeError: # Python 3
|
||||
video_url = bytes(video_url, 'ascii').decode('unicode-escape')
|
||||
|
||||
return {
|
||||
|
@ -6,6 +6,7 @@ import json
|
||||
from .common import InfoExtractor
|
||||
from ..utils import determine_ext
|
||||
|
||||
|
||||
class HarkIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
|
||||
_TEST = {
|
||||
@ -22,13 +23,13 @@ class HarkIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group(1)
|
||||
json_url = "http://www.hark.com/clips/%s.json" %(video_id)
|
||||
json_url = "http://www.hark.com/clips/%s.json" % (video_id)
|
||||
info_json = self._download_webpage(json_url, video_id)
|
||||
info = json.loads(info_json)
|
||||
final_url = info['url']
|
||||
|
||||
return {'id': video_id,
|
||||
'url' : final_url,
|
||||
'url': final_url,
|
||||
'title': info['name'],
|
||||
'ext': determine_ext(final_url),
|
||||
'description': info['description'],
|
||||
|
@ -27,10 +27,10 @@ class HowcastIE(InfoExtractor):
|
||||
self.report_extraction(video_id)
|
||||
|
||||
video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
|
||||
webpage, 'video URL')
|
||||
webpage, 'video URL')
|
||||
|
||||
video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
|
||||
webpage, 'description', fatal=False)
|
||||
webpage, 'description', fatal=False)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
|
@ -13,6 +13,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class HypemIE(InfoExtractor):
|
||||
|
||||
"""Information Extractor for hypem"""
|
||||
_VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
|
||||
_TEST = {
|
||||
@ -40,7 +41,7 @@ class HypemIE(InfoExtractor):
|
||||
self.report_extraction(track_id)
|
||||
|
||||
html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
|
||||
response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
|
||||
response, u'tracks', flags=re.MULTILINE | re.DOTALL).strip()
|
||||
try:
|
||||
track_list = json.loads(html_tracks)
|
||||
track = track_list[u'tracks'][0]
|
||||
@ -53,7 +54,7 @@ class HypemIE(InfoExtractor):
|
||||
title = track[u"song"]
|
||||
|
||||
serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
|
||||
request = compat_urllib_request.Request(serve_url, "" , {'Content-Type': 'application/json'})
|
||||
request = compat_urllib_request.Request(serve_url, "", {'Content-Type': 'application/json'})
|
||||
request.add_header('cookie', cookie)
|
||||
song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
|
||||
try:
|
||||
@ -63,9 +64,9 @@ class HypemIE(InfoExtractor):
|
||||
final_url = song_data[u"url"]
|
||||
|
||||
return [{
|
||||
'id': track_id,
|
||||
'url': final_url,
|
||||
'ext': "mp3",
|
||||
'title': title,
|
||||
'artist': artist,
|
||||
'id': track_id,
|
||||
'url': final_url,
|
||||
'ext': "mp3",
|
||||
'title': title,
|
||||
'artist': artist,
|
||||
}]
|
||||
|
@ -8,6 +8,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class IGNIE(InfoExtractor):
|
||||
|
||||
"""
|
||||
Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com.
|
||||
Some videos of it.ign.com are also supported
|
||||
@ -101,6 +102,7 @@ class IGNIE(InfoExtractor):
|
||||
|
||||
|
||||
class OneUPIE(IGNIE):
|
||||
|
||||
"""Extractor for 1up.com, it uses the ign videos system."""
|
||||
|
||||
_VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
|
||||
|
@ -37,11 +37,11 @@ class InfoQIE(InfoExtractor):
|
||||
|
||||
# Extract title
|
||||
video_title = self._search_regex(r'contentTitle = "(.*?)";',
|
||||
webpage, 'title')
|
||||
webpage, 'title')
|
||||
|
||||
# Extract description
|
||||
video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
|
||||
webpage, 'description', fatal=False)
|
||||
webpage, 'description', fatal=False)
|
||||
|
||||
video_filename = video_url.split('/')[-1]
|
||||
video_id, extension = video_filename.split('.')
|
||||
|
@ -24,9 +24,9 @@ class InstagramIE(InfoExtractor):
|
||||
video_id = mobj.group('id')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
|
||||
webpage, 'uploader id', fatal=False)
|
||||
webpage, 'uploader id', fatal=False)
|
||||
desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
|
||||
fatal=False)
|
||||
fatal=False)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
|
@ -29,7 +29,7 @@ class InternetVideoArchiveIE(InfoExtractor):
|
||||
def _clean_query(query):
|
||||
NEEDED_ARGS = ['publishedid', 'customerid']
|
||||
query_dic = compat_urlparse.parse_qs(query)
|
||||
cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS)
|
||||
cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
|
||||
# Other player ids return m3u8 urls
|
||||
cleaned_dic['playerid'] = '247'
|
||||
cleaned_dic['videokbrate'] = '100000'
|
||||
@ -42,22 +42,22 @@ class InternetVideoArchiveIE(InfoExtractor):
|
||||
url = self._build_url(query)
|
||||
|
||||
flashconfiguration = self._download_xml(url, video_id,
|
||||
u'Downloading flash configuration')
|
||||
u'Downloading flash configuration')
|
||||
file_url = flashconfiguration.find('file').text
|
||||
file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
|
||||
# Replace some of the parameters in the query to get the best quality
|
||||
# and http links (no m3u8 manifests)
|
||||
file_url = re.sub(r'(?<=\?)(.+)$',
|
||||
lambda m: self._clean_query(m.group()),
|
||||
file_url)
|
||||
lambda m: self._clean_query(m.group()),
|
||||
file_url)
|
||||
info = self._download_xml(file_url, video_id,
|
||||
u'Downloading video info')
|
||||
u'Downloading video info')
|
||||
item = info.find('channel/item')
|
||||
|
||||
def _bp(p):
|
||||
return xpath_with_ns(p,
|
||||
{'media': 'http://search.yahoo.com/mrss/',
|
||||
'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'})
|
||||
{'media': 'http://search.yahoo.com/mrss/',
|
||||
'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'})
|
||||
formats = []
|
||||
for content in item.findall(_bp('media:group/media:content')):
|
||||
attr = content.attrib
|
||||
|
@ -34,8 +34,8 @@ class IPrimaIE(InfoExtractor):
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
player_url = 'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' % (
|
||||
floor(random()*1073741824),
|
||||
floor(random()*1073741824))
|
||||
floor(random() * 1073741824),
|
||||
floor(random() * 1073741824))
|
||||
|
||||
req = compat_urllib_request.Request(player_url)
|
||||
req.add_header('Referer', url)
|
||||
@ -46,7 +46,7 @@ class IPrimaIE(InfoExtractor):
|
||||
zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
|
||||
|
||||
if zoneGEO != '0':
|
||||
base_url = base_url.replace('token', 'token_'+zoneGEO)
|
||||
base_url = base_url.replace('token', 'token_' + zoneGEO)
|
||||
|
||||
formats = []
|
||||
for format_id in ['lq', 'hq', 'hd']:
|
||||
@ -63,13 +63,13 @@ class IPrimaIE(InfoExtractor):
|
||||
quality = 1
|
||||
elif format_id == 'hd':
|
||||
quality = 2
|
||||
filename = 'hq/'+filename
|
||||
filename = 'hq/' + filename
|
||||
|
||||
formats.append({
|
||||
'format_id': format_id,
|
||||
'url': base_url,
|
||||
'quality': quality,
|
||||
'play_path': 'mp4:'+filename.replace('"', '')[:-4],
|
||||
'play_path': 'mp4:' + filename.replace('"', '')[:-4],
|
||||
'rtmp_live': True,
|
||||
'ext': 'flv',
|
||||
})
|
||||
|
@ -43,7 +43,7 @@ class IviIE(InfoExtractor):
|
||||
'thumbnail': 'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
|
||||
},
|
||||
'skip': 'Only works from Russia',
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
# Sorted by quality
|
||||
@ -137,17 +137,17 @@ class IviCompilationIE(InfoExtractor):
|
||||
compilation_id = mobj.group('compilationid')
|
||||
season_id = mobj.group('seasonid')
|
||||
|
||||
if season_id is not None: # Season link
|
||||
if season_id is not None: # Season link
|
||||
season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
|
||||
playlist_id = '%s/season%s' % (compilation_id, season_id)
|
||||
playlist_title = self._html_search_meta('title', season_page, 'title')
|
||||
entries = self._extract_entries(season_page, compilation_id)
|
||||
else: # Compilation link
|
||||
else: # Compilation link
|
||||
compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
|
||||
playlist_id = compilation_id
|
||||
playlist_title = self._html_search_meta('title', compilation_page, 'title')
|
||||
seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
|
||||
if len(seasons) == 0: # No seasons in this compilation
|
||||
if len(seasons) == 0: # No seasons in this compilation
|
||||
entries = self._extract_entries(compilation_page, compilation_id)
|
||||
else:
|
||||
entries = []
|
||||
|
@ -45,4 +45,3 @@ class JadoreCettePubIE(InfoExtractor):
|
||||
'title': title,
|
||||
'description': description,
|
||||
}
|
||||
|
||||
|
@ -7,6 +7,7 @@ from ..utils import (
|
||||
unescapeHTML,
|
||||
)
|
||||
|
||||
|
||||
class JukeboxIE(InfoExtractor):
|
||||
_VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
|
||||
_IFRAME = r'<iframe .*src="(?P<iframe>[^"]*)".*>'
|
||||
@ -37,10 +38,10 @@ class JukeboxIE(InfoExtractor):
|
||||
mobj = re.search(self._IS_YOUTUBE, iframe_html)
|
||||
if mobj is None:
|
||||
raise ExtractorError(u'Cannot extract video url')
|
||||
youtube_url = unescapeHTML(mobj.group('youtube_url')).replace('\/','/')
|
||||
youtube_url = unescapeHTML(mobj.group('youtube_url')).replace('\/', '/')
|
||||
self.to_screen(u'Youtube video detected')
|
||||
return self.url_result(youtube_url,ie='Youtube')
|
||||
video_url = unescapeHTML(mobj.group('video_url')).replace('\/','/')
|
||||
return self.url_result(youtube_url, ie='Youtube')
|
||||
video_url = unescapeHTML(mobj.group('video_url')).replace('\/', '/')
|
||||
video_ext = unescapeHTML(mobj.group('video_ext'))
|
||||
|
||||
mobj = re.search(self._TITLE, html)
|
||||
|
@ -10,6 +10,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class JustinTVIE(InfoExtractor):
|
||||
|
||||
"""Information extractor for justin.tv and twitch.tv"""
|
||||
# TODO: One broadcast may be split into multiple videos. The key
|
||||
# 'broadcast_id' is the same for all parts, and 'broadcast_part'
|
||||
@ -40,7 +41,7 @@ class JustinTVIE(InfoExtractor):
|
||||
def report_download_page(self, channel, offset):
|
||||
"""Report attempt to download a single page of videos."""
|
||||
self.to_screen(u'%s: Downloading video information from %d to %d' %
|
||||
(channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
|
||||
(channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
|
||||
|
||||
# Return count of items, list of *valid* items
|
||||
def _parse_page(self, url, video_id):
|
||||
@ -49,7 +50,7 @@ class JustinTVIE(InfoExtractor):
|
||||
u'unable to download video info JSON')
|
||||
|
||||
response = json.loads(info_json)
|
||||
if type(response) != list:
|
||||
if not isinstance(response, list):
|
||||
error_text = response.get('error', 'unknown error')
|
||||
raise ExtractorError(u'Justin.tv API: %s' % error_text)
|
||||
info = []
|
||||
@ -94,8 +95,8 @@ class JustinTVIE(InfoExtractor):
|
||||
|
||||
api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
|
||||
doc = self._download_xml(api, chapter_id,
|
||||
note=u'Downloading chapter information',
|
||||
errnote=u'Chapter information download failed')
|
||||
note=u'Downloading chapter information',
|
||||
errnote=u'Chapter information download failed')
|
||||
for a in doc.findall('.//archive'):
|
||||
if archive_id == a.find('./id').text:
|
||||
break
|
||||
@ -107,8 +108,8 @@ class JustinTVIE(InfoExtractor):
|
||||
|
||||
chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
|
||||
chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
|
||||
note='Downloading chapter metadata',
|
||||
errnote='Download of chapter metadata failed')
|
||||
note='Downloading chapter metadata',
|
||||
errnote='Download of chapter metadata failed')
|
||||
chapter_info = json.loads(chapter_info_json)
|
||||
|
||||
bracket_start = int(doc.find('.//bracket_start').text)
|
||||
@ -116,7 +117,7 @@ class JustinTVIE(InfoExtractor):
|
||||
|
||||
# TODO determine start (and probably fix up file)
|
||||
# youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
|
||||
#video_url += u'?start=' + TODO:start_timestamp
|
||||
# video_url += u'?start=' + TODO:start_timestamp
|
||||
# bracket_start is 13290, but we want 51670615
|
||||
self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
|
||||
u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
|
||||
|
@ -11,6 +11,7 @@ from ..aes import (
|
||||
aes_decrypt_text
|
||||
)
|
||||
|
||||
|
||||
class KeezMoviesIE(InfoExtractor):
|
||||
_VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>keezmovies\.com/video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
|
||||
_TEST = {
|
||||
@ -40,7 +41,7 @@ class KeezMoviesIE(InfoExtractor):
|
||||
|
||||
video_title = self._html_search_regex(r'<h1 [^>]*>([^<]+)', webpage, u'title')
|
||||
video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&', webpage, u'video_url'))
|
||||
if webpage.find('encrypted=true')!=-1:
|
||||
if webpage.find('encrypted=true') != -1:
|
||||
password = self._html_search_regex(r'video_title=(.+?)&', webpage, u'password')
|
||||
video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8')
|
||||
path = compat_urllib_parse_urlparse(video_url).path
|
||||
|
@ -20,18 +20,18 @@ class KickStarterIE(InfoExtractor):
|
||||
webpage_src = self._download_webpage(url, video_id)
|
||||
|
||||
video_url = self._search_regex(r'data-video="(.*?)">',
|
||||
webpage_src, u'video URL')
|
||||
webpage_src, u'video URL')
|
||||
if 'mp4' in video_url:
|
||||
ext = 'mp4'
|
||||
else:
|
||||
ext = 'flv'
|
||||
video_title = self._html_search_regex(r"<title>(.*?)</title>",
|
||||
webpage_src, u'title').rpartition(u'\u2014 Kickstarter')[0].strip()
|
||||
webpage_src, u'title').rpartition(u'\u2014 Kickstarter')[0].strip()
|
||||
|
||||
results = [{
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'title': video_title,
|
||||
'ext': ext,
|
||||
}]
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'title': video_title,
|
||||
'ext': ext,
|
||||
}]
|
||||
return results
|
||||
|
@ -33,20 +33,20 @@ class KontrTubeIE(InfoExtractor):
|
||||
video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
|
||||
thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
|
||||
title = self._html_search_regex(r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage,
|
||||
'video title')
|
||||
'video title')
|
||||
description = self._html_search_meta('description', webpage, 'video description')
|
||||
|
||||
mobj = re.search(r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>',
|
||||
webpage)
|
||||
webpage)
|
||||
duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
|
||||
|
||||
view_count = self._html_search_regex(r'<div class="col_2">Просмотров: <span>(\d+)</span></div>', webpage,
|
||||
'view count', fatal=False)
|
||||
'view count', fatal=False)
|
||||
view_count = int(view_count) if view_count is not None else None
|
||||
|
||||
comment_count = None
|
||||
comment_str = self._html_search_regex(r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count',
|
||||
fatal=False)
|
||||
fatal=False)
|
||||
if comment_str.startswith('комментариев нет'):
|
||||
comment_count = 0
|
||||
else:
|
||||
|
@ -53,7 +53,7 @@ class LifeNewsIE(InfoExtractor):
|
||||
r'<div class=\'comments\'>(\d+)</div>', webpage, 'comment count', fatal=False)
|
||||
|
||||
upload_date = self._html_search_regex(
|
||||
r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
|
||||
r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
|
||||
if upload_date is not None:
|
||||
upload_date = unified_strdate(upload_date)
|
||||
|
||||
|
@ -20,29 +20,29 @@ class LiveLeakIE(InfoExtractor):
|
||||
'title': 'Most unlucky car accident'
|
||||
}
|
||||
},
|
||||
{
|
||||
'url': 'http://www.liveleak.com/view?i=f93_1390833151',
|
||||
'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
|
||||
'info_dict': {
|
||||
'id': 'f93_1390833151',
|
||||
'ext': 'mp4',
|
||||
'description': 'German Television Channel NDR does an exclusive interview with Edward Snowden.\r\nUploaded on LiveLeak cause German Television thinks the rest of the world isn\'t intereseted in Edward Snowden.',
|
||||
'uploader': 'ARD_Stinkt',
|
||||
'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
|
||||
}
|
||||
},
|
||||
{
|
||||
'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
|
||||
'md5': '42c6d97d54f1db107958760788c5f48f',
|
||||
'info_dict': {
|
||||
'id': '4f7_1392687779',
|
||||
'ext': 'mp4',
|
||||
'description': "The guy with the cigarette seems amazingly nonchalant about the whole thing... I really hope my friends' reactions would be a bit stronger.\r\n\r\nAction-go to 0:55.",
|
||||
'uploader': 'CapObveus',
|
||||
'title': 'Man is Fatally Struck by Reckless Car While Packing up a Moving Truck',
|
||||
'age_limit': 18,
|
||||
}
|
||||
}]
|
||||
{
|
||||
'url': 'http://www.liveleak.com/view?i=f93_1390833151',
|
||||
'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
|
||||
'info_dict': {
|
||||
'id': 'f93_1390833151',
|
||||
'ext': 'mp4',
|
||||
'description': 'German Television Channel NDR does an exclusive interview with Edward Snowden.\r\nUploaded on LiveLeak cause German Television thinks the rest of the world isn\'t intereseted in Edward Snowden.',
|
||||
'uploader': 'ARD_Stinkt',
|
||||
'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
|
||||
}
|
||||
},
|
||||
{
|
||||
'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
|
||||
'md5': '42c6d97d54f1db107958760788c5f48f',
|
||||
'info_dict': {
|
||||
'id': '4f7_1392687779',
|
||||
'ext': 'mp4',
|
||||
'description': "The guy with the cigarette seems amazingly nonchalant about the whole thing... I really hope my friends' reactions would be a bit stronger.\r\n\r\nAction-go to 0:55.",
|
||||
'uploader': 'CapObveus',
|
||||
'title': 'Man is Fatally Struck by Reckless Car While Packing up a Moving Truck',
|
||||
'age_limit': 18,
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
|
@ -29,7 +29,7 @@ class LivestreamIE(InfoExtractor):
|
||||
'ext': 'mp4',
|
||||
'title': video_data['caption'],
|
||||
'thumbnail': video_data['thumbnail_url'],
|
||||
'upload_date': video_data['updated_at'].replace('-','')[:8],
|
||||
'upload_date': video_data['updated_at'].replace('-', '')[:8],
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -41,10 +41,10 @@ class LivestreamIE(InfoExtractor):
|
||||
if video_id is None:
|
||||
# This is an event page:
|
||||
config_json = self._search_regex(r'window.config = ({.*?});',
|
||||
webpage, u'window config')
|
||||
webpage, u'window config')
|
||||
info = json.loads(config_json)['event']
|
||||
videos = [self._extract_video_info(video_data['data'])
|
||||
for video_data in info['feed']['data'] if video_data['type'] == u'video']
|
||||
for video_data in info['feed']['data'] if video_data['type'] == u'video']
|
||||
return self.playlist_result(videos, info['id'], info['full_name'])
|
||||
else:
|
||||
og_video = self._og_search_video_url(webpage, name=u'player url')
|
||||
|
@ -27,7 +27,7 @@ class M6IE(InfoExtractor):
|
||||
video_id = mobj.group('id')
|
||||
|
||||
rss = self._download_xml('http://ws.m6.fr/v1/video/info/m6/bonus/%s' % video_id, video_id,
|
||||
'Downloading video RSS')
|
||||
'Downloading video RSS')
|
||||
|
||||
title = rss.find('./channel/item/title').text
|
||||
description = rss.find('./channel/item/description').text
|
||||
|
@ -7,6 +7,7 @@ from ..utils import (
|
||||
compat_urllib_parse,
|
||||
)
|
||||
|
||||
|
||||
class MalemotionIE(InfoExtractor):
|
||||
_VALID_URL = r'^(?:https?://)?malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
|
||||
_TEST = {
|
||||
|
@ -9,7 +9,9 @@ from ..utils import (
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
|
||||
class MetacafeIE(InfoExtractor):
|
||||
|
||||
"""Information Extractor for metacafe.com."""
|
||||
|
||||
_VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
|
||||
@ -17,72 +19,71 @@ class MetacafeIE(InfoExtractor):
|
||||
_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
|
||||
IE_NAME = u'metacafe'
|
||||
_TESTS = [
|
||||
# Youtube video
|
||||
{
|
||||
u"add_ie": ["Youtube"],
|
||||
u"url": u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
|
||||
u"file": u"_aUehQsCQtM.mp4",
|
||||
u"info_dict": {
|
||||
u"upload_date": u"20090102",
|
||||
u"title": u"The Electric Company | \"Short I\" | PBS KIDS GO!",
|
||||
u"description": u"md5:2439a8ef6d5a70e380c22f5ad323e5a8",
|
||||
u"uploader": u"PBS",
|
||||
u"uploader_id": u"PBS"
|
||||
}
|
||||
},
|
||||
# Normal metacafe video
|
||||
{
|
||||
u'url': u'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/',
|
||||
u'md5': u'6e0bca200eaad2552e6915ed6fd4d9ad',
|
||||
u'info_dict': {
|
||||
u'id': u'11121940',
|
||||
u'ext': u'mp4',
|
||||
u'title': u'News: Stuff You Won\'t Do with Your PlayStation 4',
|
||||
u'uploader': u'ign',
|
||||
u'description': u'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.',
|
||||
# Youtube video
|
||||
{
|
||||
u"add_ie": ["Youtube"],
|
||||
u"url": u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
|
||||
u"file": u"_aUehQsCQtM.mp4",
|
||||
u"info_dict": {
|
||||
u"upload_date": u"20090102",
|
||||
u"title": u"The Electric Company | \"Short I\" | PBS KIDS GO!",
|
||||
u"description": u"md5:2439a8ef6d5a70e380c22f5ad323e5a8",
|
||||
u"uploader": u"PBS",
|
||||
u"uploader_id": u"PBS"
|
||||
}
|
||||
},
|
||||
},
|
||||
# AnyClip video
|
||||
{
|
||||
u"url": u"http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/",
|
||||
u"file": u"an-dVVXnuY7Jh77J.mp4",
|
||||
u"info_dict": {
|
||||
u"title": u"The Andromeda Strain (1971): Stop the Bomb Part 3",
|
||||
u"uploader": u"anyclip",
|
||||
u"description": u"md5:38c711dd98f5bb87acf973d573442e67",
|
||||
# Normal metacafe video
|
||||
{
|
||||
u'url': u'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/',
|
||||
u'md5': u'6e0bca200eaad2552e6915ed6fd4d9ad',
|
||||
u'info_dict': {
|
||||
u'id': u'11121940',
|
||||
u'ext': u'mp4',
|
||||
u'title': u'News: Stuff You Won\'t Do with Your PlayStation 4',
|
||||
u'uploader': u'ign',
|
||||
u'description': u'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.',
|
||||
},
|
||||
},
|
||||
},
|
||||
# age-restricted video
|
||||
{
|
||||
u'url': u'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/',
|
||||
u'md5': u'98dde7c1a35d02178e8ab7560fe8bd09',
|
||||
u'info_dict': {
|
||||
u'id': u'5186653',
|
||||
u'ext': u'mp4',
|
||||
u'title': u'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.',
|
||||
u'uploader': u'Dwayne Pipe',
|
||||
u'description': u'md5:950bf4c581e2c059911fa3ffbe377e4b',
|
||||
u'age_limit': 18,
|
||||
# AnyClip video
|
||||
{
|
||||
u"url": u"http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/",
|
||||
u"file": u"an-dVVXnuY7Jh77J.mp4",
|
||||
u"info_dict": {
|
||||
u"title": u"The Andromeda Strain (1971): Stop the Bomb Part 3",
|
||||
u"uploader": u"anyclip",
|
||||
u"description": u"md5:38c711dd98f5bb87acf973d573442e67",
|
||||
},
|
||||
},
|
||||
},
|
||||
# cbs video
|
||||
{
|
||||
u'url': u'http://www.metacafe.com/watch/cb-0rOxMBabDXN6/samsung_galaxy_note_2_samsungs_next_generation_phablet/',
|
||||
u'info_dict': {
|
||||
u'id': u'0rOxMBabDXN6',
|
||||
u'ext': u'flv',
|
||||
u'title': u'Samsung Galaxy Note 2: Samsung\'s next-generation phablet',
|
||||
u'description': u'md5:54d49fac53d26d5a0aaeccd061ada09d',
|
||||
u'duration': 129,
|
||||
# age-restricted video
|
||||
{
|
||||
u'url': u'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/',
|
||||
u'md5': u'98dde7c1a35d02178e8ab7560fe8bd09',
|
||||
u'info_dict': {
|
||||
u'id': u'5186653',
|
||||
u'ext': u'mp4',
|
||||
u'title': u'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.',
|
||||
u'uploader': u'Dwayne Pipe',
|
||||
u'description': u'md5:950bf4c581e2c059911fa3ffbe377e4b',
|
||||
u'age_limit': 18,
|
||||
},
|
||||
},
|
||||
u'params': {
|
||||
# rtmp download
|
||||
u'skip_download': True,
|
||||
# cbs video
|
||||
{
|
||||
u'url': u'http://www.metacafe.com/watch/cb-0rOxMBabDXN6/samsung_galaxy_note_2_samsungs_next_generation_phablet/',
|
||||
u'info_dict': {
|
||||
u'id': u'0rOxMBabDXN6',
|
||||
u'ext': u'flv',
|
||||
u'title': u'Samsung Galaxy Note 2: Samsung\'s next-generation phablet',
|
||||
u'description': u'md5:54d49fac53d26d5a0aaeccd061ada09d',
|
||||
u'duration': 129,
|
||||
},
|
||||
u'params': {
|
||||
# rtmp download
|
||||
u'skip_download': True,
|
||||
},
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def report_disclaimer(self):
|
||||
"""Report disclaimer retrieval."""
|
||||
self.to_screen(u'Retrieving disclaimer')
|
||||
@ -96,7 +97,7 @@ class MetacafeIE(InfoExtractor):
|
||||
disclaimer_form = {
|
||||
'filters': '0',
|
||||
'submit': "Continue - I'm over 18",
|
||||
}
|
||||
}
|
||||
request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
|
||||
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
self.report_age_confirmation()
|
||||
@ -167,8 +168,8 @@ class MetacafeIE(InfoExtractor):
|
||||
video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, u'title')
|
||||
description = self._og_search_description(webpage)
|
||||
video_uploader = self._html_search_regex(
|
||||
r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
|
||||
webpage, u'uploader nickname', fatal=False)
|
||||
r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
|
||||
webpage, u'uploader nickname', fatal=False)
|
||||
|
||||
if re.search(r'"contentRating":"restricted"', webpage) is not None:
|
||||
age_limit = 18
|
||||
@ -176,13 +177,13 @@ class MetacafeIE(InfoExtractor):
|
||||
age_limit = 0
|
||||
|
||||
return {
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'_type': 'video',
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'description': description,
|
||||
'uploader': video_uploader,
|
||||
'upload_date': None,
|
||||
'title': video_title,
|
||||
'ext': video_ext,
|
||||
'upload_date': None,
|
||||
'title': video_title,
|
||||
'ext': video_ext,
|
||||
'age_limit': age_limit,
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ class MetacriticIE(InfoExtractor):
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
# The xml is not well formatted, there are raw '&'
|
||||
info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
|
||||
video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
|
||||
video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
|
||||
|
||||
clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
|
||||
formats = []
|
||||
@ -43,7 +43,7 @@ class MetacriticIE(InfoExtractor):
|
||||
self._sort_formats(formats)
|
||||
|
||||
description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
|
||||
webpage, 'description', flags=re.DOTALL)
|
||||
webpage, 'description', flags=re.DOTALL)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
|
@ -30,9 +30,9 @@ class TechTVMITIE(InfoExtractor):
|
||||
clean_page = re.compile(u'<!--.*?-->', re.S).sub(u'', raw_page)
|
||||
|
||||
base_url = self._search_regex(r'ipadUrl: \'(.+?cloudfront.net/)',
|
||||
raw_page, u'base url')
|
||||
raw_page, u'base url')
|
||||
formats_json = self._search_regex(r'bitrates: (\[.+?\])', raw_page,
|
||||
u'video formats')
|
||||
u'video formats')
|
||||
formats_mit = json.loads(formats_json)
|
||||
formats = [
|
||||
{
|
||||
@ -49,7 +49,7 @@ class TechTVMITIE(InfoExtractor):
|
||||
title = get_element_by_id('edit-title', clean_page)
|
||||
description = clean_html(get_element_by_id('edit-description', clean_page))
|
||||
thumbnail = self._search_regex(r'playlist:.*?url: \'(.+?)\'',
|
||||
raw_page, u'thumbnail', flags=re.DOTALL)
|
||||
raw_page, u'thumbnail', flags=re.DOTALL)
|
||||
|
||||
return {'id': video_id,
|
||||
'title': title,
|
||||
@ -79,5 +79,5 @@ class MITIE(TechTVMITIE):
|
||||
webpage = self._download_webpage(url, page_title)
|
||||
self.to_screen('%s: Extracting %s url' % (page_title, TechTVMITIE.IE_NAME))
|
||||
embed_url = self._search_regex(r'<iframe .*?src="(.+?)"', webpage,
|
||||
u'embed url')
|
||||
u'embed url')
|
||||
return self.url_result(embed_url, ie='TechTVMIT')
|
||||
|