Fixed PEP8 issues except E501
parent ccb079ee67
commit 3b5ee5c51e
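E501 is the line-length check; the remaining pep8 warning classes are fixed throughout. As a quick orientation, the recurring rewrite patterns look roughly like the sketch below — an illustrative composite with made-up names, not code taken from any single file in this diff:

```python
# Patterns flagged by the pep8 tool, with the fixes applied in this commit:
#   import sys, os              ->  one import per line (E401)
#   if cond: return x           ->  body on its own line (E701)
#   type(msg) == type('')       ->  isinstance(msg, type('')) (E721)
#   opts = { 'a': 1 }           ->  opts = {'a': 1} (E201/E202)
#   #comment                    ->  # comment (E262)

import sys
import os  # E401 fix: previously written as "import sys, os"


def check(msg, flags):
    # E262 fix: a space after the hash in comments
    assert(isinstance(msg, type('')))  # E721 fix: no direct type comparison
    opts = {'build': 1, 'download': 2}  # E201/E202 fix: no space inside braces
    if msg in flags:  # E701 fix: the return moved to its own line
        return opts
    return None
```

The per-file hunks below are these same rewrites applied mechanically.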
@@ -9,16 +9,17 @@ import youtube_dl
 BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
 BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
 
+
 def build_completion(opt_parser):
     opts_flag = []
     for group in opt_parser.option_groups:
         for option in group.option_list:
-            #for every long flag
+            # for every long flag
             opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
     with open(BASH_COMPLETION_FILE, "w") as f:
-        #just using the special char
+        # just using the special char
         filled_template = template.replace("{{flags}}", " ".join(opts_flag))
         f.write(filled_template)
 
@@ -233,7 +233,9 @@ def rmtree(path):
 
 #==============================================================================
 
+
 class BuildError(Exception):
+
     def __init__(self, output, code=500):
         self.output = output
         self.code = code
@@ -247,6 +249,7 @@ class HTTPError(BuildError):
 
 
 class PythonBuilder(object):
+
     def __init__(self, **kwargs):
         pythonVersion = kwargs.pop('python', '2.7')
         try:
@@ -262,6 +265,7 @@ class PythonBuilder(object):
 
 
 class GITInfoBuilder(object):
+
     def __init__(self, **kwargs):
         try:
             self.user, self.repoName = kwargs['path'][:2]
@@ -281,6 +285,7 @@ class GITInfoBuilder(object):
 
 
 class GITBuilder(GITInfoBuilder):
+
     def build(self):
         try:
             subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
@@ -313,6 +318,7 @@ class YoutubeDLBuilder(object):
 
 
 class DownloadBuilder(object):
+
     def __init__(self, **kwargs):
         self.handler = kwargs.pop('handler')
         self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
@@ -341,6 +347,7 @@ class DownloadBuilder(object):
 
 
 class CleanupTempDir(object):
+
     def build(self):
         try:
             rmtree(self.basePath)
@@ -351,6 +358,7 @@ class CleanupTempDir(object):
 
 
 class Null(object):
+
     def __init__(self, **kwargs):
         pass
 
@@ -369,7 +377,7 @@ class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, Clea
 
 
 class BuildHTTPRequestHandler(BaseHTTPRequestHandler):
-    actionDict = { 'build': Builder, 'download': Builder } # They're the same, no more caching.
+    actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching.
 
     def do_GET(self):
         path = urlparse.urlparse(self.path)
@@ -11,22 +11,22 @@ except NameError:
 
 versions_info = json.load(open('update/versions.json'))
 if 'signature' in versions_info:
     del versions_info['signature']
 
 print('Enter the PKCS1 private key, followed by a blank line:')
 privkey = b''
 while True:
     try:
         line = input()
     except EOFError:
         break
     if line == '':
         break
     privkey += line.encode('ascii') + b'\n'
 privkey = rsa.PrivateKey.load_pkcs1(privkey)
 
 signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
 print('signature: ' + signature)
 
 versions_info['signature'] = signature
 json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
@@ -5,7 +5,7 @@ from __future__ import with_statement
 
 import datetime
 import glob
 import io # For Python 2 compatibilty
 import os
 import re
 
@@ -39,8 +39,7 @@ now_iso = now.isoformat() + 'Z'
 atom_template = atom_template.replace('@TIMESTAMP@', now_iso)
 
 versions_info = json.load(open('update/versions.json'))
-versions = list(versions_info['versions'].keys())
-versions.sort()
+versions = sorted(versions_info['versions'].keys())
 
 entries = []
 for v in versions:
@@ -73,4 +72,3 @@ atom_template = atom_template.replace('@ENTRIES@', entries_str)
 
 with io.open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
     atom_file.write(atom_template)
-
@@ -9,6 +9,7 @@ sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(
 
 import youtube_dl
 
+
 def main():
     with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf:
         template = tmplf.read()
@@ -21,7 +22,7 @@ def main():
             continue
         elif ie_desc is not None:
             ie_html += ': {}'.format(ie.IE_DESC)
-        if ie.working() == False:
+        if not ie.working():
             ie_html += ' (Currently broken)'
         ie_htmls.append('<li>{}</li>'.format(ie_html))
 
@@ -1,10 +1,11 @@
 #!/usr/bin/env python
 
-import sys, os
+import sys
+import os
 
 try:
     import urllib.request as compat_urllib_request
 except ImportError: # Python 2
     import urllib2 as compat_urllib_request
 
 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -12,9 +13,9 @@ sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorr
 sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
 
 try:
     raw_input()
 except NameError: # Python 3
     input()
 
 filename = sys.argv[0]
 
@@ -9,4 +9,4 @@ py2exe_options = {
     "dll_excludes": ['w9xpopen.exe']
 }
 
-setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)
+setup(console=['youtube-dl.py'], options={"py2exe": py2exe_options}, zipfile=None)
@@ -1,17 +1,23 @@
 #!/usr/bin/env python
 
-import sys, os
+import sys
+import os
 import urllib2
-import json, hashlib
+import json
+import hashlib
+
 
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info
+
     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
+    assert(isinstance(message, type(b(''))))
     block_size = 0
     n = key[0]
     while n:
@@ -23,13 +29,17 @@ def rsa_verify(message, signature, key):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not b('\x00') in signature:
+        return False
+    signature = signature[signature.index(b('\x00')) + 1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
 
 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -92,7 +102,7 @@ echo Updating youtube-dl...
 ping 127.0.0.1 -n 5 -w 1000 > NUL
 move /Y "%s.new" "%s"
 del "%s"
-\n""" %(exe, exe, bat))
+\n""" % (exe, exe, bat))
 b.close()
 
 os.startfile(bat)
@@ -47,13 +47,14 @@ def report_warning(message):
 
 
 class FakeYDL(YoutubeDL):
+
     def __init__(self, override=None):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
         params = get_params(override=override)
         super(FakeYDL, self).__init__(params)
         self.result = []
 
     def to_screen(self, s, skip_eol=None):
         print(s)
 
@@ -66,11 +67,14 @@ class FakeYDL(YoutubeDL):
     def expect_warning(self, regex):
         # Silence an expected warning matching a regex
         old_report_warning = self.report_warning
+
         def report_warning(self, message):
-            if re.match(regex, message): return
+            if re.match(regex, message):
+                return
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)
 
+
 def get_testcases():
     for ie in youtube_dl.extractor.gen_extractors():
         t = getattr(ie, '_TEST', None)
@@ -14,6 +14,7 @@ from youtube_dl.extractor import YoutubeIE
 
 
 class YDL(FakeYDL):
+
     def __init__(self, *args, **kwargs):
         super(YDL, self).__init__(*args, **kwargs)
         self.downloaded_info_dicts = []
@@ -27,13 +28,14 @@ class YDL(FakeYDL):
 
 
 class TestFormatSelection(unittest.TestCase):
+
     def test_prefer_free_formats(self):
         # Same resolution => download webm
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
             {'ext': 'webm', 'height': 460},
             {'ext': 'mp4', 'height': 460},
         ]
         info_dict = {'formats': formats, 'extractor': 'test'}
         yie = YoutubeIE(ydl)
@@ -236,6 +238,7 @@ class TestFormatSelection(unittest.TestCase):
             'ext': 'mp4',
             'width': None,
         }
+
         def fname(templ):
             ydl = YoutubeDL({'outtmpl': templ})
             return ydl.prepare_filename(info)
@@ -32,6 +32,7 @@ def _download_restricted(url, filename, age):
 
 
 class TestAgeRestriction(unittest.TestCase):
+
     def _assert_restricted(self, url, filename, age, old_age=None):
         self.assertTrue(_download_restricted(url, filename, old_age))
         self.assertFalse(_download_restricted(url, filename, age))
@@ -21,6 +21,7 @@ from youtube_dl.extractor import (
 
 
 class TestAllURLsMatching(unittest.TestCase):
+
     def setUp(self):
         self.ies = gen_extractors()
 
@@ -33,19 +34,19 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') # 585
         assertPlaylist('PL63F0C78739B09958')
         assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
         assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) # 668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
@@ -34,18 +34,23 @@ from youtube_dl.extractor import get_info_extractor
 
 RETRIES = 3
 
+
 class YoutubeDL(youtube_dl.YoutubeDL):
+
     def __init__(self, *args, **kwargs):
         self.to_stderr = self.to_screen
         self.processed_info_dicts = []
         super(YoutubeDL, self).__init__(*args, **kwargs)
+
     def report_warning(self, message):
         # Don't accept warnings during tests
         raise ExtractorError(message)
+
     def process_info(self, info_dict):
         self.processed_info_dicts.append(info_dict)
         return super(YoutubeDL, self).process_info(info_dict)
 
+
 def _file_md5(fn):
     with open(fn, 'rb') as f:
         return hashlib.md5(f.read()).hexdigest()
@@ -55,15 +60,19 @@ defs = get_testcases()
 
 class TestDownload(unittest.TestCase):
     maxDiff = None
+
     def setUp(self):
         self.defs = defs
 
-### Dynamically generate tests
+# Dynamically generate tests
 
+
 def generator(test_case):
+
     def test_template(self):
         ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
         other_ies = [get_info_extractor(ie_key) for ie_key in test_case.get('add_ie', [])]
+
         def print_skipping(reason):
             print('Skipping %s: %s' % (test_case['name'], reason))
         if not ie.working():
@@ -73,7 +82,7 @@ def generator(test_case):
         info_dict = test_case.get('info_dict', {})
         if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
             print_skipping('The output file cannot be know, the "file" '
                 'key is missing or the info_dict is incomplete')
             return
         if 'skip' in test_case:
             print_skipping(test_case['skip'])
@@ -88,6 +97,7 @@ def generator(test_case):
         ydl = YoutubeDL(params)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
+
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
@@ -97,6 +107,7 @@ def generator(test_case):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
 
         test_cases = test_case.get('playlist', [test_case])
+
         def try_rm_tcs_files():
             for tc in test_cases:
                 tc_filename = get_tc_filename(tc)
@@ -142,12 +153,12 @@ def generator(test_case):
                 else:
                     got = info_dict.get(info_field)
                 self.assertEqual(expected, got,
                     u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
             # If checkable fields are missing from the test case, print the info_dict
             test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
                 for key, value in info_dict.items()
                 if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
             if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
                 sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
 
@@ -162,13 +173,13 @@ def generator(test_case):
 
     return test_template
 
-### And add them to TestDownload
+# And add them to TestDownload
 for n, test_case in enumerate(defs):
     test_method = generator(test_case)
     tname = 'test_' + str(test_case['name'])
     i = 1
     while hasattr(TestDownload, tname):
         tname = 'test_' + str(test_case['name']) + '_' + str(i)
         i += 1
     test_method.__name__ = tname
     setattr(TestDownload, test_method.__name__, test_method)
@@ -11,12 +11,14 @@ try:
 except AttributeError:
     _DEV_NULL = open(os.devnull, 'wb')
 
+
 class TestExecution(unittest.TestCase):
+
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 
     def test_module_exec(self):
-        if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
+        if sys.version_info >= (2, 7): # Python 2.6 doesn't support package execution
             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
     def test_main_exec(self):
@@ -40,6 +40,7 @@ from youtube_dl.extractor import (
 
 
 class TestPlaylists(unittest.TestCase):
+
     def assertIsPlaylist(self, info):
         """Make sure the info has '_type' set to 'playlist'"""
         self.assertEqual(info['_type'], 'playlist')
@@ -148,7 +149,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['title'], 'Nightmare Night EP')
         self.assertTrue(len(result['entries']) >= 4)
 
     def test_smotri_community(self):
         dl = FakeYDL()
         ie = SmotriCommunityIE(dl)
@@ -157,7 +158,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], 'kommuna')
         self.assertEqual(result['title'], 'КПРФ')
         self.assertTrue(len(result['entries']) >= 4)
 
     def test_smotri_user(self):
         dl = FakeYDL()
         ie = SmotriUserIE(dl)
@@ -176,7 +177,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['title'], 'Building Dynamic Websites')
         self.assertEqual(result['description'], u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
         self.assertEqual(len(result['entries']), 10)
 
     def test_ivi_compilation(self):
         dl = FakeYDL()
         ie = IviCompilationIE(dl)
@@ -185,7 +186,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], 'dezhurnyi_angel')
         self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)')
         self.assertTrue(len(result['entries']) >= 36)
 
     def test_ivi_compilation_season(self):
         dl = FakeYDL()
         ie = IviCompilationIE(dl)
@@ -194,7 +195,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], 'dezhurnyi_angel/season2')
         self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон')
         self.assertTrue(len(result['entries']) >= 20)
 
     def test_imdb_list(self):
         dl = FakeYDL()
         ie = ImdbListIE(dl)
@@ -222,7 +223,7 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], 'm7m0jJAbMQi')
         self.assertEqual(result['title'], 'Driving')
         self.assertEqual(len(result['entries']), 24)
 
     def test_rutube_channel(self):
         dl = FakeYDL()
         ie = RutubeChannelIE(dl)
@@ -21,6 +21,7 @@ from youtube_dl.extractor import (
 class BaseTestSubtitles(unittest.TestCase):
     url = None
     IE = None
+
     def setUp(self):
         self.DL = FakeYDL()
         self.ie = self.IE(self.DL)
@@ -13,6 +13,7 @@ IGNORED_FILES = [
 
 
 class TestUnicodeLiterals(unittest.TestCase):
+
     def test_all_files(self):
         print('Skipping this test (not yet fully implemented)')
         return
@@ -41,6 +41,7 @@ else:
 
 
 class TestUtil(unittest.TestCase):
+
     def test_timeconvert(self):
         self.assertTrue(timeconvert('') is None)
         self.assertTrue(timeconvert('bougrg') is None)
@@ -109,14 +110,14 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
         self.assertEqual(orderedSet([1]), [1])
-        #keep the list ordered
+        # keep the list ordered
         self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
 
     def test_unescape_html(self):
         self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
 
     def test_daterange(self):
-        _20century = DateRange("19000101","20000101")
+        _20century = DateRange("19000101", "20000101")
         self.assertFalse("17890714" in _20century)
         _ac = DateRange("00010101")
         self.assertTrue("19690721" in _ac)
@@ -19,6 +19,7 @@ import youtube_dl.extractor
 
 
 class YoutubeDL(youtube_dl.YoutubeDL):
+
     def __init__(self, *args, **kwargs):
         super(YoutubeDL, self).__init__(*args, **kwargs)
         self.to_stderr = self.to_screen
@@ -31,19 +32,19 @@ params = get_params({
 })
 
 
-
 TEST_ID = 'gr51aVj-mLg'
 ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
 EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
 
+
 class TestAnnotations(unittest.TestCase):
+
     def setUp(self):
         # Clear old files
         self.tearDown()
 
-
     def test_info_json(self):
-        expected = list(EXPECTED_ANNOTATIONS) #Two annotations could have the same text.
+        expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text.
         ie = youtube_dl.extractor.YoutubeIE()
         ydl = YoutubeDL(params)
         ydl.add_info_extractor(ie)
@@ -59,18 +60,17 @@ class TestAnnotations(unittest.TestCase):
         self.assertEqual(annotationsTag.tag, 'annotations')
         annotations = annotationsTag.findall('annotation')
 
-        #Not all the annotations have TEXT children and the annotations are returned unsorted.
+        # Not all the annotations have TEXT children and the annotations are returned unsorted.
         for a in annotations:
             self.assertEqual(a.tag, 'annotation')
             if a.get('type') == 'text':
                 textTag = a.find('TEXT')
                 text = textTag.text
-                self.assertTrue(text in expected) #assertIn only added in python 2.7
-                #remove the first occurance, there could be more than one annotation with the same text
+                self.assertTrue(text in expected) # assertIn only added in python 2.7
+                # remove the first occurance, there could be more than one annotation with the same text
                 expected.remove(text)
-        #We should have seen (and removed) all the expected annotation texts.
+        # We should have seen (and removed) all the expected annotation texts.
         self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
 
-
     def tearDown(self):
         try_rm(ANNOTATIONS_FILE)
@@ -18,6 +18,7 @@ import youtube_dl.extractor
 
 
 class YoutubeDL(youtube_dl.YoutubeDL):
+
     def __init__(self, *args, **kwargs):
         super(YoutubeDL, self).__init__(*args, **kwargs)
         self.to_stderr = self.to_screen
@@ -41,6 +42,7 @@ For more information, contact phihag@phihag.de .'''
 
 
 class TestInfoJSON(unittest.TestCase):
+
     def setUp(self):
         # Clear old files
         self.tearDown()
@@ -20,6 +20,7 @@ from youtube_dl.extractor import (
 
 
 class TestYoutubeLists(unittest.TestCase):
+
     def assertIsPlaylist(self, info):
         """Make sure the info has '_type' set to 'playlist'"""
         self.assertEqual(info['_type'], 'playlist')
@@ -31,7 +32,7 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['title'], 'ytdl test PL')
         ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
-        self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
+        self.assertEqual(ytie_results, ['bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
 
     def test_youtube_playlist_noplaylist(self):
         dl = FakeYDL()
@@ -55,14 +56,14 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertTrue(len(result['entries']) >= 799)
 
     def test_youtube_playlist_with_deleted(self):
-        #651
+        # 651
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
         result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
         ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
         self.assertFalse('pElCt5oNDuI' in ytie_results)
         self.assertFalse('KdPEApIVdWM' in ytie_results)
 
     def test_youtube_playlist_empty(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
@@ -83,10 +84,10 @@ class TestYoutubeLists(unittest.TestCase):
     def test_youtube_channel(self):
         dl = FakeYDL()
         ie = YoutubeChannelIE(dl)
-        #test paginated channel
+        # test paginated channel
         result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
         self.assertTrue(len(result['entries']) > 90)
-        #test autogenerated channel
+        # test autogenerated channel
         result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
         self.assertTrue(len(result['entries']) >= 18)
 
@@ -37,6 +37,7 @@ _TESTS = [
 
 
 class TestSignature(unittest.TestCase):
+
     def setUp(self):
         TEST_DIR = os.path.dirname(os.path.abspath(__file__))
         self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
youtube-dl (38 changed lines)
@@ -1,21 +1,27 @@
 #!/usr/bin/env python
 
-import sys, os
-import json, hashlib
+import sys
+import os
+import json
+import hashlib
 
 try:
     import urllib.request as compat_urllib_request
 except ImportError: # Python 2
     import urllib2 as compat_urllib_request
 
+
 def rsa_verify(message, signature, key):
     from struct import pack
     from hashlib import sha256
     from sys import version_info
+
     def b(x):
-        if version_info[0] == 2: return x
-        else: return x.encode('latin1')
-    assert(type(message) == type(b('')))
+        if version_info[0] == 2:
+            return x
+        else:
+            return x.encode('latin1')
+    assert(isinstance(message, type(b(''))))
     block_size = 0
     n = key[0]
     while n:
@@ -27,13 +33,17 @@ def rsa_verify(message, signature, key):
         raw_bytes.insert(0, pack("B", signature & 0xFF))
         signature >>= 8
     signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
-    if signature[0:2] != b('\x00\x01'): return False
+    if signature[0:2] != b('\x00\x01'):
+        return False
     signature = signature[2:]
-    if not b('\x00') in signature: return False
-    signature = signature[signature.index(b('\x00'))+1:]
-    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
+    if not b('\x00') in signature:
+        return False
+    signature = signature[signature.index(b('\x00')) + 1:]
+    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
+        return False
     signature = signature[19:]
-    if signature != sha256(message).digest(): return False
+    if signature != sha256(message).digest():
+        return False
     return True
 
 sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
@@ -41,9 +51,9 @@ sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorr
 sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')
 
 try:
     raw_input()
 except NameError: # Python 3
     input()
 
 filename = sys.argv[0]
 
@@ -5,6 +5,7 @@ from .downloader import get_suitable_downloader
 
 # This class reproduces the old behaviour of FileDownloader
 class FileDownloader(RealFileDownloader):
+
     def _do_download(self, filename, info_dict):
         real_fd = get_suitable_downloader(info_dict)(self.ydl, self.params)
         for ph in self._progress_hooks:
@@ -61,6 +61,7 @@ from .version import __version__
 
 
 class YoutubeDL(object):
+
     """YoutubeDL class.
 
     YoutubeDL objects are the ones responsible of downloading the
@@ -268,12 +269,12 @@ class YoutubeDL(object):
             return message
 
         assert hasattr(self, '_output_process')
-        assert type(message) == type('')
+        assert isinstance(message, type(''))
         line_count = message.count('\n') + 1
         self._output_process.stdin.write((message + '\n').encode('utf-8'))
         self._output_process.stdin.flush()
         res = ''.join(self._output_channel.readline().decode('utf-8')
                       for _ in range(line_count))
         return res[:-len('\n')]
 
     def to_screen(self, message, skip_eol=False):
@@ -293,7 +294,7 @@ class YoutubeDL(object):
 
     def to_stderr(self, message):
         """Print message to stderr."""
-        assert type(message) == type('')
+        assert isinstance(message, type(''))
         if self.params.get('logger'):
             self.params['logger'].error(message)
         else:
@@ -491,7 +492,7 @@ class YoutubeDL(object):
 
             try:
                 ie_result = ie.extract(url)
                 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
                     break
                 if isinstance(ie_result, list):
                     # Backwards compatibility: old IE result format
@@ -500,17 +501,17 @@ class YoutubeDL(object):
                         'entries': ie_result,
                     }
                 self.add_extra_info(ie_result,
                     {
                         'extractor': ie.IE_NAME,
                         'webpage_url': url,
                         'webpage_url_basename': url_basename(url),
                         'extractor_key': ie.ie_key(),
                     })
                 if process:
                     return self.process_ie_result(ie_result, download, extra_info)
                 else:
                     return ie_result
             except ExtractorError as de: # An error we somewhat expected
                 self.report_error(compat_str(de), de.format_traceback())
                 break
             except MaxDownloadsReached:
@@ -533,7 +534,7 @@ class YoutubeDL(object):
         Returns the resolved ie_result.
         """
 
         result_type = ie_result.get('_type', 'video') # If not given we suppose it's a video, support the default old system
         if result_type == 'video':
             self.add_extra_info(ie_result, extra_info)
             return self.process_video_result(ie_result, download=download)
@@ -624,12 +625,12 @@ class YoutubeDL(object):
         elif result_type == 'compat_list':
             def _fixup(r):
                 self.add_extra_info(r,
                     {
                         'extractor': ie_result['extractor'],
                         'webpage_url': ie_result['webpage_url'],
                         'webpage_url_basename': url_basename(ie_result['webpage_url']),
                         'extractor_key': ie_result['extractor_key'],
                     })
                 return r
             ie_result['entries'] = [
                 self.process_ie_result(_fixup(r), download, extra_info)
@@ -736,7 +737,7 @@ class YoutubeDL(object):
                 # Two formats have been requested like '137+139'
                 format_1, format_2 = rf.split('+')
                 formats_info = (self.select_format(format_1, formats),
                                 self.select_format(format_2, formats))
                 if all(formats_info):
                     selected_format = {
                         'requested_formats': formats_info,
@@ -912,10 +913,10 @@ class YoutubeDL(object):
                 with open(thumb_filename, 'wb') as thumbf:
                     shutil.copyfileobj(uf, thumbf)
                 self.to_screen('[%s] %s: Writing thumbnail to: %s' %
                                (info_dict['extractor'], info_dict['id'], thumb_filename))
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self.report_warning('Unable to download thumbnail "%s": %s' %
                                     (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@@ -934,8 +935,8 @@ class YoutubeDL(object):
                 if not merger._get_executable():
                     postprocessors = []
                     self.report_warning('You have requested multiple '
                         'formats but ffmpeg or avconv are not installed.'
                         ' The formats won\'t be merged')
                 else:
                     postprocessors = [merger]
                 for f in info_dict['requested_formats']:
@@ -978,7 +979,7 @@ class YoutubeDL(object):
 
         for url in url_list:
             try:
-                #It also downloads the videos
+                # It also downloads the videos
                 self.extract_info(url)
             except UnavailableVideoError:
                 self.report_error('unable to download video')
@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
__authors__ = (
|
__authors__ = (
|
||||||
'Ricardo Garcia Gonzalez',
|
'Ricardo Garcia Gonzalez',
|
||||||
'Danny Colligan',
|
'Danny Colligan',
|
||||||
'Benjamin Johnson',
|
'Benjamin Johnson',
|
||||||
@@ -154,7 +154,8 @@ def parseOpts(overrideArguments=None):
         if len(opts) > 1:
             opts.insert(1, ', ')
 
-        if option.takes_value(): opts.append(' %s' % option.metavar)
+        if option.takes_value():
+            opts.append(' %s' % option.metavar)
 
         return "".join(opts)
 
@@ -166,7 +167,7 @@ def parseOpts(overrideArguments=None):
         for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
             try:
                 i = opts.index(private_opt)
-                opts[i+1] = '<PRIVATE>'
+                opts[i + 1] = '<PRIVATE>'
             except ValueError:
                 pass
         return opts
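
# --- Standalone sketch, not part of the diff: the scrubbing performed above,
# --- runnable on its own. The argument that follows each credential flag is
# --- masked before the command line is echoed in verbose output.
def hide_login_info(opts):
    opts = list(opts)
    for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
        try:
            i = opts.index(private_opt)
            opts[i + 1] = '<PRIVATE>'
        except ValueError:
            pass
    return opts

assert hide_login_info(['-u', 'alice', '-p', 's3cret', '-v']) == \
    ['-u', '<PRIVATE>', '-p', '<PRIVATE>', '-v']
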
@@ -176,56 +177,57 @@ def parseOpts(overrideArguments=None):
 
     # No need to wrap help messages if we're on a wide console
     columns = get_term_width()
-    if columns: max_width = columns
+    if columns:
+        max_width = columns
 
     fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
     fmt.format_option_strings = _format_option_string
 
     kw = {
-        'version' : __version__,
+        'version': __version__,
-        'formatter' : fmt,
+        'formatter': fmt,
-        'usage' : '%prog [options] url [url...]',
+        'usage': '%prog [options] url [url...]',
-        'conflict_handler' : 'resolve',
+        'conflict_handler': 'resolve',
     }
 
     parser = optparse.OptionParser(**kw)
 
     # option groups
     general = optparse.OptionGroup(parser, 'General Options')
     selection = optparse.OptionGroup(parser, 'Video Selection')
     authentication = optparse.OptionGroup(parser, 'Authentication Options')
     video_format = optparse.OptionGroup(parser, 'Video Format Options')
     subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
     downloader = optparse.OptionGroup(parser, 'Download Options')
     postproc = optparse.OptionGroup(parser, 'Post-processing Options')
     filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
     verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
 
     general.add_option('-h', '--help',
         action='help', help='print this help text and exit')
     general.add_option('-v', '--version',
         action='version', help='print program version and exit')
     general.add_option('-U', '--update',
         action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
     general.add_option('-i', '--ignore-errors',
         action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
     general.add_option('--abort-on-error',
         action='store_false', dest='ignoreerrors',
         help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
     general.add_option('--dump-user-agent',
         action='store_true', dest='dump_user_agent',
         help='display the current browser identification', default=False)
     general.add_option('--user-agent',
         dest='user_agent', help='specify a custom user agent', metavar='UA')
     general.add_option('--referer',
         dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
         metavar='REF', default=None)
     general.add_option('--list-extractors',
         action='store_true', dest='list_extractors',
         help='List all supported extractors and the URLs they would handle', default=False)
     general.add_option('--extractor-descriptions',
         action='store_true', dest='list_extractor_descriptions',
         help='Output descriptions of all supported extractors', default=False)
     general.add_option(
         '--proxy', dest='proxy', default=None, metavar='URL',
         help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
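
# --- Minimal standalone sketch, not part of the diff: the optparse pattern
# --- used above, reduced to one group and one option. The version string is
# --- a hypothetical placeholder.
import optparse

kw = {
    'version': '2014.01.01',
    'usage': '%prog [options] url [url...]',
    'conflict_handler': 'resolve',
}
parser = optparse.OptionParser(**kw)
general = optparse.OptionGroup(parser, 'General Options')
general.add_option('-i', '--ignore-errors',
                   action='store_true', dest='ignoreerrors', default=False,
                   help='continue on download errors')
parser.add_option_group(general)
opts, args = parser.parse_args(['-i', 'http://example.com/v'])
assert opts.ignoreerrors and args == ['http://example.com/v']
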
@@ -243,14 +245,13 @@ def parseOpts(overrideArguments=None):
         '--bidi-workaround', dest='bidi_workaround', action='store_true',
         help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
     general.add_option('--default-search',
         dest='default_search', metavar='PREFIX',
         help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
     general.add_option(
         '--ignore-config',
         action='store_true',
         help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
 
-
     selection.add_option(
         '--playlist-start',
         dest='playliststart', metavar='NUMBER', default=1, type=int,
@@ -259,8 +260,8 @@ def parseOpts(overrideArguments=None):
         '--playlist-end',
         dest='playlistend', metavar='NUMBER', default=None, type=int,
         help='playlist video to end at (default is last)')
-    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
+    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX', help='download only matching titles (regex or caseless sub-string)')
-    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
+    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX', help='skip download for matching titles (regex or caseless sub-string)')
     selection.add_option('--max-downloads', metavar='NUMBER',
         dest='max_downloads', type=int, default=None,
         help='Abort after downloading NUMBER files')
@@ -298,196 +299,192 @@ def parseOpts(overrideArguments=None):
         help='Try to download the DASH manifest on YouTube videos (experimental)')
 
     authentication.add_option('-u', '--username',
         dest='username', metavar='USERNAME', help='account username')
     authentication.add_option('-p', '--password',
         dest='password', metavar='PASSWORD', help='account password')
     authentication.add_option('-n', '--netrc',
         action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
     authentication.add_option('--video-password',
         dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
 
-
     video_format.add_option('-f', '--format',
         action='store', dest='format', metavar='FORMAT', default=None,
         help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestaudio", "worst", and "worstaudio". By default, youtube-dl will pick the best quality.')
     video_format.add_option('--all-formats',
         action='store_const', dest='format', help='download all available video formats', const='all')
     video_format.add_option('--prefer-free-formats',
         action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
     video_format.add_option('--max-quality',
         action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
     video_format.add_option('-F', '--list-formats',
         action='store_true', dest='listformats', help='list all available formats')
 
     subtitles.add_option('--write-sub', '--write-srt',
         action='store_true', dest='writesubtitles',
         help='write subtitle file', default=False)
     subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
         action='store_true', dest='writeautomaticsub',
         help='write automatic subtitle file (youtube only)', default=False)
     subtitles.add_option('--all-subs',
         action='store_true', dest='allsubtitles',
         help='downloads all the available subtitles of the video', default=False)
     subtitles.add_option('--list-subs',
         action='store_true', dest='listsubtitles',
         help='lists all available subtitles for the video', default=False)
     subtitles.add_option('--sub-format',
         action='store', dest='subtitlesformat', metavar='FORMAT',
         help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
     subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
         action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
         default=[], callback=_comma_separated_values_options_callback,
         help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
 
     downloader.add_option('-r', '--rate-limit',
         dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
     downloader.add_option('-R', '--retries',
         dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
     downloader.add_option('--buffer-size',
         dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
     downloader.add_option('--no-resize-buffer',
         action='store_true', dest='noresizebuffer',
         help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
     downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
 
     verbosity.add_option('-q', '--quiet',
         action='store_true', dest='quiet', help='activates quiet mode', default=False)
     verbosity.add_option('-s', '--simulate',
         action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
     verbosity.add_option('--skip-download',
         action='store_true', dest='skip_download', help='do not download the video', default=False)
     verbosity.add_option('-g', '--get-url',
         action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
     verbosity.add_option('-e', '--get-title',
         action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
     verbosity.add_option('--get-id',
         action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
     verbosity.add_option('--get-thumbnail',
         action='store_true', dest='getthumbnail',
         help='simulate, quiet but print thumbnail URL', default=False)
     verbosity.add_option('--get-description',
         action='store_true', dest='getdescription',
         help='simulate, quiet but print video description', default=False)
     verbosity.add_option('--get-duration',
         action='store_true', dest='getduration',
         help='simulate, quiet but print video length', default=False)
     verbosity.add_option('--get-filename',
         action='store_true', dest='getfilename',
         help='simulate, quiet but print output filename', default=False)
     verbosity.add_option('--get-format',
         action='store_true', dest='getformat',
         help='simulate, quiet but print output format', default=False)
     verbosity.add_option('-j', '--dump-json',
         action='store_true', dest='dumpjson',
         help='simulate, quiet but print JSON information', default=False)
     verbosity.add_option('--newline',
         action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
     verbosity.add_option('--no-progress',
         action='store_true', dest='noprogress', help='do not print progress bar', default=False)
     verbosity.add_option('--console-title',
         action='store_true', dest='consoletitle',
         help='display progress in console titlebar', default=False)
     verbosity.add_option('-v', '--verbose',
         action='store_true', dest='verbose', help='print various debugging information', default=False)
     verbosity.add_option('--dump-intermediate-pages',
         action='store_true', dest='dump_intermediate_pages', default=False,
         help='print downloaded pages to debug problems (very verbose)')
     verbosity.add_option('--write-pages',
         action='store_true', dest='write_pages', default=False,
         help='Write downloaded intermediary pages to files in the current directory to debug problems')
     verbosity.add_option('--youtube-print-sig-code',
         action='store_true', dest='youtube_print_sig_code', default=False,
         help=optparse.SUPPRESS_HELP)
     verbosity.add_option('--print-traffic',
         dest='debug_printtraffic', action='store_true', default=False,
         help='Display sent and read HTTP traffic')
 
-
     filesystem.add_option('-t', '--title',
         action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
     filesystem.add_option('--id',
         action='store_true', dest='useid', help='use only video ID in file name', default=False)
     filesystem.add_option('-l', '--literal',
         action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
     filesystem.add_option('-A', '--auto-number',
         action='store_true', dest='autonumber',
         help='number downloaded files starting from 00000', default=False)
     filesystem.add_option('-o', '--output',
         dest='outtmpl', metavar='TEMPLATE',
         help=('output filename template. Use %(title)s to get the title, '
               '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
               '%(autonumber)s to get an automatically incremented number, '
               '%(ext)s for the filename extension, '
               '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
               '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
               '%(upload_date)s for the upload date (YYYYMMDD), '
               '%(extractor)s for the provider (youtube, metacafe, etc), '
               '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
               '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
               'Use - to output to stdout. Can also be used to download to a different directory, '
               'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
     filesystem.add_option('--autonumber-size',
         dest='autonumber_size', metavar='NUMBER',
         help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
     filesystem.add_option('--restrict-filenames',
         action='store_true', dest='restrictfilenames',
         help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
     filesystem.add_option('-a', '--batch-file',
         dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
     filesystem.add_option('--load-info',
         dest='load_info_filename', metavar='FILE',
         help='json file containing the video information (created with the "--write-json" option)')
     filesystem.add_option('-w', '--no-overwrites',
         action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
     filesystem.add_option('-c', '--continue',
         action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
     filesystem.add_option('--no-continue',
         action='store_false', dest='continue_dl',
         help='do not resume partially downloaded files (restart from beginning)')
     filesystem.add_option('--cookies',
         dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
     filesystem.add_option('--no-part',
         action='store_true', dest='nopart', help='do not use .part files', default=False)
     filesystem.add_option('--no-mtime',
         action='store_false', dest='updatetime',
         help='do not use the Last-modified header to set the file modification time', default=True)
     filesystem.add_option('--write-description',
         action='store_true', dest='writedescription',
         help='write video description to a .description file', default=False)
     filesystem.add_option('--write-info-json',
         action='store_true', dest='writeinfojson',
         help='write video metadata to a .info.json file', default=False)
     filesystem.add_option('--write-annotations',
         action='store_true', dest='writeannotations',
         help='write video annotations to a .annotation file', default=False)
     filesystem.add_option('--write-thumbnail',
         action='store_true', dest='writethumbnail',
         help='write thumbnail image to disk', default=False)
 
-
     postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
         help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
     postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
         help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
     postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
         help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
     postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
         help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm)')
     postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
         help='keeps the video file on disk after the post-processing; the video is erased by default')
     postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
         help='do not overwrite post-processed files; the post-processed files are overwritten by default')
     postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
         help='embed subtitles in the video (only for mp4 videos)')
     postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
         help='write metadata to the video file')
     postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
         help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
     postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
         help='Prefer avconv over ffmpeg for running the postprocessors (default)')
     postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
         help='Prefer ffmpeg over avconv for running the postprocessors')
 
-
     parser.add_option_group(general)
     parser.add_option_group(selection)
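
# --- Standalone sketch, not part of the diff: the callback wired to
# --- --sub-lang above. optparse hands the raw string to the callback, which
# --- stores it as a list; this mirrors _comma_separated_values_options_callback.
import optparse

def comma_separated_callback(option, opt_str, value, parser):
    setattr(parser.values, option.dest, value.split(','))

parser = optparse.OptionParser()
parser.add_option('--sub-lang', action='callback', type='str', default=[],
                  dest='subtitleslangs', metavar='LANGS',
                  callback=comma_separated_callback)
opts, _ = parser.parse_args(['--sub-lang', 'en,pt'])
assert opts.subtitleslangs == ['en', 'pt']
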
@@ -593,7 +590,6 @@ def _real_main(argv=None):
             compat_print(desc)
         sys.exit(0)
 
-
     # Conflicting, missing and erroneous options
     if opts.usenetrc and (opts.username is not None or opts.password is not None):
         parser.error(u'using .netrc conflicts with giving username/password')
@@ -657,21 +653,21 @@ def _real_main(argv=None):
 
     # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
-    if opts.allsubtitles and (opts.writeautomaticsub == False):
+    if opts.allsubtitles and not opts.writeautomaticsub:
         opts.writesubtitles = True
 
     if sys.version_info < (3,):
         # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
         if opts.outtmpl is not None:
             opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
             or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
             or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
             or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
             or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
             or (opts.useid and u'%(id)s.%(ext)s')
             or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
             or u'%(title)s-%(id)s.%(ext)s')
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error(u'Cannot download a video and extract audio into the same'
             u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
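
# --- Standalone sketch, not part of the diff: the and/or cascade that picks
# --- outtmpl above. Each clause yields its template only when its guards are
# --- truthy, and the first truthy clause wins, so order encodes precedence.
class Opts(object):
    outtmpl, format, usetitle, autonumber, useid = None, None, True, False, False

opts = Opts()
outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
           or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
           or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
           or (opts.useid and u'%(id)s.%(ext)s')
           or u'%(title)s-%(id)s.%(ext)s')
assert outtmpl == u'%(title)s-%(id)s.%(ext)s'
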
@@ -7,10 +7,11 @@ from .utils import bytes_to_intlist, intlist_to_bytes
 
 BLOCK_SIZE_BYTES = 16
 
+
 def aes_ctr_decrypt(data, key, counter):
     """
     Decrypt with aes in counter mode
 
     @param {int[]} data        cipher
     @param {int[]} key         16/24/32-Byte cipher key
     @param {instance} counter  Instance whose next_value function (@returns {int[]} 16-Byte block)
@@ -19,23 +20,24 @@ def aes_ctr_decrypt(data, key, counter):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
 
-    decrypted_data=[]
+    decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
     decrypted_data = decrypted_data[:len(data)]
 
     return decrypted_data
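
# --- Quick standalone illustration, not part of the diff: the slice-and-pad
# --- idiom used in the loop above. The final block is zero-padded to the
# --- block size, and the result is trimmed back to len(data) afterwards.
BLOCK = 16
data = list(range(20))                   # 20 bytes -> 2 blocks, last one short
blocks = []
for i in range(-(-len(data) // BLOCK)):  # ceil division
    block = data[i * BLOCK: (i + 1) * BLOCK]
    block += [0] * (BLOCK - len(block))  # zero-pad the short tail block
    blocks.append(block)
assert [len(b) for b in blocks] == [16, 16]
assert sum(blocks, [])[:len(data)] == data  # trimming recovers the input
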
 
+
 def aes_cbc_decrypt(data, key, iv):
     """
     Decrypt with aes in CBC mode
 
     @param {int[]} data  cipher
     @param {int[]} key   16/24/32-Byte cipher key
     @param {int[]} iv    16-Byte IV
@@ -43,94 +45,98 @@ def aes_cbc_decrypt(data, key, iv):
     """
     expanded_key = key_expansion(key)
     block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
 
-    decrypted_data=[]
+    decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block += [0] * (BLOCK_SIZE_BYTES - len(block))
 
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
         previous_cipher_block = block
     decrypted_data = decrypted_data[:len(data)]
 
     return decrypted_data
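
# --- Standalone sketch, not part of the diff: the CBC chaining rule the loop
# --- above implements, plain_i = decrypt(cipher_i) XOR cipher_{i-1}, with the
# --- IV standing in for cipher_0. A do-nothing block cipher keeps it tiny.
def xor(a, b):
    return [x ^ y for x, y in zip(a, b)]

def encrypt_block(block):  # hypothetical stand-in for aes_encrypt
    return block           # identity "cipher", enough to show the chaining

iv = [7] * 4
plain = [[1, 2, 3, 4], [5, 6, 7, 8]]
cipher, prev = [], iv
for p in plain:                          # encrypt: c_i = E(p_i XOR c_{i-1})
    c = encrypt_block(xor(p, prev))
    cipher.append(c)
    prev = c
recovered, prev = [], iv
for c in cipher:                         # decrypt: p_i = D(c_i) XOR c_{i-1}
    recovered.append(xor(encrypt_block(c), prev))  # identity: D == E
    prev = c
assert recovered == plain
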
 
+
 def key_expansion(data):
     """
     Generate key schedule
 
     @param {int[]} data   16/24/32-Byte cipher key
     @returns {int[]}      176/208/240-Byte expanded key
     """
     data = data[:]  # copy
     rcon_iteration = 1
     key_size_bytes = len(data)
     expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
 
     while len(data) < expanded_key_size_bytes:
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
 
         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
     data = data[:expanded_key_size_bytes]
 
     return data
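
# --- Quick standalone check, not part of the diff: the size formula above
# --- expands a 16/24/32-byte key to (len // 4 + 7) * 16 bytes, i.e. the
# --- 176/208/240 bytes the docstring promises.
BLOCK_SIZE_BYTES = 16
for key_len, expected in ((16, 176), (24, 208), (32, 240)):
    assert (key_len // 4 + 7) * BLOCK_SIZE_BYTES == expected
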
 
+
 def aes_encrypt(data, expanded_key):
     """
     Encrypt one block with aes
 
     @param {int[]} data          16-Byte state
     @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte cipher
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds + 1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
 
     return data
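
# --- Quick standalone check, not part of the diff: the round count derived
# --- above, rounds = len(expanded_key) // 16 - 1, gives the standard
# --- 10/12/14 rounds for AES-128/192/256.
for expanded_len, rounds in ((176, 10), (208, 12), (240, 14)):
    assert expanded_len // 16 - 1 == rounds
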
 
+
 def aes_decrypt(data, expanded_key):
     """
     Decrypt one block with aes
 
     @param {int[]} data          16-Byte cipher
     @param {int[]} expanded_key  176/208/240-Byte expanded key
     @returns {int[]}             16-Byte state
     """
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
 
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
         data = sub_bytes_inv(data)
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
 
     return data
 
+
 def aes_decrypt_text(data, password, key_size_bytes):
     """
     Decrypt text
@@ -138,33 +144,34 @@ def aes_decrypt_text(data, password, key_size_bytes):
     - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
       with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
     - Mode of operation is 'counter'
 
     @param {str} data              Base64 encoded string
     @param {str,unicode} password  Password (will be encoded with utf-8)
     @param {int} key_size_bytes    Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
     @returns {str}                 Decrypted data
     """
     NONCE_LENGTH_BYTES = 8
 
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
 
-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
 
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
 
     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+
         def next_value(self):
             temp = self.__value
             self.__value = inc(self.__value)
             return temp
 
     decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
     plaintext = intlist_to_bytes(decrypted_data)
 
     return plaintext
 
 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
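
# --- Standalone sketch, not part of the diff: the 16-byte CTR block layout
# --- used above, an 8-byte nonce followed by an 8-byte big-endian counter
# --- that inc() bumps once per block (inc is completed here as a sketch).
def inc(data):
    data = data[:]
    for i in range(len(data) - 1, -1, -1):
        if data[i] == 255:
            data[i] = 0
        else:
            data[i] = data[i] + 1
            break
    return data

nonce = [0xAB] * 8
block0 = nonce + [0] * 8
assert inc(block0) == nonce + [0] * 7 + [1]
assert inc(nonce + [0] * 7 + [255]) == nonce + [0] * 6 + [1, 0]  # carry
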
@@ -200,14 +207,14 @@ SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x
             0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
             0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
             0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
-MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
+MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
-                     (0x1,0x2,0x3,0x1),
+                     (0x1, 0x2, 0x3, 0x1),
-                     (0x1,0x1,0x2,0x3),
+                     (0x1, 0x1, 0x2, 0x3),
-                     (0x3,0x1,0x1,0x2))
+                     (0x3, 0x1, 0x1, 0x2))
-MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
+MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
-                         (0x9,0xE,0xB,0xD),
+                         (0x9, 0xE, 0xB, 0xD),
-                         (0xD,0x9,0xE,0xB),
+                         (0xD, 0x9, 0xE, 0xB),
-                         (0xB,0xD,0x9,0xE))
+                         (0xB, 0xD, 0x9, 0xE))
 RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                       0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                       0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
@@ -241,30 +248,37 @@ RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7
                       0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                       0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
 
+
 def sub_bytes(data):
     return [SBOX[x] for x in data]
 
+
 def sub_bytes_inv(data):
     return [SBOX_INV[x] for x in data]
 
+
 def rotate(data):
     return data[1:] + [data[0]]
 
+
 def key_schedule_core(data, rcon_iteration):
     data = rotate(data)
     data = sub_bytes(data)
     data[0] = data[0] ^ RCON[rcon_iteration]
 
     return data
 
+
 def xor(data1, data2):
-    return [x^y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2)]
 
+
 def rijndael_mul(a, b):
-    if(a==0 or b==0):
+    if(a == 0 or b == 0):
         return 0
     return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
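
# --- Standalone sketch, not part of the diff: GF(2^8) multiplication via
# --- log/exp tables, as rijndael_mul does above. The tables are rebuilt here
# --- with generator 3 so the example is self-contained; log(a) + log(b)
# --- mod 255 equals log(a*b) for nonzero a, b.
exp_table, log_table = [], [0] * 256
x = 1
for i in range(255):
    exp_table.append(x)
    log_table[x] = i
    # multiply x by the generator 0x03: x XOR xtime(x), reduced mod 0x11b
    x ^= (x << 1) ^ (0x11b if x & 0x80 else 0)

def gf_mul(a, b):
    if a == 0 or b == 0:
        return 0
    return exp_table[(log_table[a] + log_table[b]) % 255]

assert gf_mul(0x57, 0x83) == 0xc1  # known AES multiplication test vector
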
 
+
 def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
@@ -275,33 +289,38 @@ def mix_column(data, matrix):
         data_mixed.append(mixed)
     return data_mixed
 
+
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4 : (i+1)*4]
+        column = data[i * 4: (i + 1) * 4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
 
+
 def mix_columns_inv(data):
     return mix_columns(data, MIX_COLUMN_MATRIX_INV)
 
+
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
     return data_shifted
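
# --- Standalone sketch, not part of the diff: shift_rows on a column-major
# --- 4x4 state. Index (column*4 + row) holds row `row` of column `column`;
# --- row r rotates left by r, hence the ((column + row) & 0b11) source index.
def shift_rows(data):
    return [data[((column + row) & 0b11) * 4 + row]
            for column in range(4) for row in range(4)]

state = list(range(16))  # column-major: state[c*4 + r]
shifted = shift_rows(state)
assert [shifted[c * 4 + 0] for c in range(4)] == [0, 4, 8, 12]  # row 0 fixed
assert [shifted[c * 4 + 1] for c in range(4)] == [5, 9, 13, 1]  # row 1 rotated
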
 
+
 def shift_rows_inv(data):
     data_shifted = []
     for column in range(4):
         for row in range(4):
-            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
     return data_shifted
 
+
 def inc(data):
     data = data[:]  # copy
-    for i in range(len(data)-1,-1,-1):
+    for i in range(len(data) - 1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
@@ -11,6 +11,7 @@ from ..utils import (
 
 
 class FileDownloader(object):
+
     """File Downloader class.
 
     File downloader objects are the ones responsible of downloading the
@@ -77,7 +78,7 @@ class FileDownloader(object):
         if total is None:
             return None
         dif = now - start
-        if current == 0 or dif < 0.001: # One millisecond
+        if current == 0 or dif < 0.001:  # One millisecond
             return None
         rate = float(current) / dif
         return int((float(total) - float(current)) / rate)
@ -91,7 +92,7 @@ class FileDownloader(object):
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def calc_speed(start, now, bytes):
|
def calc_speed(start, now, bytes):
|
||||||
dif = now - start
|
dif = now - start
|
||||||
if bytes == 0 or dif < 0.001: # One millisecond
|
if bytes == 0 or dif < 0.001: # One millisecond
|
||||||
return None
|
return None
|
||||||
return float(bytes) / dif
|
return float(bytes) / dif
|
||||||
|
|
||||||
@ -104,7 +105,7 @@ class FileDownloader(object):
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def best_block_size(elapsed_time, bytes):
|
def best_block_size(elapsed_time, bytes):
|
||||||
new_min = max(bytes / 2.0, 1.0)
|
new_min = max(bytes / 2.0, 1.0)
|
||||||
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
|
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
|
||||||
if elapsed_time < 0.001:
|
if elapsed_time < 0.001:
|
||||||
return int(new_max)
|
return int(new_max)
|
||||||
rate = bytes / elapsed_time
|
rate = bytes / elapsed_time
|
||||||
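
Note: calc_eta above is plain proportional extrapolation: the achieved rate so far divides the remaining byte count. A worked example of the same formula, standalone:

    def calc_eta(start, now, total, current):
        if total is None:
            return None
        dif = now - start
        if current == 0 or dif < 0.001:  # one millisecond
            return None
        rate = float(current) / dif  # bytes per second achieved so far
        return int((float(total) - float(current)) / rate)

    # 2 MB of 10 MB done after 4 seconds -> 0.5 MB/s -> 16 seconds left
    print(calc_eta(0, 4, 10 * 1024 * 1024, 2 * 1024 * 1024))  # 16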

@ -21,6 +21,7 @@ from ..utils import (


class FlvReader(io.BytesIO):

    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
@ -55,7 +56,7 @@ class FlvReader(io.BytesIO):
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size-header_end)
        return real_size, box_type, self.read(real_size - header_end)

    def read_asrt(self):
        # version
@ -180,7 +181,7 @@ def build_fragments_list(boot_info):
    n_frags = segment_run_entry[1]
    fragment_run_entry_table = boot_info['fragments'][0]['fragments']
    first_frag_number = fragment_run_entry_table[0]['first']
    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
    for (i, frag_number) in zip(range(1, n_frags + 1), itertools.count(first_frag_number)):
        res.append((1, frag_number))
    return res

@ -210,11 +211,13 @@ def _add_ns(prop):


class HttpQuietDownloader(HttpFD):

    def to_screen(self, *args, **kargs):
        pass


class F4mFD(FileDownloader):

    """
    A downloader for f4m manifests or AdobeHDS.
    """
@ -225,12 +228,12 @@ class F4mFD(FileDownloader):
        manifest = self.ydl.urlopen(man_url).read()
        self.report_destination(filename)
        http_dl = HttpQuietDownloader(self.ydl,
            {
                'continuedl': True,
                'quiet': True,
                'noprogress': True,
                'test': self.params.get('test', False),
            })

        doc = etree.fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
@ -261,7 +264,7 @@ class F4mFD(FileDownloader):
        def frag_progress_hook(status):
            frag_total_bytes = status.get('total_bytes', 0)
            estimated_size = (state['downloaded_bytes'] +
                (total_frags - state['frag_counter']) * frag_total_bytes)
            if status['status'] == 'finished':
                state['downloaded_bytes'] += frag_total_bytes
                state['frag_counter'] += 1
@ -271,13 +274,13 @@ class F4mFD(FileDownloader):
                frag_downloaded_bytes = status['downloaded_bytes']
                byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                frag_progress = self.calc_percent(frag_downloaded_bytes,
                    frag_total_bytes)
                progress = self.calc_percent(state['frag_counter'], total_frags)
                progress += frag_progress / float(total_frags)

            eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
            self.report_progress(progress, format_bytes(estimated_size),
                status.get('speed'), eta)
        http_dl.add_progress_hook(frag_progress_hook)

        frags_filenames = []
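
Note: the hook above blends per-fragment progress into an overall figure: each finished fragment is worth 1/total_frags, and the in-flight fragment contributes its own percentage scaled by the same factor. A toy sketch of that arithmetic, assuming calc_percent is the plain percentage from the downloader base class:

    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    total_frags = 4
    state = {'downloaded_bytes': 3000, 'frag_counter': 2}  # two fragments done
    frag_total_bytes, frag_downloaded_bytes = 1500, 750    # half of the third

    progress = calc_percent(state['frag_counter'], total_frags)  # 50.0
    progress += calc_percent(frag_downloaded_bytes, frag_total_bytes) / float(total_frags)
    print(progress)  # 62.5, i.e. 2.5 fragments out of 4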

@ -8,13 +8,14 @@ from ..utils import (


class HlsFD(FileDownloader):

    def real_download(self, filename, info_dict):
        url = info_dict['url']
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
            '-bsf:a', 'aac_adtstoasc', tmpfilename]

        for program in ['avconv', 'ffmpeg']:
            try:
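
Note: HlsFD delegates the actual HLS handling to an external binary: it builds one argument list and tries avconv first, then ffmpeg, since both accept this command line. A sketch of that fallback shape under assumed names (the file names are hypothetical, and the real method also inspects the exit code):

    import subprocess

    url = 'https://example.com/stream.m3u8'  # hypothetical input
    tmpfilename = 'out.mp4.part'             # hypothetical output
    args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
            '-bsf:a', 'aac_adtstoasc', tmpfilename]

    for program in ['avconv', 'ffmpeg']:
        try:
            subprocess.call([program] + args)
            break  # found a working binary, stop trying alternatives
        except OSError:
            continue  # binary not installed, try the next candidate
    else:
        raise RuntimeError('neither avconv nor ffmpeg was found')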

@ -14,6 +14,7 @@ from ..utils import (


class HttpFD(FileDownloader):

    def real_download(self, filename, info_dict):
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)

@ -8,6 +8,7 @@ from ..utils import (


class MplayerFD(FileDownloader):

    def real_download(self, filename, info_dict):
        url = info_dict['url']
        self.report_destination(filename)

@ -12,6 +12,7 @@ from ..utils import (


class RtmpFD(FileDownloader):

    def real_download(self, filename, info_dict):
        def run_rtmpdump(args):
            start = time.time()
@ -36,13 +37,13 @@ class RtmpFD(FileDownloader):
                    continue
                mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                if mobj:
                    downloaded_data_len = int(float(mobj.group(1))*1024)
                    downloaded_data_len = int(float(mobj.group(1)) * 1024)
                    percent = float(mobj.group(2))
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
@ -62,7 +63,7 @@ class RtmpFD(FileDownloader):
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                    if mobj:
                        downloaded_data_len = int(float(mobj.group(1))*1024)
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@ -78,7 +79,7 @@ class RtmpFD(FileDownloader):
                    if not cursor_in_new_line:
                        self.to_screen(u'')
                        cursor_in_new_line = True
                    self.to_screen(u'[rtmpdump] '+line)
                    self.to_screen(u'[rtmpdump] ' + line)
            proc.wait()
            if not cursor_in_new_line:
                self.to_screen(u'')
@ -157,7 +158,7 @@ class RtmpFD(FileDownloader):
        while (retval == 2 or retval == 1) and not test:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == 1:
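
Note: run_rtmpdump drives a subprocess and scrapes rtmpdump's human-readable stderr with the first regex above; group 1 is kilobytes received and group 2 a percentage, from which the expected total is back-computed. The same parsing against a made-up progress line:

    import re

    line = '123.456 kB / 12.34 sec (9.5%)'  # made-up rtmpdump output
    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
    if mobj:
        downloaded_data_len = int(float(mobj.group(1)) * 1024)  # kB -> bytes
        percent = float(mobj.group(2))
        data_len = int(downloaded_data_len * 100 / percent)     # extrapolated total
        print(downloaded_data_len, percent, data_len)           # 126418 9.5 1330715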

@ -306,4 +306,4 @@ def gen_extractors():

def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    return globals()[ie_name+'IE']
    return globals()[ie_name + 'IE']
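
Note: get_info_extractor leans on a module-wide naming convention: every extractor class is named with the IE name plus the literal suffix 'IE', so the class object can be looked up by string in the module namespace. The same pattern with a dummy class:

    class YoutubeIE(object):  # dummy stand-in for a real extractor class
        pass

    def get_info_extractor(ie_name):
        """Returns the info extractor class with the given ie_name"""
        return globals()[ie_name + 'IE']

    print(get_info_extractor('Youtube'))  # <class '__main__.YoutubeIE'>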

@ -79,7 +79,7 @@ class AddAnimeIE(InfoExtractor):

        return {
            '_type': 'video',
            'id': video_id,
            'formats': formats,
            'title': video_title,
            'description': video_description

@ -28,7 +28,7 @@ class AnitubeIE(InfoExtractor):
                                     webpage, u'key')

        config_xml = self._download_xml('http://www.anitube.se/nuevo/econfig.php?key=%s' % key,
                                        key)

        video_title = config_xml.find('title').text


@ -1,4 +1,4 @@
#coding: utf-8
# coding: utf-8

import re

@ -19,7 +19,7 @@ class AparatIE(InfoExtractor):
        u'info_dict': {
            u"title": u"تیم گلکسی 11 - زومیت",
        },
        #u'skip': u'Extremely unreliable',
        # u'skip': u'Extremely unreliable',
    }

    def _real_extract(self, url):

@ -66,11 +66,13 @@ class AppleTrailersIE(InfoExtractor):
        uploader_id = mobj.group('company')

        playlist_url = compat_urlparse.urljoin(url, u'includes/playlists/itunes.inc')

        def fix_html(s):
            s = re.sub(r'(?s)<script[^<]*?>.*?</script>', u'', s)
            s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
            # The ' in the onClick attributes are not escaped, it couldn't be parsed
            # like: http://trailers.apple.com/trailers/wb/gravity/

            def _clean_json(m):
                return u'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
            s = re.sub(self._JSON_RE, _clean_json, s)
@ -82,7 +84,7 @@ class AppleTrailersIE(InfoExtractor):
        for li in doc.findall('./div/ul/li'):
            on_click = li.find('.//a').attrib['onClick']
            trailer_info_json = self._search_regex(self._JSON_RE,
                on_click, u'trailer info')
            trailer_info = json.loads(trailer_info_json)
            title = trailer_info['title']
            video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()

@ -15,10 +15,11 @@ from ..utils import (
    get_element_by_attribute,
)

# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.


class ArteTvIE(InfoExtractor):
    _VIDEOS_URL = r'(?:http://)?videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
    _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
@ -86,6 +87,7 @@ class ArteTvIE(InfoExtractor):
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))

        def _key(m):
            quality = m.group('quality')
            if quality == 'hd':
@ -95,7 +97,7 @@ class ArteTvIE(InfoExtractor):
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = list(video_urls)[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
            config_xml, 'thumbnail')
@ -111,7 +113,7 @@ class ArteTvIE(InfoExtractor):
        webpage = self._download_webpage(url, name)
        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, 'event id')
        config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
            video_id, 'Downloading information')
        event_doc = config_doc.find('event')
        url_node = event_doc.find('video').find('urlHd')
        if url_node is None:
@ -164,6 +166,7 @@ class ArteTVPlus7IE(InfoExtractor):
        all_formats = player_info['VSR'].values()
        # Some formats use the m3u8 protocol
        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))

        def _match_lang(f):
            if f.get('versionCode') is None:
                return True
@ -176,7 +179,7 @@ class ArteTVPlus7IE(InfoExtractor):
            return any(re.match(r, f['versionCode']) for r in regexes)
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, all_formats)
        formats = list(formats)  # in python3 filter returns an iterator
        if not formats:
            # Some videos are only available in the 'Originalversion'
            # they aren't tagged as being in French or German
@ -192,14 +195,15 @@ class ArteTVPlus7IE(InfoExtractor):
        def sort_key(f):
            return (
                # Sort first by quality
                int(f.get('height',-1)),
                int(f.get('height', -1)),
                int(f.get('bitrate',-1)),
                int(f.get('bitrate', -1)),
                # The original version with subtitles has lower relevance
                re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
                # The version with sourds/mal subtitles has also lower relevance
                re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
            )
        formats = sorted(formats, key=sort_key)

        def _format(format_info):
            quality = ''
            height = format_info.get('height')

@ -26,7 +26,7 @@ class AUEngineIE(InfoExtractor):
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
            webpage, 'title')
        title = title.strip()
        links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
        links = map(compat_urllib_parse.unquote, links)
@ -45,8 +45,8 @@ class AUEngineIE(InfoExtractor):
            title = title[:-len(ext)]

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
        }

@ -18,7 +18,7 @@ class BambuserIE(InfoExtractor):
    _TEST = {
        'url': 'http://bambuser.com/v/4050584',
        # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388
        #u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
        # u'md5': 'fba8f7693e48fd4e8641b3fd5539a641',
        'info_dict': {
            'id': '4050584',
            'ext': 'flv',
@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        info_url = ('http://player-c.api.bambuser.com/getVideo.json?'
            '&api_key=%s&vid=%s' % (self._API_KEY, video_id))
        info_json = self._download_webpage(info_url, video_id)
        info = json.loads(info_json)['result']

@ -67,14 +67,14 @@ class BambuserChannelIE(InfoExtractor):
        last_id = ''
        for i in itertools.count(1):
            req_url = ('http://bambuser.com/xhr-api/index.php?username={user}'
                '&sort=created&access_mode=0%2C1%2C2&limit={count}'
                '&method=broadcast&format=json&vid_older_than={last}'
                ).format(user=user, count=self._STEP, last=last_id)
            req = compat_urllib_request.Request(req_url)
            # Without setting this header, we wouldn't get any result
            req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
            info_json = self._download_webpage(req, user,
                'Downloading page %d' % i)
            results = json.loads(info_json)['result']
            if len(results) == 0:
                break
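
Note: BambuserChannelIE pages through the channel API with itertools.count, anchoring each request on the oldest video id seen so far and stopping at the first empty result set. The loop skeleton, with a hypothetical fetch_page standing in for the xhr-api request:

    import itertools

    def fetch_page(last_id):  # hypothetical stand-in for the API call
        pages = {'': [{'vid': 30}, {'vid': 20}], 20: [{'vid': 10}], 10: []}
        return pages[last_id]

    entries, last_id = [], ''
    for i in itertools.count(1):
        results = fetch_page(last_id)
        if len(results) == 0:
            break  # no older videos left
        entries.extend(results)
        last_id = results[-1]['vid']  # next page starts below the oldest id seen
    print([e['vid'] for e in entries])  # [30, 20, 10]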

@ -79,12 +79,12 @@ class BandcampIE(InfoExtractor):
            initial_url = mp3_info['url']
            re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
            m_url = re.match(re_url, initial_url)
            #We build the url we will use to get the final track url
            # We build the url we will use to get the final track url
            # This url is build in Bandcamp in the script download_bunde_*.js
            request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), video_id, m_url.group('ts'))
            final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
            # If we could correctly generate the .rand field the url would be
            #in the "download_url" key
            # in the "download_url" key
            final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

            return {

@ -163,7 +163,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
        group_id = mobj.group('id')

        playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
            'Downloading playlist XML')

        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
        if no_items is not None:
@ -190,7 +190,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
                duration = int(item.get('duration'))

                media_selection = self._download_xml(
                    'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
                    programme_id, 'Downloading media selection XML')

                for media in self._extract_medias(media_selection):
@ -215,4 +215,4 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }

@ -14,6 +14,7 @@ from ..utils import (


class BlipTVIE(SubtitlesInfoExtractor):

    """Information extractor for blip.tv"""

    _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(?P<presumptive_id>.+)$'
@ -56,7 +57,7 @@ class BlipTVIE(SubtitlesInfoExtractor):
            video_id = self._search_regex(
                r'data-episode-id="([0-9]+)', info_page, 'video_id')
            return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV')

        cchar = '&' if '?' in url else '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)

@ -29,7 +29,7 @@ class Canalc2IE(InfoExtractor):

        title = self._html_search_regex(
            r'class="evenement8">(.*?)</a>', webpage, u'title')

        return {'id': video_id,
                'ext': 'mp4',
                'url': video_url,

@ -30,15 +30,15 @@ class CanalplusIE(InfoExtractor):
        webpage = self._download_webpage(url, mobj.group('path'))
        video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id')
        info_url = self._VIDEO_INFO_TEMPLATE % video_id
        doc = self._download_xml(info_url,video_id,
        doc = self._download_xml(info_url, video_id,
            u'Downloading video info')

        self.report_extraction(video_id)
        video_info = [video for video in doc if video.find('ID').text == video_id][0]
        infos = video_info.find('INFOS')
        media = video_info.find('MEDIA')
        formats = [media.find('VIDEOS/%s' % format)
            for format in ['BAS_DEBIT', 'HAUT_DEBIT', 'HD']]
        video_url = [format.text for format in formats if format is not None][-1]

        return {'id': video_id,

@ -5,7 +5,9 @@ import re
from .common import InfoExtractor
from ..utils import ExtractorError


class Channel9IE(InfoExtractor):

    '''
    Common extractor for channel9.msdn.com.

@ -31,7 +33,7 @@ class Channel9IE(InfoExtractor):
                'session_code': 'KOS002',
                'session_day': 'Day 1',
                'session_room': 'Arena 1A',
                'session_speakers': [ 'Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen' ],
                'session_speakers': ['Ed Blankenship', 'Andrew Coates', 'Brady Gaster', 'Patrick Klug', 'Mads Kristensen'],
            },
        },
        {
@ -44,7 +46,7 @@ class Channel9IE(InfoExtractor):
                'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
                'duration': 1540,
                'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
                'authors': [ 'Mike Wilmot' ],
                'authors': ['Mike Wilmot'],
            },
        }
    ]
@ -83,7 +85,7 @@ class Channel9IE(InfoExtractor):
            'format_id': x.group('quality'),
            'format_note': x.group('note'),
            'format': '%s (%s)' % (x.group('quality'), x.group('note')),
            'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
            'preference': self._known_formats.index(x.group('quality')),
            'vcodec': 'none' if x.group('note') == 'Audio only' else None,
        } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
@ -94,7 +96,7 @@ class Channel9IE(InfoExtractor):

    def _extract_title(self, html):
        title = self._html_search_meta('title', html, 'title')
        if title is None:
            title = self._og_search_title(html)
        TITLE_SUFFIX = ' (Channel 9)'
        if title is not None and title.endswith(TITLE_SUFFIX):
@ -167,7 +169,7 @@ class Channel9IE(InfoExtractor):
        return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)

    def _extract_content(self, html, content_path):
        # Look for downloadable content
        formats = self._formats_from_html(html)
        slides = self._extract_slides(html)
        zip_ = self._extract_zip(html)
@ -196,23 +198,23 @@ class Channel9IE(InfoExtractor):
            'rating_count': rating_count,
            'view_count': view_count,
            'comment_count': comment_count,
        }

        result = []

        if slides is not None:
            d = common.copy()
            d.update({ 'title': title + '-Slides', 'url': slides })
            d.update({'title': title + '-Slides', 'url': slides})
            result.append(d)

        if zip_ is not None:
            d = common.copy()
            d.update({ 'title': title + '-Zip', 'url': zip_ })
            d.update({'title': title + '-Zip', 'url': zip_})
            result.append(d)

        if len(formats) > 0:
            d = common.copy()
            d.update({ 'title': title, 'formats': formats })
            d.update({'title': title, 'formats': formats})
            result.append(d)

        return result
@ -265,9 +267,9 @@ class Channel9IE(InfoExtractor):
        page_type = page_type_m.group('pagetype')
        if page_type == 'List':  # List page, may contain list of 'item'-like objects
            return self._extract_list(content_path)
        elif page_type == 'Entry.Item':  # Any 'item'-like page, may contain downloadable content
            return self._extract_entry_item(webpage, content_path)
        elif page_type == 'Session':  # Event session page, may contain downloadable content
            return self._extract_session(webpage, content_path)
        else:
            raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)

@ -22,24 +22,24 @@ class CinemassacreIE(InfoExtractor):
                u'skip_download': True,
            },
        },
        {
            u'url': u'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
            u'file': u'521be8ef82b16.flv',
            u'info_dict': {
                u'upload_date': u'20131002',
                u'title': u'The Mummy’s Hand (1940)',
            },
            u'params': {
                # rtmp download
                u'skip_download': True,
            },
        }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        webpage_url = u'http://' + mobj.group('url')
        webpage = self._download_webpage(webpage_url, None)  # Don't know video id yet
        video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
        if not mobj:
@ -48,9 +48,9 @@ class CinemassacreIE(InfoExtractor):
        video_id = mobj.group(u'video_id')

        video_title = self._html_search_regex(r'<title>(?P<title>.+?)\|',
            webpage, u'title')
        video_description = self._html_search_regex(r'<div class="entry-content">(?P<description>.+?)</div>',
            webpage, u'description', flags=re.DOTALL, fatal=False)
        if len(video_description) == 0:
            video_description = None

@ -65,7 +65,7 @@ class CinemassacreIE(InfoExtractor):
            {
                'url': url,
                'play_path': 'mp4:' + sd_file,
                'rtmp_live': True,  # workaround
                'ext': 'flv',
                'format': 'sd',
                'format_id': 'sd',
@ -73,7 +73,7 @@ class CinemassacreIE(InfoExtractor):
            {
                'url': url,
                'play_path': 'mp4:' + hd_file,
                'rtmp_live': True,  # workaround
                'ext': 'flv',
                'format': 'hd',
                'format_id': 'hd',

@ -36,6 +36,7 @@ class ClipsyndicateIE(InfoExtractor):
            transform_source=fix_xml_ampersands)

        track_doc = pdoc.find('trackList/track')

        def find_param(name):
            node = find_xpath_attr(track_doc, './/param', 'name', name)
            if node is not None:

@ -1,5 +1,6 @@
from .mtv import MTVIE


class CMTIE(MTVIE):
    IE_NAME = u'cmt.com'
    _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml'

@ -25,16 +25,16 @@ class CNNIE(InfoExtractor):
            'upload_date': '20130609',
        },
    },
    {
        "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
        "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
        "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
        "info_dict": {
            "title": "Student's epic speech stuns new freshmen",
            "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
            "upload_date": "20130821",
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
@ -79,7 +79,7 @@ class CNNIE(InfoExtractor):

        self._sort_formats(formats)

        thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
        thumbnails = sorted([((int(t.attrib['height']), int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
        thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]

        metas_el = info.find('metas')

@ -21,35 +21,35 @@ class CollegeHumorIE(InfoExtractor):
                'age_limit': 13,
            },
        },
        {
            'url': 'http://www.collegehumor.com/video/3505939/font-conference',
            'md5': '72fa701d8ef38664a4dbb9e2ab721816',
            'info_dict': {
                'id': '3505939',
                'ext': 'mp4',
                'title': 'Font Conference',
                'description': 'This video wasn\'t long enough,',
                'age_limit': 10,
                'duration': 179,
            },
        },
        # embedded youtube video
        {
            'url': 'http://www.collegehumor.com/embed/6950457',
            'info_dict': {
                'id': 'W5gMp3ZjYg4',
                'ext': 'mp4',
                'title': 'Funny Dogs Protecting Babies Compilation 2014 [NEW HD]',
                'uploader': 'Funnyplox TV',
                'uploader_id': 'funnyploxtv',
                'description': 'md5:7ded37421526d54afdf005e25bc2b7a3',
                'upload_date': '20140128',
            },
            'params': {
                'skip_download': True,
            },
            'add_ie': ['Youtube'],
        },
    ]

    def _real_extract(self, url):

@ -122,7 +122,7 @@ class ComedyCentralShowsIE(InfoExtractor):
        epTitle = mobj.group('episode')

        self.report_extraction(epTitle)
        webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
        webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
        if dlNewest:
            url = htmlHandle.geturl()
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
@ -148,13 +148,13 @@ class ComedyCentralShowsIE(InfoExtractor):
        uri = mMovieParams[0][1]
        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
        idoc = self._download_xml(indexUrl, epTitle,
            'Downloading show index',
            'unable to download episode index')

        results = []

        itemEls = idoc.findall('.//item')
        for partNum,itemEl in enumerate(itemEls):
        for partNum, itemEl in enumerate(itemEls):
            mediaId = itemEl.findall('./guid')[0].text
            shortMediaId = mediaId.split(':')[-1]
            showId = mediaId.split(':')[-2].replace('.com', '')
@ -162,9 +162,9 @@ class ComedyCentralShowsIE(InfoExtractor):
            officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)

            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
                compat_urllib_parse.urlencode({'uri': mediaId}))
            cdoc = self._download_xml(configUrl, epTitle,
                'Downloading configuration for %s' % shortMediaId)

            turls = []
            for rendition in cdoc.findall('.//rendition'):
@ -186,7 +186,7 @@ class ComedyCentralShowsIE(InfoExtractor):
                    'width': w,
                })

            effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum+1)
            effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum + 1)
            results.append({
                'id': shortMediaId,
                'formats': formats,

@ -25,6 +25,7 @@ _NO_DEFAULT = object()


class InfoExtractor(object):

    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
@ -306,17 +307,18 @@ class InfoExtractor(object):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    #Methods for following #608
    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None):
        """Returns a url that points to a page that should be processed"""
        #TODO: ie should be the class used for getting the info
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        return video_info

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None):
        """Returns a playlist"""
@ -340,7 +342,8 @@ class InfoExtractor(object):
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj: break
                if mobj:
                    break

        if os.name != 'nt' and sys.stderr.isatty():
            _name = u'\033[0;34m%s\033[0m' % name
@ -356,7 +359,7 @@ class InfoExtractor(object):
            raise RegexNotFoundError(u'Unable to extract %s' % _name)
        else:
            self._downloader.report_warning(u'unable to extract %s; '
                u'please report this issue on http://yt-dl.org/bug' % _name)
            return None

    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
@ -396,7 +399,7 @@ class InfoExtractor(object):
            raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
        except (IOError, netrc.NetrcParseError) as err:
            self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))

        return (username, password)

    # Helper functions for extracting OpenGraph info
@ -429,7 +432,8 @@ class InfoExtractor(object):

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video')
        if secure: regexes = self._og_regexes('video:secure_url') + regexes
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _html_search_meta(self, name, html, display_name=None):
@ -470,7 +474,7 @@ class InfoExtractor(object):

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
            'twitter card player')

    def _sort_formats(self, formats):
        if not formats:
@ -530,6 +534,7 @@ class InfoExtractor(object):


class SearchInfoExtractor(InfoExtractor):

    """
    Base class for paged search queries extractors.
    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
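
Note: url_result above is how one extractor defers to another (as BlipTV's redirect earlier in this commit does): instead of a media dict it returns a '_type': 'url' marker that the core resolves, optionally routed to a specific extractor via ie_key. Reproducing the method body standalone:

    def url_result(url, ie=None, video_id=None):
        """Returns a url that points to a page that should be processed"""
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        return video_info

    print(url_result('http://blip.tv/a/a-1234', 'BlipTV'))
    # {'_type': 'url', 'url': 'http://blip.tv/a/a-1234', 'ie_key': 'BlipTV'}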

@ -14,6 +14,7 @@ from ..utils import (


class CondeNastIE(InfoExtractor):

    """
    Condé Nast is a media group, some of its sites use a custom HTML5 player
    that works the same in all of them.

@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import determine_ext


class CriterionIE(InfoExtractor):
    _VALID_URL = r'https?://www\.criterion\.com/films/(\d*)-.+'
    _TEST = {
@ -13,7 +14,7 @@ class CriterionIE(InfoExtractor):
        u'md5': u'bc51beba55685509883a9a7830919ec3',
        u'info_dict': {
            u"title": u"Le Samouraï",
            u"description" : u'md5:a2b4b116326558149bef81f76dcbb93f',
            u"description": u'md5:a2b4b116326558149bef81f76dcbb93f',
        }
    }

@ -23,16 +24,16 @@ class CriterionIE(InfoExtractor):
        webpage = self._download_webpage(url, video_id)

        final_url = self._search_regex(r'so.addVariable\("videoURL", "(.+?)"\)\;',
            webpage, 'video url')
        title = self._html_search_regex(r'<meta content="(.+?)" property="og:title" />',
            webpage, 'video title')
        description = self._html_search_regex(r'<meta name="description" content="(.+?)" />',
            webpage, 'video description')
        thumbnail = self._search_regex(r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
            webpage, 'thumbnail url')

        return {'id': video_id,
                'url' : final_url,
                'url': final_url,
                'title': title,
                'ext': determine_ext(final_url),
                'description': description,
|
@ -1,7 +1,9 @@
|
|||||||
# encoding: utf-8
|
# encoding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re, base64, zlib
|
import re
|
||||||
|
import base64
|
||||||
|
import zlib
|
||||||
from hashlib import sha1
|
from hashlib import sha1
|
||||||
from math import pow, sqrt, floor
|
from math import pow, sqrt, floor
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
@ -19,6 +21,7 @@ from ..aes import (
    inc,
)


class CrunchyrollIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TESTS = [{
@ -68,10 +71,12 @@ class CrunchyrollIE(InfoExtractor):
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        class Counter:
            __value = iv

            def next_value(self):
                temp = self.__value
                self.__value = inc(self.__value)
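The key-derivation step above is compact; as a standalone illustration of the "extend 160 bit hash to 256 bit" trick (the helper name and input values are invented for the example):

    from hashlib import sha1

    def extend_key(prefix, num4):
        # SHA-1 yields a 160-bit (20-byte) digest; an AES-256 key needs
        # 256 bits (32 bytes), so twelve zero bytes are appended.
        sha_hash = list(bytearray(sha1(prefix + str(num4).encode('ascii')).digest()))
        return sha_hash + [0] * 12

    key = extend_key(b'some-prefix', 1234)
    assert len(key) == 32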
@ -80,7 +85,7 @@ class CrunchyrollIE(InfoExtractor):
        return zlib.decompress(decrypted_data)

    def _convert_subtitles_to_srt(self, subtitles):
        i = 1
        output = ''
        for start, end, text in re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles):
            start = start.replace('.', ',')
@ -90,10 +95,10 @@ class CrunchyrollIE(InfoExtractor):
            if not text:
                continue
            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
            i += 1
        return output

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

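The '%d\n%s --> %s\n%s\n\n' template emits standard SRT cues: a running index, a timing line (SRT uses a comma as the decimal separator, which is why start.replace('.', ',') is needed), the text, and a blank separator line. With made-up timings, the output looks like:

    1
    00:00:01,600 --> 00:00:04,200
    First subtitle line

    2
    00:00:04,300 --> 00:00:06,000
    Second subtitle line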
@ -123,25 +128,25 @@ class CrunchyrollIE(InfoExtractor):
        playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
        playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

        stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
        video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)

        formats = []
        for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + 'p'
            streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
            # urlencode doesn't work!
            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
            streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for ' + video_format)
            video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
            video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
            formats.append({
                'url': video_url,
                'play_path': video_play_path,
                'ext': 'flv',
                'format': video_format,
                'format_id': video_format,
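The hand-assembled POST body next to the "urlencode doesn't work!" comment percent-encodes the underscores itself; %5F is simply '_', as a quick check with the stdlib shows (Python 3 spelling of the import):

    from urllib.parse import unquote
    unquote('req=RpcApiVideoEncode%5FGetStreamInfo')
    # -> 'req=RpcApiVideoEncode_GetStreamInfo'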
@ -149,8 +154,8 @@ class CrunchyrollIE(InfoExtractor):

        subtitles = {}
        for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
                                              video_id, note='Downloading subtitles for ' + sub_name)
            id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
            iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
            data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@ -167,12 +172,12 @@ class CrunchyrollIE(InfoExtractor):
            subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'thumbnail': video_thumbnail,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'subtitles': subtitles,
            'formats': formats,
        }
@ -46,7 +46,7 @@ class CSpanIE(InfoExtractor):
        url = unescapeHTML(data['video']['files'][0]['path']['#text'])

        doc = self._download_xml('http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
                                 video_id)

        def find_string(s):
            return find_xpath_attr(doc, './/string', 'name', s).text
@ -16,7 +16,9 @@ from ..utils import (
    ExtractorError,
)


class DailymotionBaseInfoExtractor(InfoExtractor):

    @staticmethod
    def _build_request(url):
        """Build a request with the family filter disabled"""
@ -25,7 +27,9 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
        request.add_header('Cookie', 'ff=off')
        return request


class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):

    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
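As a standalone restatement of _build_request above, using Python 3's urllib directly instead of the compat aliases the codebase uses:

    import urllib.request

    def build_request(url):
        """Build a request with the family filter disabled"""
        request = urllib.request.Request(url)
        # the 'ff=off' cookie turns Dailymotion's family filter off
        request.add_header('Cookie', 'ff=off')
        return request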
@ -45,7 +49,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
        u'file': u'x33vw9.mp4',
        u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
        u'info_dict': {
            u"uploader": u"Amphora Alex and Van .",
            u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
        }
    },
@ -112,7 +116,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
        embed_page = self._download_webpage(embed_url, video_id,
                                            u'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
                                  'video info', flags=re.MULTILINE)
        info = json.loads(info)
        if info.get('error') is not None:
            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
@ -149,12 +153,12 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
            view_count = str_to_int(view_count)

        return {
            'id': video_id,
            'formats': formats,
            'uploader': info['owner_screenname'],
            'upload_date': video_upload_date,
            'title': self._og_search_title(webpage),
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
@ -195,7 +199,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break
        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
                for video_id in orderedSet(video_ids)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
@ -9,7 +9,7 @@ from .common import InfoExtractor
class DefenseGouvFrIE(InfoExtractor):
    IE_NAME = 'defense.gouv.fr'
    _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/'
                  r'ligthboxvideo/base-de-medias/webtv/(.*)')

    _TEST = {
        'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
@ -26,13 +26,13 @@ class DefenseGouvFrIE(InfoExtractor):
        video_id = self._search_regex(
            r"flashvars.pvg_id=\"(\d+)\";",
            webpage, 'ID')

        json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
                    + video_id)
        info = self._download_webpage(json_url, title,
                                      'Downloading JSON config')
        video_url = json.loads(info)['renditions'][0]['url']

        return {'id': video_id,
                'ext': 'mp4',
                'url': video_url,
@ -15,6 +15,7 @@ from ..utils import (


class DepositFilesIE(InfoExtractor):

    """Information extractor for depositfiles.com"""

    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
@ -25,7 +26,7 @@ class DepositFilesIE(InfoExtractor):
        url = 'http://depositfiles.com/en/files/' + file_id

        # Retrieve file webpage with 'Free download' button pressed
        free_download_indication = {'gateway_result': '1'}
        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
        try:
            self.report_download_webpage(file_id)
@ -51,10 +52,10 @@ class DepositFilesIE(InfoExtractor):
        file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title')

        return [{
            'id': file_id.decode('utf-8'),
            'url': file_url.decode('utf-8'),
            'uploader': None,
            'upload_date': None,
            'title': file_title,
            'ext': file_extension.decode('utf-8'),
        }]
@ -28,7 +28,7 @@ class DiscoveryIE(InfoExtractor):
        webpage = self._download_webpage(url, video_id)

        video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
                                             webpage, 'video list', flags=re.DOTALL)
        video_list = json.loads(video_list_json)
        info = video_list['clips'][0]
        formats = []
@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
        video_id = mobj.group('id')
        info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
        info = self._download_json(info_url, video_id)
        date = time.gmtime(info['dateCreated'] / 1000)  # The timestamp is in miliseconds

        return {
            'id': video_id,
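A quick worked example of that millisecond conversion (the timestamp value is made up): dividing by 1000 turns the API's millisecond epoch into seconds, which time.gmtime accepts:

    import time
    date = time.gmtime(1372204800000 / 1000)
    time.strftime('%Y%m%d', date)  # -> '20130626'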
@ -17,13 +17,12 @@ class DreiSatIE(InfoExtractor):
        u'md5': u'9dcfe344732808dbfcc901537973c922',
        u'info_dict': {
            u"title": u"Kaffeeland Schweiz",
            u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...",
            u"uploader": u"3sat",
            u"upload_date": u"20130622"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
@ -17,7 +17,7 @@ class EHowIE(InfoExtractor):
        u'info_dict': {
            u"title": u"Hardwood Flooring Basics",
            u"description": u"Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...",
            u"uploader": u"Erick Nathan"
        }
    }

@ -26,21 +26,20 @@ class EHowIE(InfoExtractor):
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
                                       webpage, u'video URL')
        final_url = compat_urllib_parse.unquote(video_url)
        uploader = self._search_regex(r'<meta name="uploader" content="(.+?)" />',
                                      webpage, u'uploader')
        title = self._og_search_title(webpage).replace(' | eHow', '')
        ext = determine_ext(final_url)

        return {
            '_type': 'video',
            'id': video_id,
            'url': final_url,
            'ext': ext,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'uploader': uploader,
        }

@ -82,7 +82,6 @@ class EightTracksIE(InfoExtractor):
        ]
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
@ -102,8 +101,8 @@ class EightTracksIE(InfoExtractor):
        res = []
        for i in range(track_count):
            api_json = self._download_webpage(next_url, playlist_id,
                                              note=u'Downloading song information %s/%s' % (str(i + 1), track_count),
                                              errnote=u'Failed to download song information')
            api_data = json.loads(api_json)
            track_data = api_data[u'set']['track']
            info = {
@ -8,6 +8,7 @@ from ..utils import (
    compat_urllib_parse,
)


class ExtremeTubeIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
    _TEST = {
@ -15,6 +15,7 @@ from ..utils import (


class FacebookIE(InfoExtractor):

    """Information Extractor for Facebook"""

    _VALID_URL = r'''(?x)
@ -50,7 +51,7 @@ class FacebookIE(InfoExtractor):
        login_page_req.add_header('Cookie', 'locale=en_US')
        self.report_login()
        login_page = self._download_webpage(login_page_req, None, note=False,
                                            errnote=u'Unable to download login page')
        lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd')
        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd')

@ -64,7 +65,7 @@ class FacebookIE(InfoExtractor):
            'legacy_return': '1',
            'timezone': '-60',
            'trynum': '1',
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        try:
@ -26,9 +26,9 @@ class FazIE(InfoExtractor):
        self.to_screen(video_id)
        webpage = self._download_webpage(url, video_id)
        config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
                                            u'config xml url')
        config = self._download_xml(config_xml_url, video_id,
                                    u'Downloading config xml')

        encodings = config.find('ENCODINGS')
        formats = []
@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor):
        duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)

        like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
                                             webpage, 'like count', fatal=False)
        dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
                                                webpage, 'dislike count', fatal=False)

        return {
            'id': video_id,
@ -57,4 +57,4 @@ class FirstTVIE(InfoExtractor):
            'duration': int_or_none(duration),
            'like_count': int_or_none(like_count),
            'dislike_count': int_or_none(dislike_count),
        }
@ -30,9 +30,9 @@ class FKTVIE(InfoExtractor):
        server = random.randint(2, 4)
        video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
        start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
                                               episode)
        playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
                                      u'playlist', flags=re.DOTALL)
        files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
        # TODO: return a single multipart video
        videos = []
@ -71,8 +71,8 @@ class FKTVPosteckeIE(InfoExtractor):
        video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
        video_title = 'Postecke %d' % episode
        return {
            'id': video_id,
            'url': video_url,
            'ext': determine_ext(video_url),
            'title': video_title,
        }
@ -10,6 +10,7 @@ from ..utils import (


class FlickrIE(InfoExtractor):

    """Information Extractor for Flickr videos"""
    _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
    _TEST = {
@ -17,8 +18,8 @@ class FlickrIE(InfoExtractor):
        'file': '5645318632.mp4',
        'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
        'info_dict': {
            "description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
            "uploader_id": "forestwander-nature-pictures",
            "title": "Dark Hollow Waterfalls"
        }
    }
@ -37,7 +38,7 @@ class FlickrIE(InfoExtractor):
        first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')

        node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
                                          first_xml, 'node_id')

        second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
        second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')
@ -50,11 +51,11 @@ class FlickrIE(InfoExtractor):
        video_url = mobj.group(1) + unescapeHTML(mobj.group(2))

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'uploader_id': video_uploader_id,
        }]
@ -55,7 +55,7 @@ class FourTubeIE(InfoExtractor):
        description = self._html_search_meta('description', webpage, 'description')
        if description:
            upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
                                             fatal=False)
            if upload_date:
                upload_date = unified_strdate(upload_date)
            view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
@ -65,9 +65,9 @@ class FourTubeIE(InfoExtractor):

        token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
            b'Origin': b'http://www.4tube.com',
        }
        token_req = compat_urllib_request.Request(token_url, b'{}', headers)
        tokens = self._download_json(token_req, video_id)

@ -76,7 +76,7 @@ class FourTubeIE(InfoExtractor):
            'format_id': format + 'p',
            'resolution': format + 'p',
            'quality': int(format),
        } for format in sources]

        self._sort_formats(formats)

@ -92,4 +92,4 @@ class FourTubeIE(InfoExtractor):
            'duration': duration,
            'age_limit': 18,
            'webpage_url': webpage_url,
        }
@ -12,6 +12,7 @@ from ..utils import (


class FranceTVBaseInfoExtractor(InfoExtractor):

    def _extract_video(self, video_id):
        info = self._download_xml(
            'http://www.francetvinfo.fr/appftv/webservices/video/'
@ -194,7 +195,7 @@ class GenerationQuoiIE(InfoExtractor):
        info_json = self._download_webpage(info_url, name)
        info = json.loads(info_json)
        return self.url_result('http://www.dailymotion.com/video/%s' % info['id'],
                               ie='Dailymotion')


class CultureboxIE(FranceTVBaseInfoExtractor):
@ -9,7 +9,7 @@ class GamekingsIE(InfoExtractor):
        u"url": u"http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/",
        u'file': u'20130811.mp4',
        # MD5 is flaky, seems to change regularly
        # u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3',
        u'info_dict': {
            u"title": u"Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review",
            u"description": u"Melle en Steven hebben voor de review een week in de rechtbank doorbracht met Phoenix Wright: Ace Attorney - Dual Destinies.",
@ -116,14 +116,16 @@ class GenericIE(InfoExtractor):
        """Check if it is a redirect, like url shorteners, in case return the new url."""

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):

            """
            Subclass the HTTPRedirectHandler to make it use our
            HEADRequest also on the redirected URL
            """

            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k, v) for k, v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HEADRequest(newurl,
                                       headers=newheaders,
@ -133,19 +135,21 @@ class GenericIE(InfoExtractor):
            raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):

            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """

            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k, v) for k, v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))

        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
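The two handlers above implement "resolve redirects with HEAD, fall back to GET on 405". A compressed standalone sketch of the same idea, using Python 3's urllib directly instead of the compat aliases (the function name is invented):

    import urllib.error
    import urllib.request

    class HEADRequest(urllib.request.Request):
        def get_method(self):
            return 'HEAD'  # ask only for headers, not the body

    def resolve_final_url(url):
        try:
            return urllib.request.urlopen(HEADRequest(url)).geturl()
        except urllib.error.HTTPError as e:
            if e.code == 405:  # server refuses HEAD: retry as a normal GET
                return urllib.request.urlopen(url).geturl()
            raise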
@ -301,7 +305,7 @@ class GenericIE(InfoExtractor):
        # Look for embedded blip.tv player
        mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
        if mobj:
            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
        if mobj:
            return self.url_result(mobj.group(1), 'BlipTV')
@ -48,17 +48,17 @@ class GooglePlusIE(InfoExtractor):

        # Extract uploader
        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
                                           webpage, 'uploader', fatal=False)

        # Extract title
        # Get the first line for title
        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
                                              webpage, 'title', default='NA')

        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
                                        webpage, 'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

@ -80,7 +80,7 @@ class GooglePlusIE(InfoExtractor):
        # Treat escaped \u0026 style hex
        try:
            video_url = video_url.decode("unicode_escape")
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return {
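The try/except above is a Python 2/3 bridge; a runnable sketch of both paths (the URL value is made up, with a doubled backslash so the literal contains the same '\u0026' escape sequence the page delivers):

    video_url = 'http://video.example.com/play?id=1\\u0026itag=37'
    try:
        video_url = video_url.decode("unicode_escape")        # Python 2: str -> unicode
    except AttributeError:                                    # Python 3: str has no .decode
        video_url = bytes(video_url, 'ascii').decode('unicode-escape')
    # either way the '\u0026' sequence is unescaped to '&': '...?id=1&itag=37'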
@ -6,6 +6,7 @@ import json
from .common import InfoExtractor
from ..utils import determine_ext


class HarkIE(InfoExtractor):
    _VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
    _TEST = {
@ -22,13 +23,13 @@ class HarkIE(InfoExtractor):
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        json_url = "http://www.hark.com/clips/%s.json" % (video_id)
        info_json = self._download_webpage(json_url, video_id)
        info = json.loads(info_json)
        final_url = info['url']

        return {'id': video_id,
                'url': final_url,
                'title': info['name'],
                'ext': determine_ext(final_url),
                'description': info['description'],
@ -13,7 +13,7 @@ class HowcastIE(InfoExtractor):
        'info_dict': {
            'id': '390161',
            'ext': 'mp4',
            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
            'title': 'How to Tie a Square Knot Properly',
        }
    }
@ -27,10 +27,10 @@ class HowcastIE(InfoExtractor):
        self.report_extraction(video_id)

        video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
                                       webpage, 'video URL')

        video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
                                                    webpage, 'description', fatal=False)

        return {
            'id': video_id,
@ -13,6 +13,7 @@ from ..utils import (


class HypemIE(InfoExtractor):

    """Information Extractor for hypem"""
    _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
    _TEST = {
@ -40,7 +41,7 @@ class HypemIE(InfoExtractor):
        self.report_extraction(track_id)

        html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
                                              response, u'tracks', flags=re.MULTILINE | re.DOTALL).strip()
        try:
            track_list = json.loads(html_tracks)
            track = track_list[u'tracks'][0]
@ -53,7 +54,7 @@ class HypemIE(InfoExtractor):
        title = track[u"song"]

        serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
        request = compat_urllib_request.Request(serve_url, "", {'Content-Type': 'application/json'})
        request.add_header('cookie', cookie)
        song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
        try:
@ -63,9 +64,9 @@ class HypemIE(InfoExtractor):
        final_url = song_data[u"url"]

        return [{
            'id': track_id,
            'url': final_url,
            'ext': "mp3",
            'title': title,
            'artist': artist,
        }]
@ -8,6 +8,7 @@ from ..utils import (


class IGNIE(InfoExtractor):

    """
    Extractor for some of the IGN sites, like www.ign.com, es.ign.com de.ign.com.
    Some videos of it.ign.com are also supported
@ -101,6 +102,7 @@ class IGNIE(InfoExtractor):


class OneUPIE(IGNIE):

    """Extractor for 1up.com, it uses the ign videos system."""

    _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
@ -63,7 +63,7 @@ class ImdbListIE(InfoExtractor):
    IE_NAME = 'imdb:list'
    IE_DESC = 'Internet Movie Database lists'
    _VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        list_id = mobj.group('id')
@ -37,11 +37,11 @@ class InfoQIE(InfoExtractor):

        # Extract title
        video_title = self._search_regex(r'contentTitle = "(.*?)";',
                                         webpage, 'title')

        # Extract description
        video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
                                                    webpage, 'description', fatal=False)

        video_filename = video_url.split('/')[-1]
        video_id, extension = video_filename.split('.')
@ -24,9 +24,9 @@ class InstagramIE(InfoExtractor):
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
                                         webpage, 'uploader id', fatal=False)
        desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
                                  fatal=False)

        return {
            'id': video_id,
@ -29,7 +29,7 @@ class InternetVideoArchiveIE(InfoExtractor):
    def _clean_query(query):
        NEEDED_ARGS = ['publishedid', 'customerid']
        query_dic = compat_urlparse.parse_qs(query)
        cleaned_dic = dict((k, v[0]) for (k, v) in query_dic.items() if k in NEEDED_ARGS)
        # Other player ids return m3u8 urls
        cleaned_dic['playerid'] = '247'
        cleaned_dic['videokbrate'] = '100000'
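What _clean_query does, reduced to a runnable snippet (illustrative query values; Python 3 spelling of the parse_qs import):

    from urllib.parse import parse_qs

    query = 'publishedid=123&customerid=456&playerid=999'
    NEEDED_ARGS = ['publishedid', 'customerid']
    cleaned_dic = dict((k, v[0]) for (k, v) in parse_qs(query).items() if k in NEEDED_ARGS)
    # cleaned_dic == {'publishedid': '123', 'customerid': '456'}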
@ -42,22 +42,22 @@ class InternetVideoArchiveIE(InfoExtractor):
        url = self._build_url(query)

        flashconfiguration = self._download_xml(url, video_id,
                                                u'Downloading flash configuration')
        file_url = flashconfiguration.find('file').text
        file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
        # Replace some of the parameters in the query to get the best quality
        # and http links (no m3u8 manifests)
        file_url = re.sub(r'(?<=\?)(.+)$',
                          lambda m: self._clean_query(m.group()),
                          file_url)
        info = self._download_xml(file_url, video_id,
                                  u'Downloading video info')
        item = info.find('channel/item')

        def _bp(p):
            return xpath_with_ns(p,
                                 {'media': 'http://search.yahoo.com/mrss/',
                                  'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'})
        formats = []
        for content in item.findall(_bp('media:group/media:content')):
            attr = content.attrib
@ -34,8 +34,8 @@ class IPrimaIE(InfoExtractor):
        webpage = self._download_webpage(url, video_id)

        player_url = 'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' % (
            floor(random() * 1073741824),
            floor(random() * 1073741824))

        req = compat_urllib_request.Request(player_url)
        req.add_header('Referer', url)
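A side note on the magic number: 1073741824 is 2**30, so each token above is just a uniformly random 30-bit integer used as a cache buster:

    from math import floor
    from random import random

    token = floor(random() * 1073741824)  # uniform in [0, 2**30)
    assert 1073741824 == 2 ** 30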
@ -46,7 +46,7 @@ class IPrimaIE(InfoExtractor):
        zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')

        if zoneGEO != '0':
            base_url = base_url.replace('token', 'token_' + zoneGEO)

        formats = []
        for format_id in ['lq', 'hq', 'hd']:
@ -63,13 +63,13 @@ class IPrimaIE(InfoExtractor):
                quality = 1
            elif format_id == 'hd':
                quality = 2
                filename = 'hq/' + filename

            formats.append({
                'format_id': format_id,
                'url': base_url,
                'quality': quality,
                'play_path': 'mp4:' + filename.replace('"', '')[:-4],
                'rtmp_live': True,
                'ext': 'flv',
            })
@ -43,7 +43,7 @@ class IviIE(InfoExtractor):
            'thumbnail': 'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
        },
        'skip': 'Only works from Russia',
    }
    ]

    # Sorted by quality
@ -102,7 +102,7 @@ class IviIE(InfoExtractor):
        compilation = result['compilation']
        title = result['title']

        title = '%s - %s' % (compilation, title) if compilation is not None else title

        previews = result['preview']
        previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
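The previews.sort call orders entries by their position in a fixed preference list; the same pattern in isolation (format names invented):

    known_thumbnails = ['thumbnail', 'poster']
    previews = [{'content_format': 'poster'}, {'content_format': 'thumbnail'}]
    previews.sort(key=lambda fmt: known_thumbnails.index(fmt['content_format']))
    # previews now lists the 'thumbnail' entry first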
@ -137,17 +137,17 @@ class IviCompilationIE(InfoExtractor):
         compilation_id = mobj.group('compilationid')
         season_id = mobj.group('seasonid')
 
-        if season_id is not None: # Season link
+        if season_id is not None:  # Season link
             season_page = self._download_webpage(url, compilation_id, 'Downloading season %s web page' % season_id)
             playlist_id = '%s/season%s' % (compilation_id, season_id)
             playlist_title = self._html_search_meta('title', season_page, 'title')
             entries = self._extract_entries(season_page, compilation_id)
-        else: # Compilation link
+        else:  # Compilation link
             compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
             playlist_id = compilation_id
             playlist_title = self._html_search_meta('title', compilation_page, 'title')
             seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
-            if len(seasons) == 0: # No seasons in this compilation
+            if len(seasons) == 0:  # No seasons in this compilation
                 entries = self._extract_entries(compilation_page, compilation_id)
             else:
                 entries = []
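The three changed lines above restore the two spaces that pep8's E261 check requires before an inline comment. The rendered diff collapses runs of spaces, so the exact spacing is reconstructed here on the assumption that these were ordinary E261 fixes. The rule in one sketch:

    season_id = None
    if season_id is not None:  # two spaces before '#', one after: satisfies E261/E262
        pass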
@ -157,4 +157,4 @@ class IviCompilationIE(InfoExtractor):
                     compilation_id, 'Downloading season %s web page' % season_id)
                 entries.extend(self._extract_entries(season_page, compilation_id))
 
         return self.playlist_result(entries, playlist_id, playlist_title)
youtube_dl/extractor/jadorecettepub.py
@ -45,4 +45,3 @@ class JadoreCettePubIE(InfoExtractor):
         'title': title,
         'description': description,
     }
-
youtube_dl/extractor/jeuxvideo.py
@ -29,7 +29,7 @@ class JeuxVideoIE(InfoExtractor):
         xml_link = self._html_search_regex(
             r'<param name="flashvars" value="config=(.*?)" />',
             webpage, 'config URL')
 
         video_id = self._search_regex(
             r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
             xml_link, 'video ID')
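In this hunk, and in most of the all-context hunks below, the old and new lines are textually identical: the commit changed only leading whitespace, which this side-by-side rendering drops. These are most plausibly continuation-line indentation fixes (E127/E128). The rule, sketched with a stand-in helper:

    def search_regex(pattern, page, name):  # stand-in for self._html_search_regex
        return name

    xml_link = search_regex(r'value="config=(.*?)"',
        'webpage', 'config URL')              # E128: not aligned with the bracket

    xml_link = search_regex(r'value="config=(.*?)"',
                            'webpage', 'config URL')  # aligned: compliant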
@ -38,7 +38,7 @@ class JeuxVideoIE(InfoExtractor):
             xml_link, title, 'Downloading XML config')
         info_json = config.find('format.json').text
         info = json.loads(info_json)['versions'][0]
 
         video_url = 'http://video720.jeuxvideo.com/' + info['file']
 
         return {
youtube_dl/extractor/jukebox.py
@ -7,6 +7,7 @@ from ..utils import (
     unescapeHTML,
 )
 
+
 class JukeboxIE(InfoExtractor):
     _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
     _IFRAME = r'<iframe .*src="(?P<iframe>[^"]*)".*>'
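The single added blank line is an E302 fix: two blank lines are expected before a top-level class or function definition. Sketch with a hypothetical class:

    IFRAME = r'<iframe .*src="(?P<iframe>[^"]*)".*>'


    class ExampleIE(object):  # two blank lines above satisfy E302
        pass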
@ -37,10 +38,10 @@ class JukeboxIE(InfoExtractor):
             mobj = re.search(self._IS_YOUTUBE, iframe_html)
             if mobj is None:
                 raise ExtractorError(u'Cannot extract video url')
-            youtube_url = unescapeHTML(mobj.group('youtube_url')).replace('\/','/')
+            youtube_url = unescapeHTML(mobj.group('youtube_url')).replace('\/', '/')
             self.to_screen(u'Youtube video detected')
-            return self.url_result(youtube_url,ie='Youtube')
-        video_url = unescapeHTML(mobj.group('video_url')).replace('\/','/')
+            return self.url_result(youtube_url, ie='Youtube')
+        video_url = unescapeHTML(mobj.group('video_url')).replace('\/', '/')
         video_ext = unescapeHTML(mobj.group('video_ext'))
 
         mobj = re.search(self._TITLE, html)
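All three changes here are E231 fixes: a comma must be followed by whitespace, whether in a keyword argument (`ie='Youtube'`) or in `str.replace` arguments. Sketch with a hypothetical JSON-escaped URL:

    escaped = 'http:\\/\\/www.example.com\\/video'
    url = escaped.replace('\\/','/')   # flagged by pep8 as E231
    url = escaped.replace('\\/', '/')  # compliant, same result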
youtube_dl/extractor/justintv.py
@ -10,6 +10,7 @@ from ..utils import (
 
 
 class JustinTVIE(InfoExtractor):
+
     """Information extractor for justin.tv and twitch.tv"""
     # TODO: One broadcast may be split into multiple videos. The key
     # 'broadcast_id' is the same for all parts, and 'broadcast_part'
@ -30,9 +31,9 @@ class JustinTVIE(InfoExtractor):
         u'file': u'296128360.flv',
         u'md5': u'ecaa8a790c22a40770901460af191c9a',
         u'info_dict': {
             u"upload_date": u"20110927",
             u"uploader_id": 25114803,
             u"uploader": u"thegamedevhub",
             u"title": u"Beginner Series - Scripting With Python Pt.1"
         }
     }
@ -40,7 +41,7 @@ class JustinTVIE(InfoExtractor):
     def report_download_page(self, channel, offset):
         """Report attempt to download a single page of videos."""
         self.to_screen(u'%s: Downloading video information from %d to %d' %
                        (channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
 
     # Return count of items, list of *valid* items
     def _parse_page(self, url, video_id):
@ -49,7 +50,7 @@ class JustinTVIE(InfoExtractor):
                                            u'unable to download video info JSON')
 
         response = json.loads(info_json)
-        if type(response) != list:
+        if not isinstance(response, list):
             error_text = response.get('error', 'unknown error')
             raise ExtractorError(u'Justin.tv API: %s' % error_text)
         info = []
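Unlike the purely mechanical fixes elsewhere in this diff, this hunk slightly changes a comparison: `type(response) != list` becomes `not isinstance(response, list)` (pep8 E721). The two are not strictly equivalent, since `isinstance` also accepts subclasses of `list`, but for telling an error `dict` apart from a result list that difference is harmless, and `isinstance` is the form PEP8 recommends. Illustration with a hypothetical subclass:

    class VideoList(list):
        """Hypothetical list subclass carrying extra metadata."""

    response = VideoList()
    print(type(response) != list)          # True: the exact-type test rejects it
    print(not isinstance(response, list))  # False: subclasses still count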
@ -94,8 +95,8 @@ class JustinTVIE(InfoExtractor):
 
             api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
             doc = self._download_xml(api, chapter_id,
                                      note=u'Downloading chapter information',
                                      errnote=u'Chapter information download failed')
             for a in doc.findall('.//archive'):
                 if archive_id == a.find('./id').text:
                     break
@ -107,8 +108,8 @@ class JustinTVIE(InfoExtractor):
 
             chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
             chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
                                                        note='Downloading chapter metadata',
                                                        errnote='Download of chapter metadata failed')
             chapter_info = json.loads(chapter_info_json)
 
             bracket_start = int(doc.find('.//bracket_start').text)
@ -116,7 +117,7 @@ class JustinTVIE(InfoExtractor):
 
             # TODO determine start (and probably fix up file)
             # youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
-            #video_url += u'?start=' + TODO:start_timestamp
+            # video_url += u'?start=' + TODO:start_timestamp
             # bracket_start is 13290, but we want 51670615
             self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
                                             u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
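The changed comment line is an E265 fix: a block comment must start with `# ` (hash, then a space), and that applies to commented-out code too. Sketch:

    video_url = 'http://example.invalid/video.flv'  # hypothetical value
    #video_url += '?start=0'    (E265: no space after the hash)
    # video_url += '?start=0'   (compliant)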
youtube_dl/extractor/kankan.py
@ -10,7 +10,7 @@ _md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
 
 class KankanIE(InfoExtractor):
     _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
 
     _TEST = {
         'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
         'file': '48863.flv',
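The hunk header's context line, `_md5 = lambda s: ...`, is itself a pattern that later versions of the checker flag (E731: do not assign a lambda expression, use a def); it is left untouched here. The def equivalent would be:

    import hashlib


    def _md5(s):
        return hashlib.md5(s.encode('utf-8')).hexdigest()


    print(_md5('test'))  # 098f6bcd4621d373cade4e832627b4f6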
youtube_dl/extractor/keezmovies.py
@ -11,6 +11,7 @@ from ..aes import (
     aes_decrypt_text
 )
 
+
 class KeezMoviesIE(InfoExtractor):
     _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>keezmovies\.com/video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
     _TEST = {
@ -40,7 +41,7 @@ class KeezMoviesIE(InfoExtractor):
 
         video_title = self._html_search_regex(r'<h1 [^>]*>([^<]+)', webpage, u'title')
         video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&', webpage, u'video_url'))
-        if webpage.find('encrypted=true')!=-1:
+        if webpage.find('encrypted=true') != -1:
             password = self._html_search_regex(r'video_title=(.+?)&', webpage, u'password')
             video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8')
         path = compat_urllib_parse_urlparse(video_url).path
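The change here is another E225 fix (`!=` takes surrounding spaces). As an aside, the `str.find(...) != -1` idiom can usually be written with `in`; the commit sensibly keeps to style-level changes, but the idiomatic form would be:

    webpage = '<video encrypted=true>'        # hypothetical page fragment
    if webpage.find('encrypted=true') != -1:  # as fixed in this commit
        print('encrypted')
    if 'encrypted=true' in webpage:           # equivalent, more idiomatic
        print('encrypted')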
youtube_dl/extractor/kickstarter.py
@ -20,18 +20,18 @@ class KickStarterIE(InfoExtractor):
         webpage_src = self._download_webpage(url, video_id)
 
         video_url = self._search_regex(r'data-video="(.*?)">',
                                        webpage_src, u'video URL')
         if 'mp4' in video_url:
             ext = 'mp4'
         else:
             ext = 'flv'
         video_title = self._html_search_regex(r"<title>(.*?)</title>",
                                               webpage_src, u'title').rpartition(u'\u2014 Kickstarter')[0].strip()
 
         results = [{
             'id': video_id,
             'url': video_url,
             'title': video_title,
             'ext': ext,
         }]
         return results
youtube_dl/extractor/kontrtube.py
@ -33,20 +33,20 @@ class KontrTubeIE(InfoExtractor):
         video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
         thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
         title = self._html_search_regex(r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage,
                                         'video title')
         description = self._html_search_meta('description', webpage, 'video description')
 
         mobj = re.search(r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>',
                          webpage)
         duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
 
         view_count = self._html_search_regex(r'<div class="col_2">Просмотров: <span>(\d+)</span></div>', webpage,
                                              'view count', fatal=False)
         view_count = int(view_count) if view_count is not None else None
 
         comment_count = None
         comment_str = self._html_search_regex(r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count',
                                               fatal=False)
         if comment_str.startswith('комментариев нет'):
             comment_count = 0
         else:
@ -63,4 +63,4 @@ class KontrTubeIE(InfoExtractor):
             'duration': duration,
             'view_count': view_count,
             'comment_count': comment_count,
         }
youtube_dl/extractor/lifenews.py
@ -53,7 +53,7 @@ class LifeNewsIE(InfoExtractor):
             r'<div class=\'comments\'>(\d+)</div>', webpage, 'comment count', fatal=False)
 
         upload_date = self._html_search_regex(
-            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+            r'<time datetime=\'([^\']+)\'>', webpage, 'upload date', fatal=False)
         if upload_date is not None:
             upload_date = unified_strdate(upload_date)
 
@ -66,4 +66,4 @@ class LifeNewsIE(InfoExtractor):
             'view_count': int_or_none(view_count),
             'comment_count': int_or_none(comment_count),
             'upload_date': upload_date,
         }
youtube_dl/extractor/liveleak.py
@ -20,29 +20,29 @@ class LiveLeakIE(InfoExtractor):
             'title': 'Most unlucky car accident'
         }
     },
     {
         'url': 'http://www.liveleak.com/view?i=f93_1390833151',
         'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
         'info_dict': {
             'id': 'f93_1390833151',
             'ext': 'mp4',
             'description': 'German Television Channel NDR does an exclusive interview with Edward Snowden.\r\nUploaded on LiveLeak cause German Television thinks the rest of the world isn\'t intereseted in Edward Snowden.',
             'uploader': 'ARD_Stinkt',
             'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
         }
     },
     {
         'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
         'md5': '42c6d97d54f1db107958760788c5f48f',
         'info_dict': {
             'id': '4f7_1392687779',
             'ext': 'mp4',
             'description': "The guy with the cigarette seems amazingly nonchalant about the whole thing... I really hope my friends' reactions would be a bit stronger.\r\n\r\nAction-go to 0:55.",
             'uploader': 'CapObveus',
             'title': 'Man is Fatally Struck by Reckless Car While Packing up a Moving Truck',
             'age_limit': 18,
         }
     }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
youtube_dl/extractor/livestream.py
@ -29,7 +29,7 @@ class LivestreamIE(InfoExtractor):
             'ext': 'mp4',
             'title': video_data['caption'],
             'thumbnail': video_data['thumbnail_url'],
-            'upload_date': video_data['updated_at'].replace('-','')[:8],
+            'upload_date': video_data['updated_at'].replace('-', '')[:8],
         }
 
     def _real_extract(self, url):
@ -41,10 +41,10 @@ class LivestreamIE(InfoExtractor):
         if video_id is None:
             # This is an event page:
             config_json = self._search_regex(r'window.config = ({.*?});',
                                              webpage, u'window config')
             info = json.loads(config_json)['event']
             videos = [self._extract_video_info(video_data['data'])
                       for video_data in info['feed']['data'] if video_data['type'] == u'video']
             return self.playlist_result(videos, info['id'], info['full_name'])
         else:
             og_video = self._og_search_video_url(webpage, name=u'player url')
youtube_dl/extractor/lynda.py
@ -89,7 +89,7 @@ class LyndaIE(SubtitlesInfoExtractor):
             'password': password,
             'remember': 'false',
             'stayPut': 'false'
         }
         request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
         login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
 
@ -97,7 +97,7 @@ class LyndaIE(SubtitlesInfoExtractor):
         m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
         if m is not None:
             response = m.group('json')
             response_json = json.loads(response)
             state = response_json['state']
 
             if state == 'notlogged':
@ -167,7 +167,7 @@ class LyndaCourseIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         course_path = mobj.group('coursepath')
         course_id = mobj.group('courseid')
 
         page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
                                       course_id, 'Downloading course JSON')
         course_json = json.loads(page)
@ -198,4 +198,4 @@ class LyndaCourseIE(InfoExtractor):
 
         course_title = course_json['Title']
 
         return self.playlist_result(entries, course_id, course_title)
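Most of the edits above are mechanical enough to be produced by a tool. A sketch of reproducing this kind of sweep, everything except line length per the commit title, with autopep8, assuming its `fix_code` helper accepts an ignore list as its documentation suggests; the file path is illustrative:

    import autopep8

    path = 'youtube_dl/extractor/lynda.py'  # any of the files in this diff
    with open(path) as f:
        source = f.read()

    # Fix all default pep8 issues except E501 (line too long).
    fixed = autopep8.fix_code(source, options={'ignore': ['E501']})

    with open(path, 'w') as f:
        f.write(fixed)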
Some files were not shown because too many files have changed in this diff