Merge branch 'master' of https://github.com/rg3/youtube-dl into multipart_videos

Mark Lee 2014-05-28 13:49:06 -07:00
commit f856064222
90 changed files with 2781 additions and 809 deletions

View File

@@ -3,6 +3,7 @@ python:
 - "2.6"
 - "2.7"
 - "3.3"
+- "3.4"
 script: nosetests test --verbose
 notifications:
   email:

View File

@ -1,14 +0,0 @@
2013.01.02 Codename: GIULIA
* Add support for ComedyCentral clips <nto>
* Corrected Vimeo description fetching <Nick Daniels>
* Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
* --verbose offers more environment info
* New info_dict field: uploader_id
* New updates system, with signature checking
* New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
* Fixed IEs: BlipTv
* Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
* Simplified IEs and test code
* Various (Python 3 and other) fixes
* Revamped and expanded tests

View File

@@ -1,7 +1,7 @@
 all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion

 clean:
-	rm -rf youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
+	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz

 cleanall: clean
 	rm -f youtube-dl youtube-dl.exe
@@ -55,7 +55,9 @@ README.txt: README.md
 	pandoc -f markdown -t plain README.md -o README.txt

 youtube-dl.1: README.md
-	pandoc -s -f markdown -t man README.md -o youtube-dl.1
+	python devscripts/prepare_manpage.py >youtube-dl.1.temp.md
+	pandoc -s -f markdown -t man youtube-dl.1.temp.md -o youtube-dl.1
+	rm -f youtube-dl.1.temp.md

 youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
 	python devscripts/bash-completion.py

View File

@@ -1,11 +1,24 @@
-% YOUTUBE-DL(1)
-
-# NAME
 youtube-dl - download videos from youtube.com or other video platforms

 # SYNOPSIS
 **youtube-dl** [OPTIONS] URL [URL...]

+# INSTALLATION
+
+To install it right away for all UNIX users (Linux, OS X, etc.), type:
+
+    sudo curl https://yt-dl.org/latest/youtube-dl -o /usr/local/bin/youtube-dl
+    sudo chmod a+x /usr/local/bin/youtube-dl
+
+If you do not have curl, you can alternatively use a recent wget:
+
+    sudo wget https://yt-dl.org/downloads/2014.05.13/youtube-dl -O /usr/local/bin/youtube-dl
+    sudo chmod a+x /usr/local/bin/youtube-dl
+
+Windows users can [download a .exe file](https://yt-dl.org/latest/youtube-dl.exe) and place it in their home directory or any other location on their [PATH](http://en.wikipedia.org/wiki/PATH_%28variable%29).
+
+Alternatively, refer to the developer instructions below for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
+
 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
 YouTube.com and a few more sites. It requires the Python interpreter, version
@@ -458,7 +471,7 @@ If your report is shorter than two lines, it is almost certainly missing some of
 For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.

-Site support requests must contain an example URL. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+Site support requests **must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.

 ### Are you using the latest version?

View File

@@ -15,7 +15,7 @@ header = oldreadme[:oldreadme.index('# OPTIONS')]
 footer = oldreadme[oldreadme.index('# CONFIGURATION'):]

 options = helptext[helptext.index('  General Options:') + 19:]
-options = re.sub(r'^  (\w.+)$', r'## \1', options, flags=re.M)
+options = re.sub(r'(?m)^  (\w.+)$', r'## \1', options)
 options = '# OPTIONS\n' + options + '\n'

 with io.open(README_FILE, 'w', encoding='utf-8') as f:
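
Aside: the reason for this change is Python 2.6 support (still tested in .travis.yml above). re.sub() only accepts a flags argument from Python 2.7 on, while an inline (?m) modifier works on every version. A minimal sketch of the equivalence:

    import re

    text = '  General Options:\n  -h, --help'
    # Inline modifier: portable to Python 2.6
    print(re.sub(r'(?m)^  (\w.+)$', r'## \1', text))
    # Keyword argument: Python 2.7+ only, same result
    print(re.sub(r'^  (\w.+)$', r'## \1', text, flags=re.M))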

View File

@@ -0,0 +1,20 @@
+import io
+import os.path
+import sys
+import re
+
+ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+README_FILE = os.path.join(ROOT_DIR, 'README.md')
+
+with io.open(README_FILE, encoding='utf-8') as f:
+    readme = f.read()
+
+PREFIX = '%YOUTUBE-DL(1)\n\n# NAME\n'
+readme = re.sub(r'(?s)# INSTALLATION.*?(?=# DESCRIPTION)', '', readme)
+readme = PREFIX + readme
+
+if sys.version_info < (3, 0):
+    print(readme.encode('utf-8'))
+else:
+    print(readme)

View File

@@ -74,13 +74,19 @@ class FakeYDL(YoutubeDL):
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)

-def gettestcases():
+def gettestcases(include_onlymatching=False):
     for ie in youtube_dl.extractor.gen_extractors():
         t = getattr(ie, '_TEST', None)
         if t:
-            t['name'] = type(ie).__name__[:-len('IE')]
-            yield t
-        for t in getattr(ie, '_TESTS', []):
+            assert not hasattr(ie, '_TESTS'), \
+                '%s has _TEST and _TESTS' % type(ie).__name__
+            tests = [t]
+        else:
+            tests = getattr(ie, '_TESTS', [])
+        for t in tests:
+            if not include_onlymatching and t.get('only_matching', False):
+                continue
             t['name'] = type(ie).__name__[:-len('IE')]
             yield t
@@ -101,7 +107,7 @@ def expect_info_dict(self, expected_dict, got_dict):
         elif isinstance(expected, type):
             got = got_dict.get(info_field)
             self.assertTrue(isinstance(got, expected),
-                u'Expected type %r, but got value %r of type %r' % (expected, got, type(got)))
+                u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
@@ -129,3 +135,17 @@ def expect_info_dict(self, expected_dict, got_dict):
             missing_keys,
             'Missing keys in test definition: %s' % (
                 ', '.join(sorted(missing_keys))))
+
+
+def assertRegexpMatches(self, text, regexp, msg=None):
+    if hasattr(self, 'assertRegexpMatches'):
+        return self.assertRegexpMatches(text, regexp, msg)
+    else:
+        m = re.match(regexp, text)
+        if not m:
+            note = 'Regexp didn\'t match: %r not found in %r' % (regexp, text)
+            if msg is None:
+                msg = note
+            else:
+                msg = note + ', ' + msg
+            self.assertTrue(m, msg)
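
For context: extractor test definitions are plain dicts, and 'only_matching' entries exercise only URL recognition with no download. A hypothetical sketch of the two kinds of entries the new include_onlymatching switch distinguishes:

    # Hypothetical _TESTS list for some extractor: gettestcases() now skips
    # the second entry unless include_onlymatching=True is passed.
    _TESTS = [{
        'url': 'http://example.com/video/123',   # full download test
        'info_dict': {'id': '123', 'ext': 'mp4', 'title': 'Example'},
    }, {
        'url': 'http://example.com/embed/123',   # URL matching only
        'only_matching': True,
    }]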

View File

@@ -8,7 +8,7 @@ import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from test.helper import FakeYDL
+from test.helper import FakeYDL, assertRegexpMatches
 from youtube_dl import YoutubeDL
 from youtube_dl.extractor import YoutubeIE
@@ -67,7 +67,7 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['ext'], 'mp4')

-        # No prefer_free_formats => prefer mp4 and flv for greater compatibilty
+        # No prefer_free_formats => prefer mp4 and flv for greater compatibility
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
@@ -274,6 +274,12 @@ class TestFormatSelection(unittest.TestCase):
         # Replace missing fields with 'NA'
         self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')

+    def test_format_note(self):
+        ydl = YoutubeDL()
+        self.assertEqual(ydl._format_note({}), '')
+        assertRegexpMatches(self, ydl._format_note({
+            'vbr': 10,
+        }), '^\s*10k$')

 if __name__ == '__main__':
     unittest.main()

View File

@@ -77,20 +77,20 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])

     def test_justin_tv_channelid_matching(self):
-        self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"www.justin.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"www.twitch.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv/"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/"))
+        self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))

     def test_justintv_videoid_matching(self):
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/b/328087483"))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))

     def test_justin_tv_chapterid_matching(self):
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361"))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))

     def test_youtube_extract(self):
         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
@@ -106,7 +106,7 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_no_duplicates(self):
         ies = gen_extractors()
-        for tc in gettestcases():
+        for tc in gettestcases(include_onlymatching=True):
             url = tc['url']
             for ie in ies:
                 if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
@@ -176,5 +176,6 @@ class TestAllURLsMatching(unittest.TestCase):
             'https://screen.yahoo.com/smartwatches-latest-wearable-gadgets-163745379-cbs.html',
             ['Yahoo'])

+
 if __name__ == '__main__':
     unittest.main()

View File

@@ -10,6 +10,7 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 from test.helper import (
+    assertRegexpMatches,
     expect_info_dict,
     FakeYDL,
 )
@@ -22,9 +23,11 @@ from youtube_dl.extractor import (
     VimeoUserIE,
     VimeoAlbumIE,
     VimeoGroupsIE,
+    VineUserIE,
     UstreamChannelIE,
     SoundcloudSetIE,
     SoundcloudUserIE,
+    SoundcloudPlaylistIE,
     LivestreamIE,
     NHLVideocenterIE,
     BambuserChannelIE,
@@ -100,6 +103,13 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['title'], 'Rolex Awards for Enterprise')
         self.assertTrue(len(result['entries']) > 72)

+    def test_vine_user(self):
+        dl = FakeYDL()
+        ie = VineUserIE(dl)
+        result = ie.extract('https://vine.co/Visa')
+        self.assertIsPlaylist(result)
+        self.assertTrue(len(result['entries']) >= 50)
+
     def test_ustream_channel(self):
         dl = FakeYDL()
         ie = UstreamChannelIE(dl)
@@ -124,6 +134,17 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['id'], '9615865')
         self.assertTrue(len(result['entries']) >= 12)

+    def test_soundcloud_playlist(self):
+        dl = FakeYDL()
+        ie = SoundcloudPlaylistIE(dl)
+        result = ie.extract('http://api.soundcloud.com/playlists/4110309')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], '4110309')
+        self.assertEqual(result['title'], 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]')
+        assertRegexpMatches(
+            self, result['description'], r'TILT Brass - Bowery Poetry Club')
+        self.assertEqual(len(result['entries']), 6)
+
     def test_livestream_event(self):
         dl = FakeYDL()
         ie = LivestreamIE(dl)
@@ -192,16 +213,16 @@ class TestPlaylists(unittest.TestCase):
         self.assertIsPlaylist(result)
         self.assertEqual(result['id'], 'dezhurnyi_angel')
         self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)')
-        self.assertTrue(len(result['entries']) >= 36)
+        self.assertTrue(len(result['entries']) >= 16)

     def test_ivi_compilation_season(self):
         dl = FakeYDL()
         ie = IviCompilationIE(dl)
-        result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2')
+        result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season1')
         self.assertIsPlaylist(result)
-        self.assertEqual(result['id'], 'dezhurnyi_angel/season2')
-        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон')
-        self.assertTrue(len(result['entries']) >= 20)
+        self.assertEqual(result['id'], 'dezhurnyi_angel/season1')
+        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 1 сезон')
+        self.assertTrue(len(result['entries']) >= 16)

     def test_imdb_list(self):
         dl = FakeYDL()

View File

@@ -181,7 +181,7 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles.keys()), 28)
+        self.assertTrue(len(subtitles.keys()) >= 28)

     def test_list_subtitles(self):
         self.DL.expect_warning(u'Automatic Captions not supported by this server')

View File

@@ -31,6 +31,7 @@ from .utils import (
     ContentTooShortError,
     date_from_str,
     DateRange,
+    DEFAULT_OUTTMPL,
     determine_ext,
     DownloadError,
     encodeFilename,
@@ -441,7 +442,8 @@ class YoutubeDL(object):
                                  if v is not None)
             template_dict = collections.defaultdict(lambda: 'NA', template_dict)

-            tmpl = os.path.expanduser(self.params['outtmpl'])
+            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
+            tmpl = os.path.expanduser(outtmpl)
             filename = tmpl % template_dict
             return filename
         except ValueError as err:
@@ -1049,10 +1051,11 @@ class YoutubeDL(object):

     def download(self, url_list):
         """Download a given list of URLs."""
+        outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
         if (len(url_list) > 1 and
-                '%' not in self.params['outtmpl']
+                '%' not in outtmpl
                 and self.params.get('max_downloads') != 1):
-            raise SameFileError(self.params['outtmpl'])
+            raise SameFileError(outtmpl)

         for url in url_list:
             try:
@@ -1163,57 +1166,57 @@ class YoutubeDL(object):
             res = default
         return res

-    def list_formats(self, info_dict):
-        def format_note(fdict):
-            res = ''
-            if fdict.get('ext') in ['f4f', 'f4m']:
-                res += '(unsupported) '
-            if fdict.get('format_note') is not None:
-                res += fdict['format_note'] + ' '
-            if fdict.get('tbr') is not None:
-                res += '%4dk ' % fdict['tbr']
-            if fdict.get('container') is not None:
-                if res:
-                    res += ', '
-                res += '%s container' % fdict['container']
-            if (fdict.get('vcodec') is not None and
-                    fdict.get('vcodec') != 'none'):
-                if res:
-                    res += ', '
-                res += fdict['vcodec']
-                if fdict.get('vbr') is not None:
-                    res += '@'
-            elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
-                res += 'video@'
-            if fdict.get('vbr') is not None:
-                res += '%4dk' % fdict['vbr']
-            if fdict.get('acodec') is not None:
-                if res:
-                    res += ', '
-                if fdict['acodec'] == 'none':
-                    res += 'video only'
-                else:
-                    res += '%-5s' % fdict['acodec']
-            elif fdict.get('abr') is not None:
-                if res:
-                    res += ', '
-                res += 'audio'
-            if fdict.get('abr') is not None:
-                res += '@%3dk' % fdict['abr']
-            if fdict.get('asr') is not None:
-                res += ' (%5dHz)' % fdict['asr']
-            if fdict.get('filesize') is not None:
-                if res:
-                    res += ', '
-                res += format_bytes(fdict['filesize'])
-            return res
+    def _format_note(self, fdict):
+        res = ''
+        if fdict.get('ext') in ['f4f', 'f4m']:
+            res += '(unsupported) '
+        if fdict.get('format_note') is not None:
+            res += fdict['format_note'] + ' '
+        if fdict.get('tbr') is not None:
+            res += '%4dk ' % fdict['tbr']
+        if fdict.get('container') is not None:
+            if res:
+                res += ', '
+            res += '%s container' % fdict['container']
+        if (fdict.get('vcodec') is not None and
+                fdict.get('vcodec') != 'none'):
+            if res:
+                res += ', '
+            res += fdict['vcodec']
+            if fdict.get('vbr') is not None:
+                res += '@'
+        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
+            res += 'video@'
+        if fdict.get('vbr') is not None:
+            res += '%4dk' % fdict['vbr']
+        if fdict.get('acodec') is not None:
+            if res:
+                res += ', '
+            if fdict['acodec'] == 'none':
+                res += 'video only'
+            else:
+                res += '%-5s' % fdict['acodec']
+        elif fdict.get('abr') is not None:
+            if res:
+                res += ', '
+            res += 'audio'
+        if fdict.get('abr') is not None:
+            res += '@%3dk' % fdict['abr']
+        if fdict.get('asr') is not None:
+            res += ' (%5dHz)' % fdict['asr']
+        if fdict.get('filesize') is not None:
+            if res:
+                res += ', '
+            res += format_bytes(fdict['filesize'])
+        return res
+
+    def list_formats(self, info_dict):
         def line(format, idlen=20):
             return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
                 format['format_id'],
                 format['ext'],
                 self.format_resolution(format),
-                format_note(format),
+                self._format_note(format),
             ))

         formats = info_dict.get('formats', [info_dict])
@@ -1221,8 +1224,8 @@ class YoutubeDL(object):
             max(len(f['format_id']) for f in formats))
         formats_s = [line(f, idlen) for f in formats]
         if len(formats) > 1:
-            formats_s[0] += (' ' if format_note(formats[0]) else '') + '(worst)'
-            formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'
+            formats_s[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
+            formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'

         header_line = line({
             'format_id': 'format code', 'ext': 'extension',
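
Promoting the nested helper to a private method (rather than a closure inside list_formats) is what lets the new test_format_note test above call it directly. A rough sketch of the strings it builds, with outputs derived from the %-format widths in the code:

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL()
    print(repr(ydl._format_note({})))                  # ''
    print(repr(ydl._format_note({'vbr': 10})))         # '  10k', matches ^\s*10k$
    print(repr(ydl._format_note({'acodec': 'none'})))  # 'video only'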

View File

@@ -53,6 +53,10 @@ __authors__ = (
     'Mattias Harrysson',
     'phaer',
     'Sainyam Kapoor',
+    'Nicolas Évrard',
+    'Jason Normore',
+    'Hoje Lee',
+    'Adam Thalhammer',
 )

 __license__ = 'Public Domain'
@@ -72,6 +76,7 @@ from .utils import (
     compat_getpass,
     compat_print,
     DateRange,
+    DEFAULT_OUTTMPL,
     decodeOption,
     get_term_width,
     DownloadError,
@@ -679,7 +684,7 @@ def _real_main(argv=None):
         if not opts.audioquality.isdigit():
             parser.error(u'invalid audio quality specified')
     if opts.recodevideo is not None:
-        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg']:
+        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
             parser.error(u'invalid video recode format specified')
     if opts.date is not None:
         date = DateRange.day(opts.date)
@@ -708,7 +713,7 @@ def _real_main(argv=None):
                or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
                or (opts.useid and u'%(id)s.%(ext)s')
                or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
-               or u'%(title)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error(u'Cannot download a video and extract audio into the same'
                      u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'

View File

@@ -14,6 +14,8 @@ from ..utils import (


 class HttpFD(FileDownloader):
+    _TEST_FILE_SIZE = 10241
+
     def real_download(self, filename, info_dict):
         url = info_dict['url']
         tmpfilename = self.temp_name(filename)
@@ -28,8 +30,10 @@ class HttpFD(FileDownloader):
         basic_request = compat_urllib_request.Request(url, None, headers)
         request = compat_urllib_request.Request(url, None, headers)

-        if self.params.get('test', False):
-            request.add_header('Range', 'bytes=0-10240')
+        is_test = self.params.get('test', False)
+
+        if is_test:
+            request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))

         # Establish possible resume length
         if os.path.isfile(encodeFilename(tmpfilename)):
@@ -100,6 +104,15 @@ class HttpFD(FileDownloader):
                     return False

         data_len = data.info().get('Content-length', None)
+
+        # Range HTTP header may be ignored/unsupported by a webserver
+        # (e.g. extractor/scivee.py, extractor/bambuser.py).
+        # However, for a test we still would like to download just a piece of a file.
+        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
+        # block size when downloading a file.
+        if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
+            data_len = self._TEST_FILE_SIZE
+
         if data_len is not None:
             data_len = int(data_len) + resume_len
             min_data_len = self.params.get("min_filesize", None)
@@ -118,7 +131,7 @@ class HttpFD(FileDownloader):
         while True:
             # Download and write
             before = time.time()
-            data_block = data.read(block_size)
+            data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
             after = time.time()
             if len(data_block) == 0:
                 break
@@ -162,6 +175,9 @@ class HttpFD(FileDownloader):
                 'speed': speed,
             })

+            if is_test and byte_counter == data_len:
+                break
+
             # Apply rate limit
             self.slow_down(start, byte_counter - resume_len)
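
The comment block above captures the key idea: a Range request is only a hint, so test mode needs a client-side cap as well. A standalone sketch of the same pattern, not youtube-dl code, with a hypothetical URL:

    import urllib.request

    TEST_FILE_SIZE = 10241  # same constant as _TEST_FILE_SIZE above
    url = 'http://example.com/video.mp4'  # hypothetical

    # Politely ask the server for only the first TEST_FILE_SIZE bytes...
    req = urllib.request.Request(
        url, headers={'Range': 'bytes=0-%d' % (TEST_FILE_SIZE - 1)})
    resp = urllib.request.urlopen(req)

    # ...but also cap reads manually in case the server ignores the header.
    data, block_size = b'', 1024
    while len(data) < TEST_FILE_SIZE:
        block = resp.read(min(block_size, TEST_FILE_SIZE - len(data)))
        if not block:  # Range honoured, or end of file
            break
        data += block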

View File

@@ -10,6 +10,7 @@ from .common import FileDownloader
 from ..utils import (
     encodeFilename,
     format_bytes,
+    compat_str,
 )
@@ -127,7 +128,10 @@ class RtmpFD(FileDownloader):
             basic_args += ['--flashVer', flash_version]
         if live:
             basic_args += ['--live']
-        if conn:
+        if isinstance(conn, list):
+            for entry in conn:
+                basic_args += ['--conn', entry]
+        elif isinstance(conn, compat_str):
             basic_args += ['--conn', conn]
         args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)]
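
This lets an extractor hand over several RTMP connect parameters at once; each list entry becomes its own --conn flag on the rtmpdump command line (rtmpdump accepts the flag repeatedly). A small illustration with made-up values:

    conn = ['S:connect-arg-1', 'S:connect-arg-2']  # hypothetical values

    basic_args = []
    if isinstance(conn, list):
        for entry in conn:
            basic_args += ['--conn', entry]
    elif isinstance(conn, str):  # compat_str in the py2/py3 code above
        basic_args += ['--conn', conn]

    print(basic_args)
    # ['--conn', 'S:connect-arg-1', '--conn', 'S:connect-arg-2']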

View File

@@ -20,6 +20,7 @@ from .auengine import AUEngineIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
 from .bbccouk import BBCCoUkIE
+from .bilibili import BiliBiliIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
@@ -40,6 +41,7 @@ from .cinemassacre import CinemassacreIE
 from .clipfish import ClipfishIE
 from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
+from .clubic import ClubicIE
 from .cmt import CMTIE
 from .cnet import CNETIE
 from .cnn import (
@@ -70,6 +72,7 @@ from .ehow import EHowIE
 from .eighttracks import EightTracksIE
 from .eitb import EitbIE
 from .elpais import ElPaisIE
+from .empflix import EmpflixIE
 from .engadget import EngadgetIE
 from .escapist import EscapistIE
 from .everyonesmixtape import EveryonesMixtapeIE
@@ -77,6 +80,7 @@ from .exfm import ExfmIE
 from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .faz import FazIE
+from .fc2 import FC2IE
 from .firstpost import FirstpostIE
 from .firsttv import FirstTVIE
 from .fivemin import FiveMinIE
@@ -107,10 +111,12 @@ from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .hark import HarkIE
 from .helsinki import HelsinkiIE
+from .hentaistigma import HentaiStigmaIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
 from .huffpost import HuffPostIE
 from .hypem import HypemIE
+from .iconosquare import IconosquareIE
 from .ign import IGNIE, OneUPIE
 from .imdb import (
     ImdbIE,
@@ -158,6 +164,7 @@ from .mofosex import MofosexIE
 from .mooshare import MooshareIE
 from .morningstar import MorningstarIE
 from .motorsport import MotorsportIE
+from .moviezine import MoviezineIE
 from .movshare import MovShareIE
 from .mtv import (
     MTVIE,
@@ -177,15 +184,23 @@ from .nbc import (
 from .ndr import NDRIE
 from .ndtv import NDTVIE
 from .newgrounds import NewgroundsIE
+from .newstube import NewstubeIE
 from .nfb import NFBIE
 from .nhl import NHLIE, NHLVideocenterIE
 from .niconico import NiconicoIE
 from .ninegag import NineGagIE
+from .noco import NocoIE
 from .normalboots import NormalbootsIE
 from .novamov import NovaMovIE
 from .nowness import NownessIE
 from .nowvideo import NowVideoIE
+from .nrk import (
+    NRKIE,
+    NRKTVIE,
+)
 from .ntv import NTVIE
+from .nytimes import NYTimesIE
+from .nuvid import NuvidIE
 from .oe1 import OE1IE
 from .ooyala import OoyalaIE
 from .orf import ORFIE
@@ -206,6 +221,7 @@ from .ringtv import RingTVIE
 from .ro220 import Ro220IE
 from .rottentomatoes import RottenTomatoesIE
 from .roxwel import RoxwelIE
+from .rtbf import RTBFIE
 from .rtlnow import RTLnowIE
 from .rts import RTSIE
 from .rtve import RTVEALaCartaIE
@@ -217,9 +233,11 @@ from .rutube import (
 )
 from .rutv import RUTVIE
 from .savefrom import SaveFromIE
+from .scivee import SciVeeIE
 from .servingsys import ServingSysIE
 from .sina import SinaIE
 from .slideshare import SlideshareIE
+from .slutload import SlutloadIE
 from .smotri import (
     SmotriIE,
     SmotriCommunityIE,
@@ -227,7 +245,12 @@ from .smotri import (
     SmotriBroadcastIE,
 )
 from .sohu import SohuIE
-from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
+from .soundcloud import (
+    SoundcloudIE,
+    SoundcloudSetIE,
+    SoundcloudUserIE,
+    SoundcloudPlaylistIE
+)
 from .southparkstudios import (
     SouthParkStudiosIE,
     SouthparkDeIE,
@@ -237,10 +260,10 @@ from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE
 from .spike import SpikeIE
 from .stanfordoc import StanfordOpenClassroomIE
-from .statigram import StatigramIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
+from .swrmediathek import SWRMediathekIE
 from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
 from .teamcoco import TeamcocoIE
@@ -251,6 +274,7 @@ from .tf1 import TF1IE
 from .theplatform import ThePlatformIE
 from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
+from .tlc import TlcIE, TlcDeIE
 from .toutv import TouTvIE
 from .toypics import ToypicsUserIE, ToypicsIE
 from .traileraddict import TrailerAddictIE
@@ -280,6 +304,7 @@ from .videodetective import VideoDetectiveIE
 from .videolecturesnet import VideoLecturesNetIE
 from .videofyme import VideofyMeIE
 from .videopremium import VideoPremiumIE
+from .videott import VideoTtIE
 from .videoweed import VideoWeedIE
 from .vimeo import (
     VimeoIE,
@@ -288,15 +313,21 @@ from .vimeo import (
     VimeoAlbumIE,
     VimeoGroupsIE,
     VimeoReviewIE,
+    VimeoWatchLaterIE,
 )
-from .vine import VineIE
+from .vine import (
+    VineIE,
+    VineUserIE,
+)
 from .viki import VikiIE
 from .vk import VKIE
 from .vube import VubeIE
+from .vuclip import VuClipIE
 from .washingtonpost import WashingtonPostIE
 from .wat import WatIE
 from .wdr import (
     WDRIE,
+    WDRMobileIE,
     WDRMausIE,
 )
 from .weibo import WeiboIE

View File

@@ -1,7 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals

-import datetime
 import re

 from .common import InfoExtractor
@@ -16,6 +15,7 @@ class AftonbladetIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Vulkanutbrott i rymden - nu släpper NASA bilderna',
             'description': 'Jupiters måne mest aktiv av alla himlakroppar',
+            'timestamp': 1394142732,
             'upload_date': '20140306',
         },
     }
@@ -27,17 +27,17 @@ class AftonbladetIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)

         # find internal video meta data
-        META_URL = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
+        meta_url = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
         internal_meta_id = self._html_search_regex(
             r'data-aptomaId="([\w\d]+)"', webpage, 'internal_meta_id')
-        internal_meta_url = META_URL % internal_meta_id
+        internal_meta_url = meta_url % internal_meta_id
         internal_meta_json = self._download_json(
             internal_meta_url, video_id, 'Downloading video meta data')

         # find internal video formats
-        FORMATS_URL = 'http://aftonbladet-play.videodata.drvideo.aptoma.no/actions/video/?id=%s'
+        format_url = 'http://aftonbladet-play.videodata.drvideo.aptoma.no/actions/video/?id=%s'
         internal_video_id = internal_meta_json['videoId']
-        internal_formats_url = FORMATS_URL % internal_video_id
+        internal_formats_url = format_url % internal_video_id
         internal_formats_json = self._download_json(
             internal_formats_url, video_id, 'Downloading video formats')
@@ -54,16 +54,13 @@ class AftonbladetIE(InfoExtractor):
             })
         self._sort_formats(formats)

-        timestamp = datetime.datetime.fromtimestamp(internal_meta_json['timePublished'])
-        upload_date = timestamp.strftime('%Y%m%d')
-
         return {
             'id': video_id,
             'title': internal_meta_json['title'],
             'formats': formats,
             'thumbnail': internal_meta_json['imageUrl'],
             'description': internal_meta_json['shortPreamble'],
-            'upload_date': upload_date,
+            'timestamp': internal_meta_json['timePublished'],
             'duration': internal_meta_json['duration'],
             'view_count': internal_meta_json['views'],
         }
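
Several files in this commit (this one, blinkx.py and bliptv.py below) make the same switch: instead of formatting upload_date in the extractor, they return the raw Unix 'timestamp' and let the core derive the date, which also avoids local-timezone skew from datetime.fromtimestamp(). A quick check that the test's two fields agree, assuming UTC:

    import datetime

    # 'timestamp' from the test case above -> its 'upload_date'
    print(datetime.datetime.utcfromtimestamp(1394142732).strftime('%Y%m%d'))
    # -> '20140306'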

View File

@@ -74,7 +74,8 @@ class ArteTVPlus7IE(InfoExtractor):
         return self._extract_from_webpage(webpage, video_id, lang)

     def _extract_from_webpage(self, webpage, video_id, lang):
-        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')
+        json_url = self._html_search_regex(
+            r'arte_vp_url="(.*?)"', webpage, 'json vp url')
         return self._extract_from_json_url(json_url, video_id, lang)

     def _extract_from_json_url(self, json_url, video_id, lang):
@@ -120,14 +121,17 @@ class ArteTVPlus7IE(InfoExtractor):
                 return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
         else:
             def sort_key(f):
+                versionCode = f.get('versionCode')
+                if versionCode is None:
+                    versionCode = ''
                 return (
                     # Sort first by quality
-                    int(f.get('height',-1)),
-                    int(f.get('bitrate',-1)),
+                    int(f.get('height', -1)),
+                    int(f.get('bitrate', -1)),
                     # The original version with subtitles has lower relevance
-                    re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
+                    re.match(r'VO-ST(F|A)', versionCode) is None,
                     # The version with sourds/mal subtitles has also lower relevance
-                    re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
+                    re.match(r'VO?(F|A)-STM\1', versionCode) is None,
                     # Prefer http downloads over m3u8
                     0 if f['url'].endswith('m3u8') else 1,
                 )
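
The extra None check is needed because dict.get()'s default only applies when the key is missing, not when it is present with value None, and re.match() raises TypeError when given None. A two-line illustration:

    f = {'versionCode': None}        # key present, value None
    print(f.get('versionCode', ''))  # -> None, not '' -- would break re.match()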

View File

@@ -12,14 +12,14 @@ from ..utils import (


 class BandcampIE(InfoExtractor):
-    _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
+    _VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
     _TESTS = [{
         'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
         'file': '1812978515.mp3',
         'md5': 'c557841d5e50261777a6585648adf439',
         'info_dict': {
             "title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
-            "duration": 10,
+            "duration": 9.8485,
         },
         '_skip': 'There is a limit of 200 free downloads / month for the test song'
     }]
@@ -28,36 +28,32 @@ class BandcampIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         title = mobj.group('title')
         webpage = self._download_webpage(url, title)
-        # We get the link to the free download page
         m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
-        if m_download is None:
+        if not m_download:
             m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
             if m_trackinfo:
                 json_code = m_trackinfo.group(1)
-                data = json.loads(json_code)
-                d = data[0]
+                data = json.loads(json_code)[0]

-                duration = int(round(d['duration']))
                 formats = []
-                for format_id, format_url in d['file'].items():
-                    ext, _, abr_str = format_id.partition('-')
+                for format_id, format_url in data['file'].items():
+                    ext, abr_str = format_id.split('-', 1)
                     formats.append({
                         'format_id': format_id,
                         'url': format_url,
-                        'ext': format_id.partition('-')[0],
+                        'ext': ext,
                         'vcodec': 'none',
-                        'acodec': format_id.partition('-')[0],
-                        'abr': int(format_id.partition('-')[2]),
+                        'acodec': ext,
+                        'abr': int(abr_str),
                     })

                 self._sort_formats(formats)

                 return {
-                    'id': compat_str(d['id']),
-                    'title': d['title'],
+                    'id': compat_str(data['id']),
+                    'title': data['title'],
                     'formats': formats,
-                    'duration': duration,
+                    'duration': float(data['duration']),
                 }
             else:
                 raise ExtractorError('No free songs found')
@@ -67,11 +63,9 @@ class BandcampIE(InfoExtractor):
             r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
             webpage, re.MULTILINE | re.DOTALL).group('id')

-        download_webpage = self._download_webpage(download_link, video_id,
-                                                  'Downloading free downloads page')
-        # We get the dictionary of the track from some javascrip code
-        info = re.search(r'items: (.*?),$',
-                         download_webpage, re.MULTILINE).group(1)
+        download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
+        # We get the dictionary of the track from some javascript code
+        info = re.search(r'items: (.*?),$', download_webpage, re.MULTILINE).group(1)
         info = json.loads(info)[0]
         # We pick mp3-320 for now, until format selection can be easily implemented.
         mp3_info = info['downloads']['mp3-320']
@@ -100,7 +94,7 @@ class BandcampIE(InfoExtractor):

 class BandcampAlbumIE(InfoExtractor):
     IE_NAME = 'Bandcamp:album'
-    _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)'
+    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+))'

     _TEST = {
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -123,13 +117,15 @@ class BandcampAlbumIE(InfoExtractor):
         'params': {
             'playlistend': 2
         },
-        'skip': 'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
+        'skip': 'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
     }

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('subdomain')
         title = mobj.group('title')
-        webpage = self._download_webpage(url, title)
+        display_id = title or playlist_id
+        webpage = self._download_webpage(url, display_id)
         tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
         if not tracks_paths:
             raise ExtractorError('The page doesn\'t contain any tracks')
@@ -139,6 +135,8 @@ class BandcampAlbumIE(InfoExtractor):
         title = self._search_regex(r'album_title : "(.*?)"', webpage, 'title')
         return {
             '_type': 'playlist',
+            'id': playlist_id,
+            'display_id': display_id,
             'title': title,
             'entries': entries,
         }
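
On the format-id parsing: split('-', 1) cuts only at the first dash, so codec and bitrate come out in one pass instead of three partition() calls. Illustrative value only:

    format_id = 'mp3-128'  # Bandcamp format ids look like '<codec>-<bitrate>'
    ext, abr_str = format_id.split('-', 1)
    print(ext, int(abr_str))  # -> mp3 128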

View File

@@ -0,0 +1,106 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_parse_qs,
+    ExtractorError,
+    int_or_none,
+    unified_strdate,
+)
+
+
+class BiliBiliIE(InfoExtractor):
+    _VALID_URL = r'http://www\.bilibili\.tv/video/av(?P<id>[0-9]+)/'
+
+    _TEST = {
+        'url': 'http://www.bilibili.tv/video/av1074402/',
+        'md5': '2c301e4dab317596e837c3e7633e7d86',
+        'info_dict': {
+            'id': '1074402',
+            'ext': 'flv',
+            'title': '【金坷垃】金泡沫',
+            'duration': 308,
+            'upload_date': '20140420',
+            'thumbnail': 're:^https?://.+\.jpg',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_code = self._search_regex(
+            r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
+
+        title = self._html_search_meta(
+            'media:title', video_code, 'title', fatal=True)
+        duration_str = self._html_search_meta(
+            'duration', video_code, 'duration')
+        if duration_str is None:
+            duration = None
+        else:
+            duration_mobj = re.match(
+                r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
+                duration_str)
+            duration = (
+                int_or_none(duration_mobj.group('hours'), default=0) * 3600 +
+                int(duration_mobj.group('minutes')) * 60 +
+                int(duration_mobj.group('seconds')))
+        upload_date = unified_strdate(self._html_search_meta(
+            'uploadDate', video_code, fatal=False))
+        thumbnail = self._html_search_meta(
+            'thumbnailUrl', video_code, 'thumbnail', fatal=False)
+
+        player_params = compat_parse_qs(self._html_search_regex(
+            r'<iframe .*?class="player" src="https://secure.bilibili.tv/secure,([^"]+)"',
+            webpage, 'player params'))
+
+        if 'cid' in player_params:
+            cid = player_params['cid'][0]
+
+            lq_doc = self._download_xml(
+                'http://interface.bilibili.cn/v_cdn_play?cid=%s' % cid,
+                video_id,
+                note='Downloading LQ video info'
+            )
+            lq_durl = lq_doc.find('.//durl')
+            formats = [{
+                'format_id': 'lq',
+                'quality': 1,
+                'url': lq_durl.find('./url').text,
+                'filesize': int_or_none(
+                    lq_durl.find('./size'), get_attr='text'),
+            }]
+
+            hq_doc = self._download_xml(
+                'http://interface.bilibili.cn/playurl?cid=%s' % cid,
+                video_id,
+                note='Downloading HQ video info',
+                fatal=False,
+            )
+            if hq_doc is not False:
+                hq_durl = hq_doc.find('.//durl')
+                formats.append({
+                    'format_id': 'hq',
+                    'quality': 2,
+                    'ext': 'flv',
+                    'url': hq_durl.find('./url').text,
+                    'filesize': int_or_none(
+                        hq_durl.find('./size'), get_attr='text'),
+                })
+        else:
+            raise ExtractorError('Unsupported player parameters: %r' % (player_params,))
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'duration': duration,
+            'upload_date': upload_date,
+            'thumbnail': thumbnail,
+        }
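
The duration regex in this new extractor accepts schema.org-style strings such as 'T5M8S' (hours optional, no leading 'P'). A quick check against the test's duration of 308 seconds, input value assumed:

    import re

    m = re.match(
        r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
        'T5M8S')  # assumed input: 5*60 + 8 = 308, the duration in the test
    print(int(m.group('hours') or 0) * 3600 +
          int(m.group('minutes')) * 60 +
          int(m.group('seconds')))  # -> 308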

View File

@@ -1,6 +1,5 @@
 from __future__ import unicode_literals

-import datetime
 import json
 import re
@@ -19,15 +18,16 @@ class BlinkxIE(InfoExtractor):
         'file': '8aQUy7GV.mp4',
         'md5': '2e9a07364af40163a908edbf10bb2492',
         'info_dict': {
-            "title": "Police Car Rolls Away",
-            "uploader": "stupidvideos.com",
-            "upload_date": "20131215",
-            "description": "A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!",
-            "duration": 14.886,
-            "thumbnails": [{
-                "width": 100,
-                "height": 76,
-                "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg",
+            'title': 'Police Car Rolls Away',
+            'uploader': 'stupidvideos.com',
+            'upload_date': '20131215',
+            'timestamp': 1387068000,
+            'description': 'A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!',
+            'duration': 14.886,
+            'thumbnails': [{
+                'width': 100,
+                'height': 76,
+                'url': 'http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg',
             }],
         },
     }
@@ -41,9 +41,6 @@ class BlinkxIE(InfoExtractor):
                    'video=%s' % video_id)
         data_json = self._download_webpage(api_url, display_id)
         data = json.loads(data_json)['api']['results'][0]
-        dt = datetime.datetime.fromtimestamp(data['pubdate_epoch'])
-        pload_date = dt.strftime('%Y%m%d')
-
         duration = None
         thumbnails = []
         formats = []
@@ -64,10 +61,7 @@ class BlinkxIE(InfoExtractor):
                 vcodec = remove_start(m['vcodec'], 'ff')
                 acodec = remove_start(m['acodec'], 'ff')
                 tbr = (int(m['vbr']) + int(m['abr'])) // 1000
-                format_id = (u'%s-%sk-%s' %
-                             (vcodec,
-                              tbr,
-                              m['w']))
+                format_id = u'%s-%sk-%s' % (vcodec, tbr, m['w'])
                 formats.append({
                     'format_id': format_id,
                     'url': m['link'],
@@ -88,7 +82,7 @@ class BlinkxIE(InfoExtractor):
             'title': data['title'],
             'formats': formats,
             'uploader': data['channel_name'],
-            'upload_date': pload_date,
+            'timestamp': data['pubdate_epoch'],
             'description': data.get('description'),
             'thumbnails': thumbnails,
             'duration': duration,

View File

@ -1,102 +1,124 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import datetime
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor from .subtitles import SubtitlesInfoExtractor
from ..utils import ( from ..utils import (
compat_str,
compat_urllib_request, compat_urllib_request,
unescapeHTML, unescapeHTML,
parse_iso8601,
compat_urlparse,
clean_html,
compat_str,
) )
class BlipTVIE(SubtitlesInfoExtractor): class BlipTVIE(SubtitlesInfoExtractor):
"""Information extractor for blip.tv""" _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z]+)))'
_VALID_URL = r'https?://(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(?P<presumptive_id>.+)$' _TESTS = [
{
_TESTS = [{ 'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352', 'md5': 'c6934ad0b6acf2bd920720ec888eb812',
'md5': 'c6934ad0b6acf2bd920720ec888eb812', 'info_dict': {
'info_dict': { 'id': '5779306',
'id': '5779306', 'ext': 'mov',
'ext': 'mov', 'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
'upload_date': '20111205', 'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
'description': 'md5:9bc31f227219cde65e47eeec8d2dc596', 'timestamp': 1323138843,
'uploader': 'Comic Book Resources - CBR TV', 'upload_date': '20111206',
'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3', 'uploader': 'cbr',
'uploader_id': '679425',
'duration': 81,
}
},
{
# https://github.com/rg3/youtube-dl/pull/2274
'note': 'Video with subtitles',
'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
'md5': '309f9d25b820b086ca163ffac8031806',
'info_dict': {
'id': '6586561',
'ext': 'mp4',
'title': 'Red vs. Blue Season 11 Episode 1',
'description': 'One-Zero-One',
'timestamp': 1371261608,
'upload_date': '20130615',
'uploader': 'redvsblue',
'uploader_id': '792887',
'duration': 279,
}
} }
}, { ]
# https://github.com/rg3/youtube-dl/pull/2274
'note': 'Video with subtitles',
'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
'md5': '309f9d25b820b086ca163ffac8031806',
'info_dict': {
'id': '6586561',
'ext': 'mp4',
'uploader': 'Red vs. Blue',
'description': 'One-Zero-One',
'upload_date': '20130614',
'title': 'Red vs. Blue Season 11 Episode 1',
}
}]
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
presumptive_id = mobj.group('presumptive_id') lookup_id = mobj.group('lookup_id')
# See https://github.com/rg3/youtube-dl/issues/857 # See https://github.com/rg3/youtube-dl/issues/857
-        embed_mobj = re.match(r'https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', url)
-        if embed_mobj:
-            info_url = 'http://blip.tv/play/%s.x?p=1' % embed_mobj.group(1)
-            info_page = self._download_webpage(info_url, embed_mobj.group(1))
-            video_id = self._search_regex(
-                r'data-episode-id="([0-9]+)', info_page, 'video_id')
-            return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV')
-
-        cchar = '&' if '?' in url else '?'
-        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
-        request = compat_urllib_request.Request(json_url)
-        request.add_header('User-Agent', 'iTunes/10.6.1')
-        json_data = self._download_json(request, video_id=presumptive_id)
-
-        if 'Post' in json_data:
-            data = json_data['Post']
-        else:
-            data = json_data
-
-        video_id = compat_str(data['item_id'])
-        upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
-        subtitles = {}
-        formats = []
-        if 'additionalMedia' in data:
-            for f in data['additionalMedia']:
-                if f.get('file_type_srt') == 1:
-                    LANGS = {
-                        'english': 'en',
-                    }
-                    lang = f['role'].rpartition('-')[-1].strip().lower()
-                    langcode = LANGS.get(lang, lang)
-                    subtitles[langcode] = f['url']
-                    continue
-                if not int(f['media_width']):  # filter m3u8
-                    continue
-                formats.append({
-                    'url': f['url'],
-                    'format_id': f['role'],
-                    'width': int(f['media_width']),
-                    'height': int(f['media_height']),
-                })
-        else:
-            formats.append({
-                'url': data['media']['url'],
-                'width': int(data['media']['width']),
-                'height': int(data['media']['height']),
-            })
+        if lookup_id:
+            info_page = self._download_webpage(
+                'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id')
+            video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id')
+        else:
+            video_id = mobj.group('id')
+
+        rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')
+
+        def blip(s):
+            return '{http://blip.tv/dtd/blip/1.0}%s' % s
+
+        def media(s):
+            return '{http://search.yahoo.com/mrss/}%s' % s
+
+        def itunes(s):
+            return '{http://www.itunes.com/dtds/podcast-1.0.dtd}%s' % s
+
+        item = rss.find('channel/item')
+
+        video_id = item.find(blip('item_id')).text
+        title = item.find('./title').text
+        description = clean_html(compat_str(item.find(blip('puredescription')).text))
+        timestamp = parse_iso8601(item.find(blip('datestamp')).text)
+        uploader = item.find(blip('user')).text
+        uploader_id = item.find(blip('userid')).text
+        duration = int(item.find(blip('runtime')).text)
+        media_thumbnail = item.find(media('thumbnail'))
+        thumbnail = media_thumbnail.get('url') if media_thumbnail is not None else item.find(itunes('image')).text
+        categories = [category.text for category in item.findall('category')]
+
+        formats = []
+        subtitles = {}
+
+        media_group = item.find(media('group'))
+        for media_content in media_group.findall(media('content')):
+            url = media_content.get('url')
+            role = media_content.get(blip('role'))
+            msg = self._download_webpage(
+                url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
+                video_id, 'Resolving URL for %s' % role)
+            real_url = compat_urlparse.parse_qs(msg)['message'][0]
+
+            media_type = media_content.get('type')
+            if media_type == 'text/srt' or url.endswith('.srt'):
+                LANGS = {
+                    'english': 'en',
+                }
+                lang = role.rpartition('-')[-1].strip().lower()
+                langcode = LANGS.get(lang, lang)
+                subtitles[langcode] = url
+            elif media_type.startswith('video/'):
+                formats.append({
+                    'url': real_url,
+                    'format_id': role,
+                    'format_note': media_type,
+                    'vcodec': media_content.get(blip('vcodec')),
+                    'acodec': media_content.get(blip('acodec')),
+                    'filesize': media_content.get('filesize'),
+                    'width': int(media_content.get('width')),
+                    'height': int(media_content.get('height')),
+                })
         self._sort_formats(formats)

         # subtitles
@@ -107,12 +129,14 @@ class BlipTVIE(SubtitlesInfoExtractor):
         return {
             'id': video_id,
-            'uploader': data['display_name'],
-            'upload_date': upload_date,
-            'title': data['title'],
-            'thumbnail': data['thumbnailUrl'],
-            'description': data['description'],
-            'user_agent': 'iTunes/10.6.1',
+            'title': title,
+            'description': description,
+            'timestamp': timestamp,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'duration': duration,
+            'thumbnail': thumbnail,
+            'categories': categories,
             'formats': formats,
             'subtitles': video_subtitles,
         }
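The rewritten BlipTV extractor reads all metadata from the show's RSS feed, using tiny helpers to build the namespace-qualified tag names that ElementTree expects. A minimal standalone sketch of that pattern, against a made-up feed snippet (the values below are illustrative, not a real blip.tv response):

    import xml.etree.ElementTree as ET

    # Hypothetical RSS snippet in the same shape as a blip.tv flash feed
    SAMPLE = '''<rss xmlns:blip="http://blip.tv/dtd/blip/1.0">
    <channel><item>
      <title>Example episode</title>
      <blip:item_id>1234567</blip:item_id>
      <blip:runtime>95</blip:runtime>
    </item></channel></rss>'''

    def blip(s):
        # ElementTree addresses namespaced tags as '{namespace-uri}tag'
        return '{http://blip.tv/dtd/blip/1.0}%s' % s

    item = ET.fromstring(SAMPLE).find('channel/item')
    print(item.find('./title').text)              # Example episode
    print(item.find(blip('item_id')).text)        # 1234567
    print(int(item.find(blip('runtime')).text))   # 95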
View File
@@ -1,53 +1,72 @@
 # encoding: utf-8
+from __future__ import unicode_literals
+
 import re

 from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..utils import (
+    unified_strdate,
+    url_basename,
+)


 class CanalplusIE(InfoExtractor):
-    _VALID_URL = r'https?://(www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>\d+))'
+    _VALID_URL = r'https?://(?:www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
     _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
-    IE_NAME = u'canalplus.fr'
+    IE_NAME = 'canalplus.fr'

     _TEST = {
-        u'url': u'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
-        u'file': u'922470.flv',
-        u'info_dict': {
-            u'title': u'Zapping - 26/08/13',
-            u'description': u'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
-            u'upload_date': u'20130826',
-        },
-        u'params': {
-            u'skip_download': True,
+        'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
+        'md5': '3db39fb48b9685438ecf33a1078023e4',
+        'info_dict': {
+            'id': '922470',
+            'ext': 'flv',
+            'title': 'Zapping - 26/08/13',
+            'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
+            'upload_date': '20130826',
         },
     }

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.groupdict().get('id')
+
+        # Beware, some subclasses do not define an id group
+        display_id = url_basename(mobj.group('path'))
+
         if video_id is None:
-            webpage = self._download_webpage(url, mobj.group('path'))
-            video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, u'video id')
+            webpage = self._download_webpage(url, display_id)
+            video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, 'video id')

         info_url = self._VIDEO_INFO_TEMPLATE % video_id
-        doc = self._download_xml(info_url,video_id,
-                                 u'Downloading video info')
+        doc = self._download_xml(info_url, video_id, 'Downloading video XML')

-        self.report_extraction(video_id)
         video_info = [video for video in doc if video.find('ID').text == video_id][0]
-        infos = video_info.find('INFOS')
         media = video_info.find('MEDIA')
-        formats = [media.find('VIDEOS/%s' % format)
-                   for format in ['BAS_DEBIT', 'HAUT_DEBIT', 'HD']]
-        video_url = [format.text for format in formats if format is not None][-1]
+        infos = video_info.find('INFOS')

-        return {'id': video_id,
-                'title': u'%s - %s' % (infos.find('TITRAGE/TITRE').text,
-                                       infos.find('TITRAGE/SOUS_TITRE').text),
-                'url': video_url,
-                'ext': 'flv',
-                'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
-                'thumbnail': media.find('IMAGES/GRAND').text,
-                'description': infos.find('DESCRIPTION').text,
-                'view_count': int(infos.find('NB_VUES').text),
-                }
+        preferences = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']
+
+        formats = [
+            {
+                'url': fmt.text + '?hdcore=2.11.3' if fmt.tag == 'HDS' else fmt.text,
+                'format_id': fmt.tag,
+                'ext': 'mp4' if fmt.tag == 'HLS' else 'flv',
+                'preference': preferences.index(fmt.tag) if fmt.tag in preferences else -1,
+            } for fmt in media.find('VIDEOS') if fmt.text
+        ]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': '%s - %s' % (infos.find('TITRAGE/TITRE').text,
+                                  infos.find('TITRAGE/SOUS_TITRE').text),
+            'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
+            'thumbnail': media.find('IMAGES/GRAND').text,
+            'description': infos.find('DESCRIPTION').text,
+            'view_count': int(infos.find('NB_VUES').text),
+            'like_count': int(infos.find('NB_LIKES').text),
+            'comment_count': int(infos.find('NB_COMMENTS').text),
+            'formats': formats,
+        }
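The new Canalplus code ranks formats by their position in a hand-ordered preference list rather than keeping a single URL. A minimal sketch of that ranking (tag names taken from the diff, data invented):

    PREFERENCES = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']

    def preference(tag):
        # Unknown tags sort below every known one
        return PREFERENCES.index(tag) if tag in PREFERENCES else -1

    formats = [{'format_id': t, 'preference': preference(t)} for t in ('HD', 'UNKNOWN', 'MOBILE')]
    formats.sort(key=lambda f: f['preference'])
    print([f['format_id'] for f in formats])  # ['UNKNOWN', 'MOBILE', 'HD']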
View File
@@ -1,10 +1,12 @@
 # encoding: utf-8
 from __future__ import unicode_literals

 import re

 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
+    int_or_none,
 )
@@ -13,9 +15,10 @@ class CinemassacreIE(InfoExtractor):
     _TESTS = [
         {
             'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
-            'file': '19911.mp4',
-            'md5': '782f8504ca95a0eba8fc9177c373eec7',
+            'md5': 'fde81fbafaee331785f58cd6c0d46190',
             'info_dict': {
+                'id': '19911',
+                'ext': 'mp4',
                 'upload_date': '20121110',
                 'title': '“Angry Video Game Nerd: The Movie” Trailer',
                 'description': 'md5:fb87405fcb42a331742a0dce2708560b',
@@ -23,9 +26,10 @@ class CinemassacreIE(InfoExtractor):
         },
         {
             'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
-            'file': '521be8ef82b16.mp4',
-            'md5': 'dec39ee5118f8d9cc067f45f9cbe3a35',
+            'md5': 'd72f10cd39eac4215048f62ab477a511',
             'info_dict': {
+                'id': '521be8ef82b16',
+                'ext': 'mp4',
                 'upload_date': '20131002',
                 'title': 'The Mummys Hand (1940)',
             },
@@ -50,29 +54,40 @@ class CinemassacreIE(InfoExtractor):
             r'<div class="entry-content">(?P<description>.+?)</div>',
             webpage, 'description', flags=re.DOTALL, fatal=False)

-        playerdata = self._download_webpage(playerdata_url, video_id)
+        playerdata = self._download_webpage(playerdata_url, video_id, 'Downloading player webpage')
+        video_thumbnail = self._search_regex(
+            r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
+        sd_url = self._search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
+        videolist_url = self._search_regex(r'file: \'([^\']+\.smil)\'}', playerdata, 'videolist_url')

-        sd_url = self._html_search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
-        hd_url = self._html_search_regex(
-            r'file: \'([^\']+)\', label: \'HD\'', playerdata, 'hd_file',
-            default=None)
-        video_thumbnail = self._html_search_regex(r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
+        videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')

-        formats = [{
-            'url': sd_url,
-            'ext': 'mp4',
-            'format': 'sd',
-            'format_id': 'sd',
-            'quality': 1,
-        }]
-        if hd_url:
-            formats.append({
-                'url': hd_url,
-                'ext': 'mp4',
-                'format': 'hd',
-                'format_id': 'hd',
-                'quality': 2,
-            })
+        formats = []
+        baseurl = sd_url[:sd_url.rfind('/')+1]
+        for video in videolist.findall('.//video'):
+            src = video.get('src')
+            if not src:
+                continue
+            file_ = src.partition(':')[-1]
+            width = int_or_none(video.get('width'))
+            height = int_or_none(video.get('height'))
+            bitrate = int_or_none(video.get('system-bitrate'))
+            format = {
+                'url': baseurl + file_,
+                'format_id': src.rpartition('.')[0].rpartition('_')[-1],
+            }
+            if width or height:
+                format.update({
+                    'tbr': bitrate // 1000 if bitrate else None,
+                    'width': width,
+                    'height': height,
+                })
+            else:
+                format.update({
+                    'abr': bitrate // 1000 if bitrate else None,
+                    'vcodec': 'none',
+                })
+            formats.append(format)
         self._sort_formats(formats)

         return {
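The Cinemassacre rewrite walks a SMIL videolist and treats entries without width/height as audio-only. A standalone sketch of that distinction, with an invented SMIL document in the same shape:

    import xml.etree.ElementTree as ET

    SMIL = '''<smil><body><switch>
      <video src="mp4:trailer_hd.mp4" width="1280" height="720" system-bitrate="2500000"/>
      <video src="mp4:trailer_audio.mp4" system-bitrate="128000"/>
    </switch></body></smil>'''

    for video in ET.fromstring(SMIL).findall('.//video'):
        bitrate = int(video.get('system-bitrate'))
        if video.get('width') or video.get('height'):
            print('video stream, ~%d kbps' % (bitrate // 1000))
        else:
            print('audio-only stream, ~%d kbps' % (bitrate // 1000))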
View File
@@ -0,0 +1,58 @@
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    qualities,
)


class ClubicIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?clubic\.com/video/[^/]+/video.*-(?P<id>[0-9]+)\.html'

    _TEST = {
        'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html',
        'md5': '1592b694ba586036efac1776b0b43cd3',
        'info_dict': {
            'id': '448474',
            'ext': 'mp4',
            'title': 'Clubic Week 2.0 : le FBI se lance dans la photo d\u0092identité',
            'description': 're:Gueule de bois chez Nokia. Le constructeur a indiqué cette.*',
            'thumbnail': 're:^http://img\.clubic\.com/.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        player_url = 'http://player.m6web.fr/v1/player/clubic/%s.html' % video_id
        player_page = self._download_webpage(player_url, video_id)

        config_json = self._search_regex(
            r'(?m)M6\.Player\.config\s*=\s*(\{.+?\});$', player_page,
            'configuration')
        config = json.loads(config_json)

        video_info = config['videoInfo']
        sources = config['sources']
        quality_order = qualities(['sd', 'hq'])

        formats = [{
            'format_id': src['streamQuality'],
            'url': src['src'],
            'quality': quality_order(src['streamQuality']),
        } for src in sources]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_info['title'],
            'formats': formats,
            'description': clean_html(video_info.get('description')),
            'thumbnail': config.get('poster'),
        }
View File
@@ -33,7 +33,7 @@ class CNETIE(InfoExtractor):
         webpage = self._download_webpage(url, display_id)

         data_json = self._html_search_regex(
-            r"<div class=\"cnetVideoPlayer\" data-cnet-video-options='([^']+)'",
+            r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'",
             webpage, 'data json')
         data = json.loads(data_json)
         vdata = data['video']
View File
@@ -117,6 +117,8 @@ class InfoExtractor(object):
     webpage_url:    The url to the video webpage, if given to youtube-dl it
                     should allow to get the same result again. (It will be set
                     by YoutubeDL if it's missing)
+    categories:     A list of categories that the video falls in, for example
+                    ["Sports", "Berlin"]

     Unless mentioned otherwise, the fields should be Unicode strings.

@@ -246,10 +248,11 @@ class InfoExtractor(object):
                 url = url_or_request.get_full_url()
             except AttributeError:
                 url = url_or_request
-            if len(url) > 200:
-                h = u'___' + hashlib.md5(url.encode('utf-8')).hexdigest()
-                url = url[:200 - len(h)] + h
-            raw_filename = ('%s_%s.dump' % (video_id, url))
+            basen = '%s_%s' % (video_id, url)
+            if len(basen) > 240:
+                h = u'___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
+                basen = basen[:240 - len(h)] + h
+            raw_filename = basen + '.dump'
             filename = sanitize_filename(raw_filename, restricted=True)
             self.to_screen(u'Saving request to ' + filename)
             with open(filename, 'wb') as outf:
@@ -283,9 +286,12 @@ class InfoExtractor(object):
     def _download_xml(self, url_or_request, video_id,
                       note=u'Downloading XML', errnote=u'Unable to download XML',
-                      transform_source=None):
+                      transform_source=None, fatal=True):
         """Return the xml as an xml.etree.ElementTree.Element"""
-        xml_string = self._download_webpage(url_or_request, video_id, note, errnote)
+        xml_string = self._download_webpage(
+            url_or_request, video_id, note, errnote, fatal=fatal)
+        if xml_string is False:
+            return xml_string
         if transform_source:
             xml_string = transform_source(xml_string)
         return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
@@ -549,6 +555,23 @@ class InfoExtractor(object):
         )
         formats.sort(key=_formats_key)

+    def http_scheme(self):
+        """ Either "http:" or "https:", depending on the user's preferences """
+        return (
+            'http:'
+            if self._downloader.params.get('prefer_insecure', False)
+            else 'https:')
+
+    def _proto_relative_url(self, url, scheme=None):
+        if url is None:
+            return url
+        if url.startswith('//'):
+            if scheme is None:
+                scheme = self.http_scheme()
+            return scheme + url
+        else:
+            return url
+
     def _entry_formats_to_parts(self, entries):
         '''Transforms entries with formats to formats with parts. Used when joinparts is set.'''
         ekeys = None
@@ -615,3 +638,4 @@ class SearchInfoExtractor(InfoExtractor):
     @property
     def SEARCH_KEY(self):
         return self._SEARCH_KEY
+
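The new `_proto_relative_url` helper completes protocol-relative links such as `//example.com/v.mp4`. A standalone sketch of the same logic, with the prefer-insecure switch reduced to a plain boolean argument:

    def http_scheme(prefer_insecure=False):
        # Pick a default scheme for protocol-relative URLs, mirroring
        # the prefer_insecure behaviour in the diff above
        return 'http:' if prefer_insecure else 'https:'

    def proto_relative_url(url, scheme=None):
        if url is None or not url.startswith('//'):
            return url
        return (scheme or http_scheme()) + url

    print(proto_relative_url('//example.com/v.mp4'))           # https://example.com/v.mp4
    print(proto_relative_url('//example.com/v.mp4', 'http:'))  # http://example.com/v.mp4
    print(proto_relative_url('http://example.com/v.mp4'))      # returned unchanged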
View File
@@ -28,16 +28,18 @@ class CondeNastIE(InfoExtractor):
         'glamour': 'Glamour',
         'wmagazine': 'W Magazine',
         'vanityfair': 'Vanity Fair',
+        'cnevids': 'Condé Nast',
     }

-    _VALID_URL = r'http://(video|www)\.(?P<site>%s)\.com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
+    _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
     IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))

     _TEST = {
         'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
-        'file': '5171b343c2b4c00dd0c1ccb3.mp4',
         'md5': '1921f713ed48aabd715691f774c451f7',
         'info_dict': {
+            'id': '5171b343c2b4c00dd0c1ccb3',
+            'ext': 'mp4',
             'title': '3D Printed Speakers Lit With LED',
             'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
         }
@@ -55,12 +57,16 @@ class CondeNastIE(InfoExtractor):
         entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
         return self.playlist_result(entries, playlist_title=title)

-    def _extract_video(self, webpage):
-        description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>',
-                                               r'<div class="video-post-content">(.+?)</div>',
-                                               ],
-                                              webpage, 'description',
-                                              fatal=False, flags=re.DOTALL)
+    def _extract_video(self, webpage, url_type):
+        if url_type != 'embed':
+            description = self._html_search_regex(
+                [
+                    r'<div class="cne-video-description">(.+?)</div>',
+                    r'<div class="video-post-content">(.+?)</div>',
+                ],
+                webpage, 'description', fatal=False, flags=re.DOTALL)
+        else:
+            description = None
         params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
                                     'player params', flags=re.DOTALL)
         video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
@@ -99,12 +105,12 @@ class CondeNastIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         site = mobj.group('site')
         url_type = mobj.group('type')
-        id = mobj.group('id')
+        item_id = mobj.group('id')

-        self.to_screen(u'Extracting from %s with the Condé Nast extractor' % self._SITES[site])
-        webpage = self._download_webpage(url, id)
+        self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site])
+        webpage = self._download_webpage(url, item_id)

         if url_type == 'series':
             return self._extract_series(url, webpage)
         else:
-            return self._extract_video(webpage)
+            return self._extract_video(webpage, url_type)
View File
@@ -8,12 +8,11 @@ from .subtitles import SubtitlesInfoExtractor
 from ..utils import (
     compat_urllib_request,
     compat_str,
-    get_element_by_id,
     orderedSet,
     str_to_int,
     int_or_none,
     ExtractorError,
+    unescapeHTML,
 )


 class DailymotionBaseInfoExtractor(InfoExtractor):
@@ -189,7 +188,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
             webpage = self._download_webpage(request,
                                              id, u'Downloading page %s' % pagenum)

-            video_ids.extend(re.findall(r'data-id="(.+?)"', webpage))
+            video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))

             if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                 break
@@ -218,9 +217,9 @@ class DailymotionUserIE(DailymotionPlaylistIE):
         mobj = re.match(self._VALID_URL, url)
         user = mobj.group('user')
         webpage = self._download_webpage(url, user)
-        full_user = self._html_search_regex(
-            r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user),
-            webpage, u'user', flags=re.DOTALL)
+        full_user = unescapeHTML(self._html_search_regex(
+            r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
+            webpage, u'user', flags=re.DOTALL))

         return {
             '_type': 'playlist',
View File
@@ -0,0 +1,54 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class EmpflixIE(InfoExtractor):
    _VALID_URL = r'^https?://www\.empflix\.com/videos/.*?-(?P<id>[0-9]+)\.html'
    _TEST = {
        'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
        'md5': 'b1bc15b6412d33902d6e5952035fcabc',
        'info_dict': {
            'id': '33051',
            'ext': 'mp4',
            'title': 'Amateur Finger Fuck',
            'description': 'Amateur solo finger fucking.',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        age_limit = self._rta_search(webpage)

        video_title = self._html_search_regex(
            r'name="title" value="(?P<title>[^"]*)"', webpage, 'title')
        video_description = self._html_search_regex(
            r'name="description" value="([^"]*)"', webpage, 'description', fatal=False)

        cfg_url = self._html_search_regex(
            r'flashvars\.config = escape\("([^"]+)"',
            webpage, 'flashvars.config')

        cfg_xml = self._download_xml(
            cfg_url, video_id, note='Downloading metadata')

        formats = [
            {
                'url': item.find('videoLink').text,
                'format_id': item.find('res').text,
            } for item in cfg_xml.findall('./quality/item')
        ]

        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'formats': formats,
            'age_limit': age_limit,
        }
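The Empflix extractor turns each `<item>` of the player config XML into a format. A minimal sketch against a made-up config document (tag names from the extractor, values invented):

    import xml.etree.ElementTree as ET

    CFG = '''<config><quality>
      <item><res>240p</res><videoLink>http://example.com/v_240.mp4</videoLink></item>
      <item><res>480p</res><videoLink>http://example.com/v_480.mp4</videoLink></item>
    </quality></config>'''

    formats = [
        {'url': item.find('videoLink').text, 'format_id': item.find('res').text}
        for item in ET.fromstring(CFG).findall('./quality/item')
    ]
    print(formats)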
View File
@@ -1,4 +1,5 @@
-import os
+from __future__ import unicode_literals
+
 import re

 from .common import InfoExtractor
@@ -8,18 +9,23 @@ from ..utils import (
     compat_urllib_parse,
 )

+
 class ExtremeTubeIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
-    _TEST = {
-        u'url': u'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
-        u'file': u'652431.mp4',
-        u'md5': u'1fb9228f5e3332ec8c057d6ac36f33e0',
-        u'info_dict': {
-            u"title": u"Music Video 14 british euro brit european cumshots swallow",
-            u"uploader": u"unknown",
-            u"age_limit": 18,
+    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
+    _TESTS = [{
+        'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
+        'md5': '1fb9228f5e3332ec8c057d6ac36f33e0',
+        'info_dict': {
+            'id': '652431',
+            'ext': 'mp4',
+            'title': 'Music Video 14 british euro brit european cumshots swallow',
+            'uploader': 'unknown',
+            'age_limit': 18,
         }
-    }
+    }, {
+        'url': 'http://www.extremetube.com/gay/video/abcde-1234',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -30,11 +36,14 @@ class ExtremeTubeIE(InfoExtractor):
         req.add_header('Cookie', 'age_verified=1')
         webpage = self._download_webpage(req, video_id)

-        video_title = self._html_search_regex(r'<h1 [^>]*?title="([^"]+)"[^>]*>\1<', webpage, u'title')
-        uploader = self._html_search_regex(r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, u'uploader', fatal=False)
-        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&amp;', webpage, u'video_url'))
+        video_title = self._html_search_regex(
+            r'<h1 [^>]*?title="([^"]+)"[^>]*>\1<', webpage, 'title')
+        uploader = self._html_search_regex(
+            r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
+            fatal=False)
+        video_url = compat_urllib_parse.unquote(self._html_search_regex(
+            r'video_url=(.+?)&amp;', webpage, 'video_url'))
         path = compat_urllib_parse_urlparse(video_url).path
-        extension = os.path.splitext(path)[1][1:]
         format = path.split('/')[5].split('_')[:2]
         format = "-".join(format)
@@ -43,7 +52,6 @@ class ExtremeTubeIE(InfoExtractor):
             'title': video_title,
             'uploader': uploader,
             'url': video_url,
-            'ext': extension,
             'format': format,
             'format_id': format,
             'age_limit': 18,
View File
@@ -76,9 +76,8 @@ class FacebookIE(InfoExtractor):

             check_form = {
                 'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
-                'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, 'nh'),
+                'h': self._search_regex(r'name="h" value="(\w*?)"', login_results, 'h'),
                 'name_action_selected': 'dont_save',
-                'submit[Continue]': self._search_regex(r'<button[^>]+value="(.*?)"[^>]+name="submit\[Continue\]"', login_results, 'continue'),
             }
             check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
View File
@@ -0,0 +1,60 @@
#! -*- coding: utf-8 -*-
from __future__ import unicode_literals

import re
import hashlib

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    compat_urllib_request,
    compat_urlparse,
)


class FC2IE(InfoExtractor):
    _VALID_URL = r'^http://video\.fc2\.com/(?P<lang>[^/]+)/content/(?P<id>[^/]+)'
    IE_NAME = 'fc2'
    _TEST = {
        'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
        'md5': 'a6ebe8ebe0396518689d963774a54eb7',
        'info_dict': {
            'id': '20121103kUan1KHs',
            'ext': 'flv',
            'title': 'Boxing again with Puff',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        self._downloader.cookiejar.clear_session_cookies()  # must clear

        title = self._og_search_title(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        refer = url.replace('/content/', '/a/content/')
        mimi = hashlib.md5(video_id + '_gGddgPfeaf_gzyr').hexdigest()

        info_url = (
            "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
            format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))

        info_webpage = self._download_webpage(
            info_url, video_id, note='Downloading info page')
        info = compat_urlparse.parse_qs(info_webpage)

        if 'err_code' in info:
            raise ExtractorError('Error code: %s' % info['err_code'][0])

        video_url = info['filepath'][0] + '?mid=' + info['mid'][0]

        return {
            'id': video_id,
            'title': info['title'][0],
            'url': video_url,
            'ext': 'flv',
            'thumbnail': thumbnail,
        }
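FC2 signs its info request with a salted MD5 of the video id (the `mimi` query parameter). A standalone sketch of that token; note that the hash input has to be encoded to bytes, which the code above leaves implicit and which only works unmodified on Python 2 (the salt string is taken from the extractor, everything else is illustrative):

    import hashlib

    def fc2_mimi(video_id):
        # Salted MD5 token sent as 'mimi' in the ginfo.php query
        return hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()

    print(fc2_mimi('20121103kUan1KHs'))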
View File
@@ -6,6 +6,7 @@ from .common import InfoExtractor
 from ..utils import (
     compat_str,
     compat_urllib_parse,
+    ExtractorError,
 )


@@ -58,9 +59,17 @@ class FiveMinIE(InfoExtractor):
             'isPlayerSeed': 'true',
             'url': embed_url,
         })
-        info = self._download_json(
+        response = self._download_json(
             'https://syn.5min.com/handlers/SenseHandler.ashx?' + query,
-            video_id)['binding'][0]
+            video_id)
+
+        if not response['success']:
+            err_msg = response['errorMessage']
+            if err_msg == 'ErrorVideoUserNotGeo':
+                msg = 'Video not available from your location'
+            else:
+                msg = 'Aol said: %s' % err_msg
+            raise ExtractorError(msg, expected=True, video_id=video_id)
+        info = response['binding'][0]

         second_id = compat_str(int(video_id[:-2]) + 1)
         formats = []
View File
@@ -48,24 +48,36 @@ class PluzzIE(FranceTVBaseInfoExtractor):

 class FranceTvInfoIE(FranceTVBaseInfoExtractor):
     IE_NAME = 'francetvinfo.fr'
-    _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+)\.html'
+    _VALID_URL = r'https?://www\.francetvinfo\.fr/.*/(?P<title>.+)\.html'

-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
-        'file': '84981923.mp4',
         'info_dict': {
+            'id': '84981923',
+            'ext': 'mp4',
             'title': 'Soir 3',
         },
         'params': {
             'skip_download': True,
         },
-    }
+    }, {
+        'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
+        'info_dict': {
+            'id': 'EV_20019',
+            'ext': 'mp4',
+            'title': 'Débat des candidats à la Commission européenne',
+            'description': 'Débat des candidats à la Commission européenne',
+        },
+        'params': {
+            'skip_download': 'HLS (requires ffmpeg)'
+        }
+    }]

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         page_title = mobj.group('title')
         webpage = self._download_webpage(url, page_title)
-        video_id = self._search_regex(r'id-video=(\d+?)[@"]', webpage, 'video id')
+        video_id = self._search_regex(r'id-video=((?:[^0-9]*?_)?[0-9]+)[@"]', webpage, 'video id')
         return self._extract_video(video_id)
View File
@@ -4,22 +4,32 @@ import json
 import re

 from .common import InfoExtractor
+from ..utils import ExtractorError


 class FunnyOrDieIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
-        'file': '0732f586d7.mp4',
-        'md5': 'f647e9e90064b53b6e046e75d0241fbd',
+        'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
         'info_dict': {
-            'description': ('Lyrics changed to match the video. Spoken cameo '
-                'by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a '
-                'concept by Dustin McLean (DustFilms.com). Performed, edited, '
-                'and written by David A. Scott.'),
+            'id': '0732f586d7',
+            'ext': 'mp4',
             'title': 'Heart-Shaped Box: Literal Video Version',
+            'description': 'md5:ea09a01bc9a1c46d9ab696c01747c338',
+            'thumbnail': 're:^http:.*\.jpg$',
         },
-    }
+    }, {
+        'url': 'http://www.funnyordie.com/embed/e402820827',
+        'md5': 'ff4d83318f89776ed0250634cfaa8d36',
+        'info_dict': {
+            'id': 'e402820827',
+            'ext': 'mp4',
+            'title': 'Please Use This Song (Jon Lajoie)',
+            'description': 'md5:2ed27d364f5a805a6dba199faaf6681d',
+            'thumbnail': 're:^http:.*\.jpg$',
+        },
+    }]

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -27,27 +37,34 @@ class FunnyOrDieIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)

-        video_url = self._search_regex(
-            [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''],
-            webpage, 'video URL', flags=re.DOTALL)
+        links = re.findall(r'<source src="([^"]+/v)\d+\.([^"]+)" type=\'video', webpage)
+        if not links:
+            raise ExtractorError('No media links available for %s' % video_id)

-        if mobj.group('type') == 'embed':
-            post_json = self._search_regex(
-                r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details')
-            post = json.loads(post_json)
-            title = post['name']
-            description = post.get('description')
-            thumbnail = post.get('picture')
-        else:
-            title = self._og_search_title(webpage)
-            description = self._og_search_description(webpage)
-            thumbnail = None
+        links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)
+
+        bitrates = self._html_search_regex(r'<source src="[^"]+/v,((?:\d+,)+)\.mp4\.csmil', webpage, 'video bitrates')
+        bitrates = [int(b) for b in bitrates.rstrip(',').split(',')]
+        bitrates.sort()
+
+        formats = []
+
+        for bitrate in bitrates:
+            for link in links:
+                formats.append({
+                    'url': '%s%d.%s' % (link[0], bitrate, link[1]),
+                    'format_id': '%s-%d' % (link[1], bitrate),
+                    'vbr': bitrate,
+                })
+
+        post_json = self._search_regex(
+            r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details')
+        post = json.loads(post_json)

         return {
             'id': video_id,
-            'url': video_url,
-            'ext': 'mp4',
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
+            'title': post['name'],
+            'description': post.get('description'),
+            'thumbnail': post.get('picture'),
+            'formats': formats,
         }
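The rewritten FunnyOrDie extractor crosses every advertised bitrate with every source link to enumerate formats. A compact sketch of that pairing, with invented links and bitrates:

    links = [('http://example.com/video/v', 'webm'), ('http://example.com/video/v', 'mp4')]
    bitrates = [400, 750, 1500]

    formats = [
        {'url': '%s%d.%s' % (base, bitrate, ext),
         'format_id': '%s-%d' % (ext, bitrate),
         'vbr': bitrate}
        for bitrate in sorted(bitrates)
        for (base, ext) in links
    ]
    for f in formats:
        print(f['format_id'], f['url'])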
View File
@@ -15,7 +15,7 @@ class GamekingsIE(InfoExtractor):
             'id': '20130811',
             'ext': 'mp4',
             'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review',
-            'description': 'md5:632e61a9f97d700e83f43d77ddafb6a4',
+            'description': 'md5:36fd701e57e8c15ac8682a2374c99731',
         }
     }
View File
@@ -15,11 +15,12 @@ from ..utils import (
 class GameSpotIE(InfoExtractor):
     _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
     _TEST = {
-        "url": "http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/",
-        "file": "gs-2300-6410818.mp4",
-        "md5": "b2a30deaa8654fcccd43713a6b6a4825",
-        "info_dict": {
-            "title": "Arma 3 - Community Guide: SITREP I",
+        'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
+        'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
+        'info_dict': {
+            'id': 'gs-2300-6410818',
+            'ext': 'mp4',
+            'title': 'Arma 3 - Community Guide: SITREP I',
             'description': 'Check out this video where some of the basics of Arma 3 is explained.',
         }
     }
View File
@@ -239,6 +239,28 @@ class GenericIE(InfoExtractor):
                 'uploader_id': 'rbctv_2012_4',
             },
         },
+        # Condé Nast embed
+        {
+            'url': 'http://www.wired.com/2014/04/honda-asimo/',
+            'md5': 'ba0dfe966fa007657bd1443ee672db0f',
+            'info_dict': {
+                'id': '53501be369702d3275860000',
+                'ext': 'mp4',
+                'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
+            }
+        },
+        # Dailymotion embed
+        {
+            'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
+            'md5': '441aeeb82eb72c422c7f14ec533999cd',
+            'info_dict': {
+                'id': 'k2mm4bCdJ6CQ2i7c8o2',
+                'ext': 'mp4',
+                'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
+                'uploader': 'Spi0n',
+            },
+            'add_ie': ['Dailymotion'],
+        }
     ]

     def report_download_webpage(self, video_id):
@@ -323,6 +345,12 @@ class GenericIE(InfoExtractor):
         }

     def _real_extract(self, url):
+        if url.startswith('//'):
+            return {
+                '_type': 'url',
+                'url': self.http_scheme() + url,
+            }
+
         parsed_url = compat_urlparse.urlparse(url)
         if not parsed_url.scheme:
             default_search = self._downloader.params.get('default_search')
@@ -335,8 +363,13 @@ class GenericIE(InfoExtractor):
                 return self.url_result('http://' + url)
             else:
                 if default_search == 'auto_warning':
-                    self._downloader.report_warning(
-                        'Falling back to youtube search for %s . Set --default-search to "auto" to suppress this warning.' % url)
+                    if re.match(r'^(?:url|URL)$', url):
+                        raise ExtractorError(
+                            'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
+                            expected=True)
+                    else:
+                        self._downloader.report_warning(
+                            'Falling back to youtube search for %s . Set --default-search to "auto" to suppress this warning.' % url)
                 return self.url_result('ytsearch:' + url)
         else:
             assert ':' in default_search
@@ -459,7 +492,7 @@ class GenericIE(InfoExtractor):
         matches = re.findall(
             r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
         if matches:
-            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Dailymotion')
+            urlrs = [self.url_result(unescapeHTML(tuppl[1]))
                      for tuppl in matches]
             return self.playlist_result(
                 urlrs, playlist_id=video_id, playlist_title=video_title)
@@ -485,6 +518,22 @@ class GenericIE(InfoExtractor):
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')

+        # Look for embedded condenast player
+        matches = re.findall(
+            r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
+            webpage)
+        if matches:
+            return {
+                '_type': 'playlist',
+                'entries': [{
+                    '_type': 'url',
+                    'ie_key': 'CondeNast',
+                    'url': ma,
+                } for ma in matches],
+                'title': video_title,
+                'id': video_id,
+            }
+
         # Look for Bandcamp pages with custom domain
         mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
         if mobj is not None:
@@ -505,7 +554,7 @@ class GenericIE(InfoExtractor):
             return OoyalaIE._build_url_result(mobj.group('ec'))

         # Look for Aparat videos
-        mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
+        mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
         if mobj is not None:
             return self.url_result(mobj.group(1), 'Aparat')
@@ -516,7 +565,7 @@ class GenericIE(InfoExtractor):

         # Look for embedded NovaMov-based player
         mobj = re.search(
-            r'''(?x)<iframe[^>]+?src=(["\'])
+            r'''(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
                     (?P<url>http://(?:(?:embed|www)\.)?
                         (?:novamov\.com|
                            nowvideo\.(?:ch|sx|eu|at|ag|co)|
@@ -589,65 +638,86 @@ class GenericIE(InfoExtractor):
         if smotri_url:
             return self.url_result(smotri_url, 'Smotri')

+        # Look for embedded soundcloud player
+        mobj = re.search(
+            r'<iframe src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
+            webpage)
+        if mobj is not None:
+            url = unescapeHTML(mobj.group('url'))
+            return self.url_result(url)
+
         # Start with something easy: JW Player in SWFObject
-        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
-        if mobj is None:
+        found = re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+        if not found:
             # Look for gorilla-vid style embedding
-            mobj = re.search(r'(?s)(?:jw_plugins|JWPlayerOptions).*?file\s*:\s*["\'](.*?)["\']', webpage)
-        if mobj is None:
+            found = re.findall(r'''(?sx)
+                (?:
+                    jw_plugins|
+                    JWPlayerOptions|
+                    jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
+                )
+                .*?file\s*:\s*["\'](.*?)["\']''', webpage)
+        if not found:
             # Broaden the search a little bit
-            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
-        if mobj is None:
-            # Broaden the search a little bit: JWPlayer JS loader
-            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
-        if mobj is None:
+            found = re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+        if not found:
+            # Broaden the findall a little bit: JWPlayer JS loader
+            found = re.findall(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
+        if not found:
             # Try to find twitter cards info
-            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
-        if mobj is None:
+            found = re.findall(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
+        if not found:
             # We look for Open Graph info:
             # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
-            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
+            m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
             # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
             if m_video_type is not None:
-                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
-        if mobj is None:
+                found = re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
+        if not found:
             # HTML5 video
-            mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
-        if mobj is None:
-            mobj = re.search(
+            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage)
+        if not found:
+            found = re.search(
                 r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
                 r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
                 webpage)
-            if mobj:
-                new_url = mobj.group(1)
+            if found:
+                new_url = found.group(1)
                 self.report_following_redirect(new_url)
                 return {
                     '_type': 'url',
                     'url': new_url,
                 }
-        if mobj is None:
+        if not found:
             raise ExtractorError('Unsupported URL: %s' % url)

-        # It's possible that one of the regexes
-        # matched, but returned an empty group:
-        if mobj.group(1) is None:
-            raise ExtractorError('Did not find a valid video URL at %s' % url)
-
-        video_url = mobj.group(1)
-        video_url = compat_urlparse.urljoin(url, video_url)
-        video_id = compat_urllib_parse.unquote(os.path.basename(video_url))
-
-        # Sometimes, jwplayer extraction will result in a YouTube URL
-        if YoutubeIE.suitable(video_url):
-            return self.url_result(video_url, 'Youtube')
-
-        # here's a fun little line of code for you:
-        video_id = os.path.splitext(video_id)[0]
-
-        return {
-            'id': video_id,
-            'url': video_url,
-            'uploader': video_uploader,
-            'title': video_title,
-        }
+        entries = []
+        for video_url in found:
+            video_url = compat_urlparse.urljoin(url, video_url)
+            video_id = compat_urllib_parse.unquote(os.path.basename(video_url))
+
+            # Sometimes, jwplayer extraction will result in a YouTube URL
+            if YoutubeIE.suitable(video_url):
+                entries.append(self.url_result(video_url, 'Youtube'))
+                continue
+
+            # here's a fun little line of code for you:
+            video_id = os.path.splitext(video_id)[0]
+
+            entries.append({
+                'id': video_id,
+                'url': video_url,
+                'uploader': video_uploader,
+                'title': video_title,
+            })
+
+        if len(entries) == 1:
+            return entries[0]
+        else:
+            for num, e in enumerate(entries, start=1):
+                e['title'] = '%s (%d)' % (e['title'], num)
+            return {
+                '_type': 'playlist',
+                'entries': entries,
+            }
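The generic extractor now collects every match with `re.findall` instead of stopping at the first hit, and wraps multiple hits in a playlist with numbered titles. A compact sketch of that final wrapping step:

    def wrap_entries(entries):
        # One hit: return it directly; several: number the titles and
        # return a playlist-shaped dict, as the code above does
        if len(entries) == 1:
            return entries[0]
        for num, e in enumerate(entries, start=1):
            e['title'] = '%s (%d)' % (e['title'], num)
        return {'_type': 'playlist', 'entries': entries}

    print(wrap_entries([{'title': 'clip'}]))
    print(wrap_entries([{'title': 'clip'}, {'title': 'clip'}]))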
View File
@@ -0,0 +1,42 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class HentaiStigmaIE(InfoExtractor):
    _VALID_URL = r'^https?://hentai\.animestigma\.com/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://hentai.animestigma.com/inyouchuu-etsu-bonus/',
        'md5': '4e3d07422a68a4cc363d8f57c8bf0d23',
        'info_dict': {
            'id': 'inyouchuu-etsu-bonus',
            'ext': 'mp4',
            'title': 'Inyouchuu Etsu Bonus',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(
            r'<h2 class="posttitle"><a[^>]*>([^<]+)</a>',
            webpage, 'title')
        wrap_url = self._html_search_regex(
            r'<iframe src="([^"]+mp4)"', webpage, 'wrapper url')
        wrap_webpage = self._download_webpage(wrap_url, video_id)

        video_url = self._html_search_regex(
            r'clip:\s*{\s*url: "([^"]*)"', wrap_webpage, 'video url')

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'age_limit': 18,
        }
View File
@@ -5,8 +5,8 @@ import re
 from .common import InfoExtractor


-class StatigramIE(InfoExtractor):
-    _VALID_URL = r'https?://(www\.)?statigr\.am/p/(?P<id>[^/]+)'
+class IconosquareIE(InfoExtractor):
+    _VALID_URL = r'https?://(www\.)?(?:iconosquare\.com|statigr\.am)/p/(?P<id>[^/]+)'
     _TEST = {
         'url': 'http://statigr.am/p/522207370455279102_24101272',
         'md5': '6eb93b882a3ded7c378ee1d6884b1814',
@@ -15,6 +15,7 @@ class StatigramIE(InfoExtractor):
             'ext': 'mp4',
             'uploader_id': 'aguynamedpatrick',
             'title': 'Instagram photo by @aguynamedpatrick (Patrick Janelle)',
+            'description': 'md5:644406a9ec27457ed7aa7a9ebcd4ce3d',
         },
     }

@@ -25,7 +26,7 @@ class StatigramIE(InfoExtractor):
         html_title = self._html_search_regex(
             r'<title>(.+?)</title>',
             webpage, 'title')
-        title = re.sub(r'(?: *\(Videos?\))? \| Statigram$', '', html_title)
+        title = re.sub(r'(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)$', '', html_title)
         uploader_id = self._html_search_regex(
             r'@([^ ]+)', title, 'uploader name', fatal=False)

@@ -33,6 +34,7 @@ class StatigramIE(InfoExtractor):
             'id': video_id,
             'url': self._og_search_video_url(webpage),
             'title': title,
+            'description': self._og_search_description(webpage),
             'thumbnail': self._og_search_thumbnail(webpage),
             'uploader_id': uploader_id
         }
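The Statigram-to-Iconosquare rename keeps old statigr.am URLs working and strips either site name from the page title. A quick check of that title regex on two invented titles:

    import re

    for html_title in ('My clip (Video) | Iconosquare', 'My clip | Statigram'):
        print(re.sub(r'(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)$', '', html_title))
    # both lines print: My clip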
View File
@@ -106,7 +106,7 @@ class OneUPIE(IGNIE):

     _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'

-    _TEST = {
+    _TESTS = [{
         'url': 'http://gamevideos.1up.com/video/id/34976',
         'md5': '68a54ce4ebc772e4b71e3123d413163d',
         'info_dict': {
@@ -115,10 +115,7 @@ class OneUPIE(IGNIE):
             'title': 'Sniper Elite V2 - Trailer',
             'description': 'md5:5d289b722f5a6d940ca3136e9dae89cf',
         }
-    }
-
-    # Override IGN tests
-    _TESTS = []
+    }]

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
View File
@@ -11,16 +11,15 @@ from ..utils import (
 class InfoQIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?infoq\.com/[^/]+/(?P<id>[^/]+)$'

     _TEST = {
-        "name": "InfoQ",
-        "url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
-        "file": "12-jan-pythonthings.mp4",
-        "info_dict": {
-            "description": "Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.",
-            "title": "A Few of My Favorite [Python] Things",
-        },
-        "params": {
-            "skip_download": True,
+        'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
+        'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
+        'info_dict': {
+            'id': '12-jan-pythonthings',
+            'ext': 'mp4',
+            'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
+            'title': 'A Few of My Favorite [Python] Things',
         },
     }

@@ -30,26 +29,39 @@ class InfoQIE(InfoExtractor):

         webpage = self._download_webpage(url, video_id)

+        video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
+        video_description = self._html_search_meta('description', webpage, 'description')
+
+        # The server URL is hardcoded
+        video_url = 'rtmpe://video.infoq.com/cfx/st/'
+
         # Extract video URL
-        encoded_id = self._search_regex(r"jsclassref ?= ?'([^']*)'", webpage, 'encoded id')
+        encoded_id = self._search_regex(
+            r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
         real_id = compat_urllib_parse.unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
-        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
+        playpath = 'mp4:' + real_id

-        # Extract title
-        video_title = self._search_regex(r'contentTitle = "(.*?)";',
-                                         webpage, 'title')
-
-        # Extract description
-        video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
-                                                    webpage, 'description', fatal=False)
-
-        video_filename = video_url.split('/')[-1]
+        video_filename = playpath.split('/')[-1]
         video_id, extension = video_filename.split('.')

+        http_base = self._search_regex(
+            r'EXPRESSINSTALL_SWF\s*=\s*"(https?://[^/"]+/)', webpage,
+            'HTTP base URL')
+
+        formats = [{
+            'format_id': 'rtmp',
+            'url': video_url,
+            'ext': extension,
+            'play_path': playpath,
+        }, {
+            'format_id': 'http',
+            'url': http_base + real_id,
+        }]
+        self._sort_formats(formats)
+
         return {
             'id': video_id,
-            'url': video_url,
             'title': video_title,
-            'ext': extension,  # Extension is always(?) mp4, but seems to be flv
             'description': video_description,
+            'formats': formats,
         }
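InfoQ hides the media path in a URL-quoted, base64-encoded blob, and the RTMP play path is then 'mp4:' plus that path. A standalone Python 3 sketch of the decode step, round-tripping a made-up path:

    import base64
    from urllib.parse import quote, unquote

    # Build a hypothetical encoded id the way the page would carry it
    real_path = 'presentations/12-jan-pythonthings.mp4'
    encoded_id = base64.b64encode(quote(real_path).encode('ascii')).decode('ascii')

    # Decode it the way the extractor does
    decoded = unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
    print('mp4:' + decoded)  # mp4:presentations/12-jan-pythonthings.mp4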
View File
@@ -14,7 +14,7 @@ class JukeboxIE(InfoExtractor):
     _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
     _TEST = {
         'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
-        'md5': '5dc6477e74b1e37042ac5acedd8413e5',
+        'md5': '1574e9b4d6438446d5b7dbcdf2786276',
         'info_dict': {
             'id': 'r303r',
             'ext': 'flv',
View File
@@ -2,7 +2,6 @@
 from __future__ import unicode_literals

 import re
-import datetime

 from .common import InfoExtractor

@@ -19,6 +18,7 @@ class MailRuIE(InfoExtractor):
             'id': '46301138',
             'ext': 'mp4',
             'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
+            'timestamp': 1393232740,
             'upload_date': '20140224',
             'uploader': 'sonypicturesrus',
             'uploader_id': 'sonypicturesrus@mail.ru',
@@ -43,7 +43,6 @@ class MailRuIE(InfoExtractor):

         thumbnail = movie['poster']
         duration = movie['duration']
-        upload_date = datetime.datetime.fromtimestamp(video_data['timestamp']).strftime('%Y%m%d')

         view_count = video_data['views_count']

         formats = [
@@ -57,7 +56,7 @@ class MailRuIE(InfoExtractor):
             'id': content_id,
             'title': title,
             'thumbnail': thumbnail,
-            'upload_date': upload_date,
+            'timestamp': video_data['timestamp'],
             'uploader': uploader,
             'uploader_id': uploader_id,
             'duration': duration,
View File
@@ -1,15 +1,18 @@
+from __future__ import unicode_literals
+
 import re

 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)


 class MDRIE(InfoExtractor):
-    _VALID_URL = r'^(?P<domain>(?:https?://)?(?:www\.)?mdr\.de)/mediathek/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)_.*'
+    _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'

-    # No tests, MDR regularily deletes its videos
+    _TEST = {
+        'url': 'http://www.mdr.de/fakt/video189002.html',
+        'only_matching': True,
+    }

     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
@@ -19,9 +22,9 @@ class MDRIE(InfoExtractor):
         # determine title and media streams from webpage
         html = self._download_webpage(url, video_id)

-        title = self._html_search_regex(r'<h2>(.*?)</h2>', html, u'title')
+        title = self._html_search_regex(r'<h[12]>(.*?)</h[12]>', html, 'title')
         xmlurl = self._search_regex(
-            r'(/mediathek/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, u'XML URL')
+            r'dataURL:\'(/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, 'XML URL')

         doc = self._download_xml(domain + xmlurl, video_id)
         formats = []
@@ -41,7 +44,7 @@ class MDRIE(InfoExtractor):
             if vbr_el is None:
                 format.update({
                     'vcodec': 'none',
-                    'format_id': u'%s-%d' % (media_type, abr),
+                    'format_id': '%s-%d' % (media_type, abr),
                 })
             else:
                 vbr = int(vbr_el.text) // 1000
@@ -49,12 +52,9 @@ class MDRIE(InfoExtractor):
                     'vbr': vbr,
                     'width': int(a.find('frameWidth').text),
                     'height': int(a.find('frameHeight').text),
-                    'format_id': u'%s-%d' % (media_type, vbr),
+                    'format_id': '%s-%d' % (media_type, vbr),
                 })
             formats.append(format)
-        if not formats:
-            raise ExtractorError(u'Could not find any valid formats')
-
         self._sort_formats(formats)

         return {
View File
@@ -4,9 +4,10 @@ import re

 from .common import InfoExtractor
 from ..utils import (
-    unified_strdate,
     compat_urllib_parse,
     ExtractorError,
+    int_or_none,
+    parse_iso8601,
 )


@@ -24,6 +25,10 @@ class MixcloudIE(InfoExtractor):
             'uploader': 'Daniel Holbach',
             'uploader_id': 'dholbach',
             'upload_date': '20111115',
+            'timestamp': 1321359578,
+            'thumbnail': 're:https?://.*\.jpg',
+            'view_count': int,
+            'like_count': int,
         },
     }

@@ -51,10 +56,6 @@ class MixcloudIE(InfoExtractor):

         webpage = self._download_webpage(url, track_id)

-        api_url = 'http://api.mixcloud.com/%s/%s/' % (uploader, cloudcast_name)
-        info = self._download_json(
-            api_url, track_id, 'Downloading cloudcast info')
-
         preview_url = self._search_regex(
             r'\s(?:data-preview-url|m-preview)="(.+?)"', webpage, 'preview url')
         song_url = preview_url.replace('/previews/', '/c/originals/')
@@ -65,16 +66,41 @@ class MixcloudIE(InfoExtractor):
             template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
             final_song_url = self._get_url(template_url)
         if final_song_url is None:
-            raise ExtractorError(u'Unable to extract track url')
+            raise ExtractorError('Unable to extract track url')
+
+        PREFIX = (
+            r'<div class="cloudcast-play-button-container"'
+            r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
+        title = self._html_search_regex(
+            PREFIX + r'm-title="([^"]+)"', webpage, 'title')
+        thumbnail = self._proto_relative_url(self._html_search_regex(
+            PREFIX + r'm-thumbnail-url="([^"]+)"', webpage, 'thumbnail',
+            fatal=False))
+        uploader = self._html_search_regex(
+            PREFIX + r'm-owner-name="([^"]+)"',
+            webpage, 'uploader', fatal=False)
+        uploader_id = self._search_regex(
+            r'\s+"profile": "([^"]+)",', webpage, 'uploader id', fatal=False)
+        description = self._og_search_description(webpage)
+        like_count = int_or_none(self._search_regex(
+            r'<meta itemprop="interactionCount" content="UserLikes:([0-9]+)"',
+            webpage, 'like count', fatal=False))
+        view_count = int_or_none(self._search_regex(
+            r'<meta itemprop="interactionCount" content="UserPlays:([0-9]+)"',
+            webpage, 'play count', fatal=False))
+        timestamp = parse_iso8601(self._search_regex(
+            r'<time itemprop="dateCreated" datetime="([^"]+)">',
+            webpage, 'upload date'))

         return {
             'id': track_id,
-            'title': info['name'],
+            'title': title,
             'url': final_song_url,
-            'description': info.get('description'),
-            'thumbnail': info['pictures'].get('extra_large'),
-            'uploader': info['user']['name'],
-            'uploader_id': info['user']['username'],
-            'upload_date': unified_strdate(info['created_time']),
-            'view_count': info['play_count'],
+            'description': description,
+            'thumbnail': thumbnail,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'timestamp': timestamp,
+            'view_count': view_count,
+            'like_count': like_count,
         }
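Mixcloud now scrapes the page instead of the JSON API, reading the upload time from a `<time datetime="...">` attribute and converting it with `parse_iso8601`. A stdlib-only stand-in for that conversion, checked against the timestamp in the test above:

    import datetime

    def parse_iso8601_utc(value):
        # Simplified stand-in for youtube-dl's parse_iso8601(); assumes a
        # trailing 'Z' (UTC) and no fractional seconds
        dt = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
        return int(dt.replace(tzinfo=datetime.timezone.utc).timestamp())

    print(parse_iso8601_utc('2011-11-15T12:19:38Z'))  # 1321359578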
View File
@ -0,0 +1,45 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class MoviezineIE(InfoExtractor):
_VALID_URL = r'https?://www\.moviezine\.se/video/(?P<id>[^?#]+)'
_TEST = {
'url': 'http://www.moviezine.se/video/205866',
'info_dict': {
'id': '205866',
'ext': 'mp4',
'title': 'Oculus - Trailer 1',
'description': 'md5:40cc6790fc81d931850ca9249b40e8a4',
'thumbnail': 're:http://.*\.jpg',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
jsplayer = self._download_webpage('http://www.moviezine.se/api/player.js?video=%s' % video_id, video_id, 'Downloading js api player')
formats = [{
'format_id': 'sd',
'url': self._html_search_regex(r'file: "(.+?)",', jsplayer, 'file'),
'quality': 0,
'ext': 'mp4',
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': self._search_regex(r'title: "(.+?)",', jsplayer, 'title'),
'thumbnail': self._search_regex(r'image: "(.+?)",', jsplayer, 'image'),
'formats': formats,
'description': self._og_search_description(webpage),
}

View File

@ -4,7 +4,11 @@ from __future__ import unicode_literals
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ExtractorError from ..utils import (
ExtractorError,
int_or_none,
qualities,
)
class NDRIE(InfoExtractor): class NDRIE(InfoExtractor):
@ -45,17 +49,16 @@ class NDRIE(InfoExtractor):
page = self._download_webpage(url, video_id, 'Downloading page') page = self._download_webpage(url, video_id, 'Downloading page')
title = self._og_search_title(page) title = self._og_search_title(page).strip()
description = self._og_search_description(page) description = self._og_search_description(page)
if description:
description = description.strip()
mobj = re.search( duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', fatal=False))
r'<div class="duration"><span class="min">(?P<minutes>\d+)</span>:<span class="sec">(?P<seconds>\d+)</span></div>',
page)
duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
formats = [] formats = []
mp3_url = re.search(r'''{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page) mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
if mp3_url: if mp3_url:
formats.append({ formats.append({
'url': mp3_url.group('audio'), 'url': mp3_url.group('audio'),
@ -64,13 +67,15 @@ class NDRIE(InfoExtractor):
thumbnail = None thumbnail = None
video_url = re.search(r'''3: {src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page) video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
if video_url: if video_url:
thumbnail = self._html_search_regex(r'(?m)title: "NDR PLAYER",\s*poster: "([^"]+)",', thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
page, 'thumbnail', fatal=False) if thumbnails:
if thumbnail: quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
thumbnail = 'http://www.ndr.de' + thumbnail largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
for format_id in ['lo', 'hi', 'hq']: thumbnail = 'http://www.ndr.de' + largest[0]
for format_id in 'lo', 'hi', 'hq':
formats.append({ formats.append({
'url': '%s.%s.mp4' % (video_url.group('video'), format_id), 'url': '%s.%s.mp4' % (video_url.group('video'), format_id),
'format_id': format_id, 'format_id': format_id,
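
The thumbnail selection above relies on qualities() from youtube_dl.utils, which turns an ordered list of labels into a ranking function so max() can pick the largest variant. A standalone sketch with a re-implementation along those lines and invented (url, quality) tuples:

    def qualities(quality_ids):
        # Rank a label by its position in the ordered list; unknown -> -1.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
    thumbnails = [('/t1.jpg', 'm'), ('/t2.jpg', 'xl'), ('/t3.jpg', 's')]
    largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
    print(largest)  # ('/t2.jpg', 'xl')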

View File

@ -0,0 +1,87 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class NewstubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?newstube\.ru/media/(?P<id>.+)'
_TEST = {
'url': 'http://newstube.ru/media/na-korable-progress-prodolzhaetsya-testirovanie-sistemy-kurs',
'info_dict': {
'id': 'd156a237-a6e9-4111-a682-039995f721f1',
'ext': 'flv',
'title': 'На корабле «Прогресс» продолжается тестирование системы «Курс»',
'description': 'md5:d0cbe7b4a6f600552617e48548d5dc77',
'duration': 20.04,
},
'params': {
# rtmp download
'skip_download': True,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id, 'Downloading page')
video_guid = self._html_search_regex(
r'<meta property="og:video" content="https?://(?:www\.)?newstube\.ru/freshplayer\.swf\?guid=(?P<guid>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})',
page, 'video GUID')
player = self._download_xml(
'http://p.newstube.ru/v2/player.asmx/GetAutoPlayInfo6?state=&url=%s&sessionId=&id=%s&placement=profile&location=n2' % (url, video_guid),
video_guid, 'Downloading player XML')
def ns(s):
return s.replace('/', '/%(ns)s') % {'ns': '{http://app1.newstube.ru/N2SiteWS/player.asmx}'}
session_id = player.find(ns('./SessionId')).text
media_info = player.find(ns('./Medias/MediaInfo'))
title = media_info.find(ns('./Name')).text
description = self._og_search_description(page)
thumbnail = media_info.find(ns('./KeyFrame')).text
duration = int(media_info.find(ns('./Duration')).text) / 1000.0
formats = []
for stream_info in media_info.findall(ns('./Streams/StreamInfo')):
media_location = stream_info.find(ns('./MediaLocation'))
if media_location is None:
continue
server = media_location.find(ns('./Server')).text
app = media_location.find(ns('./App')).text
media_id = stream_info.find(ns('./Id')).text
quality_id = stream_info.find(ns('./QualityId')).text
name = stream_info.find(ns('./Name')).text
width = int(stream_info.find(ns('./Width')).text)
height = int(stream_info.find(ns('./Height')).text)
formats.append({
'url': 'rtmp://%s/%s' % (server, app),
'app': app,
'play_path': '01/%s' % video_guid.upper(),
'rtmp_conn': ['S:%s' % session_id, 'S:%s' % media_id, 'S:n2'],
'page_url': url,
'ext': 'flv',
'format_id': quality_id,
'format_note': name,
'width': width,
'height': height,
})
self._sort_formats(formats)
return {
'id': video_guid,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
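
The ns() helper above works because ElementTree wants the namespace in Clark notation ({uri}tag) on every path step, so rewriting each '/' injects it everywhere at once. A standalone sketch; the XML below is an invented stand-in for the player response:

    import xml.etree.ElementTree as ET

    NS = '{http://app1.newstube.ru/N2SiteWS/player.asmx}'

    def ns(s):
        # './SessionId' -> './{uri}SessionId', and likewise for deeper paths.
        return s.replace('/', '/%(ns)s') % {'ns': NS}

    doc = ET.fromstring(
        '<root xmlns="http://app1.newstube.ru/N2SiteWS/player.asmx">'
        '<SessionId>abc</SessionId></root>')
    print(doc.find(ns('./SessionId')).text)  # abc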

View File

@ -73,14 +73,16 @@ class NFBIE(InfoExtractor):
title = media.find('title').text title = media.find('title').text
description = media.find('description').text description = media.find('description').text
# It seems assets always go from lower to better quality, so no need to sort # It seems assets always go from lower to better quality, so no need to sort
formats = [{ for asset in media.findall('assets/asset'):
'url': x.find('default/streamerURI').text, for x in asset:
'app': x.find('default/streamerURI').text.split('/', 3)[3], formats.append({
'play_path': x.find('default/url').text, 'url': x.find('streamerURI').text,
'rtmp_live': False, 'app': x.find('streamerURI').text.split('/', 3)[3],
'ext': 'mp4', 'play_path': x.find('url').text,
'format_id': x.get('quality'), 'rtmp_live': False,
} for x in media.findall('assets/asset')] 'ext': 'mp4',
'format_id': '%s-%s' % (x.tag, asset.get('quality')),
})
return { return {
'id': video_id, 'id': video_id,

View File

@ -47,7 +47,7 @@ class NineGagIE(InfoExtractor):
webpage = self._download_webpage(url, display_id) webpage = self._download_webpage(url, display_id)
post_view = json.loads(self._html_search_regex( post_view = json.loads(self._html_search_regex(
r'var postView = new app\.PostView\({ post: ({.+?}),', webpage, 'post view')) r'var postView = new app\.PostView\({\s*post:\s*({.+?}),', webpage, 'post view'))
youtube_id = post_view['videoExternalId'] youtube_id = post_view['videoExternalId']
title = post_view['title'] title = post_view['title']

View File

@ -0,0 +1,106 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
compat_str,
)
class NocoIE(InfoExtractor):
_VALID_URL = r'http://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)'
_TEST = {
'url': 'http://noco.tv/emission/11538/nolife/ami-ami-idol-hello-france/',
'md5': '0a993f0058ddbcd902630b2047ef710e',
'info_dict': {
'id': '11538',
'ext': 'mp4',
'title': 'Ami Ami Idol - Hello! France',
'description': 'md5:4eaab46ab68fa4197a317a88a53d3b86',
'upload_date': '20140412',
'uploader': 'Nolife',
'uploader_id': 'NOL',
'duration': 2851.2,
},
'skip': 'Requires noco account',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
medias = self._download_json(
'http://api.noco.tv/1.0/video/medias/%s' % video_id, video_id, 'Downloading video JSON')
formats = []
for fmt in medias['fr']['video_list']['default']['quality_list']:
format_id = fmt['quality_key']
file = self._download_json(
'http://api.noco.tv/1.0/video/file/%s/fr/%s' % (format_id.lower(), video_id),
video_id, 'Downloading %s video JSON' % format_id)
file_url = file['file']
if not file_url:
continue
if file_url == 'forbidden':
raise ExtractorError(
'%s returned error: %s - %s' % (
self.IE_NAME, file['popmessage']['title'], file['popmessage']['message']),
expected=True)
formats.append({
'url': file_url,
'format_id': format_id,
'width': fmt['res_width'],
'height': fmt['res_lines'],
'abr': fmt['audiobitrate'],
'vbr': fmt['videobitrate'],
'filesize': fmt['filesize'],
'format_note': fmt['quality_name'],
'preference': fmt['priority'],
})
self._sort_formats(formats)
show = self._download_json(
'http://api.noco.tv/1.0/shows/show/%s' % video_id, video_id, 'Downloading show JSON')[0]
upload_date = unified_strdate(show['indexed'])
uploader = show['partner_name']
uploader_id = show['partner_key']
duration = show['duration_ms'] / 1000.0
thumbnail = show['screenshot']
episode = show.get('show_TT') or show.get('show_OT')
family = show.get('family_TT') or show.get('family_OT')
episode_number = show.get('episode_number')
title = ''
if family:
title += family
if episode_number:
title += ' #' + compat_str(episode_number)
if episode:
title += ' - ' + episode
description = show.get('show_resume') or show.get('family_resume')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'uploader': uploader,
'uploader_id': uploader_id,
'duration': duration,
'formats': formats,
}

View File

@ -4,9 +4,7 @@ import re
from .brightcove import BrightcoveIE from .brightcove import BrightcoveIE
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import ExtractorError
ExtractorError,
)
class NownessIE(InfoExtractor): class NownessIE(InfoExtractor):
@ -14,9 +12,10 @@ class NownessIE(InfoExtractor):
_TEST = { _TEST = {
'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation', 'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
'file': '2520295746001.mp4', 'md5': '068bc0202558c2e391924cb8cc470676',
'md5': '0ece2f70a7bd252c7b00f3070182d418',
'info_dict': { 'info_dict': {
'id': '2520295746001',
'ext': 'mp4',
'description': 'Candor: The Art of Gesticulation', 'description': 'Candor: The Art of Gesticulation',
'uploader': 'Nowness', 'uploader': 'Nowness',
'title': 'Candor: The Art of Gesticulation', 'title': 'Candor: The Art of Gesticulation',

youtube_dl/extractor/nrk.py Normal file
View File

@ -0,0 +1,145 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
unified_strdate,
)
class NRKIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?nrk\.no/(?:video|lyd)/[^/]+/(?P<id>[\dA-F]{16})'
_TESTS = [
{
'url': 'http://www.nrk.no/video/dompap_og_andre_fugler_i_piip_show/D0FA54B5C8B6CE59/emne/piipshow/',
'md5': 'a6eac35052f3b242bb6bb7f43aed5886',
'info_dict': {
'id': '150533',
'ext': 'flv',
'title': 'Dompap og andre fugler i Piip-Show',
'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f'
}
},
{
'url': 'http://www.nrk.no/lyd/lyd_av_oppleser_for_blinde/AEFDDD5473BA0198/',
'md5': '3471f2a51718195164e88f46bf427668',
'info_dict': {
'id': '154915',
'ext': 'flv',
'title': 'Slik høres internett ut når du er blind',
'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id)
video_id = self._html_search_regex(r'<div class="nrk-video" data-nrk-id="(\d+)">', page, 'video id')
data = self._download_json(
'http://v7.psapi.nrk.no/mediaelement/%s' % video_id, video_id, 'Downloading media JSON')
if data['usageRights']['isGeoBlocked']:
raise ExtractorError('NRK har ikke rettigheter til å vise dette programmet utenfor Norge', expected=True)
video_url = data['mediaUrl'] + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124'
images = data.get('images')
if images:
thumbnails = images['webImages']
thumbnails.sort(key=lambda image: image['pixelWidth'])
thumbnail = thumbnails[-1]['imageUrl']
else:
thumbnail = None
return {
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': data['title'],
'description': data['description'],
'thumbnail': thumbnail,
}
class NRKTVIE(InfoExtractor):
_VALID_URL = r'http://tv\.nrk\.no/(?:serie/[^/]+|program)/(?P<id>[a-z]{4}\d{8})'
_TESTS = [
{
'url': 'http://tv.nrk.no/serie/20-spoersmaal-tv/muhh48000314/23-05-2014',
'md5': '7b96112fbae1faf09a6f9ae1aff6cb84',
'info_dict': {
'id': 'muhh48000314',
'ext': 'flv',
'title': '20 spørsmål',
'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
'upload_date': '20140523',
'duration': 1741.52,
}
},
{
'url': 'http://tv.nrk.no/program/mdfp15000514',
'md5': '383650ece2b25ecec996ad7b5bb2a384',
'info_dict': {
'id': 'mdfp15000514',
'ext': 'flv',
'title': 'Kunnskapskanalen: Grunnlovsjubiléet - Stor ståhei for ingenting',
'description': 'md5:654c12511f035aed1e42bdf5db3b206a',
'upload_date': '20140524',
'duration': 4605.0,
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id)
title = self._html_search_meta('title', page, 'title')
description = self._html_search_meta('description', page, 'description')
thumbnail = self._html_search_regex(r'data-posterimage="([^"]+)"', page, 'thumbnail', fatal=False)
upload_date = unified_strdate(self._html_search_meta('rightsfrom', page, 'upload date', fatal=False))
duration = self._html_search_regex(r'data-duration="([^"]+)"', page, 'duration', fatal=False)
if duration:
duration = float(duration)
formats = []
f4m_url = re.search(r'data-media="([^"]+)"', page)
if f4m_url:
formats.append({
'url': f4m_url.group(1) + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124',
'format_id': 'f4m',
'ext': 'flv',
})
m3u8_url = re.search(r'data-hls-media="([^"]+)"', page)
if m3u8_url:
formats.append({
'url': m3u8_url.group(1),
'format_id': 'm3u8',
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
}
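
NRKIE's thumbnail choice above is a sort-and-take-last over pixelWidth; sketched standalone with invented image data:

    images = {'webImages': [
        {'imageUrl': 'http://example.com/s.jpg', 'pixelWidth': 300},
        {'imageUrl': 'http://example.com/l.jpg', 'pixelWidth': 1280},
        {'imageUrl': 'http://example.com/m.jpg', 'pixelWidth': 600},
    ]}
    thumbnails = images['webImages']
    thumbnails.sort(key=lambda image: image['pixelWidth'])
    print(thumbnails[-1]['imageUrl'])  # http://example.com/l.jpg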

View File

@ -24,9 +24,9 @@ class NTVIE(InfoExtractor):
'duration': 136, 'duration': 136,
}, },
'params': { 'params': {
# rtmp download # rtmp download
'skip_download': True, 'skip_download': True,
}, },
}, },
{ {
'url': 'http://www.ntv.ru/video/novosti/750370/', 'url': 'http://www.ntv.ru/video/novosti/750370/',
@ -38,9 +38,9 @@ class NTVIE(InfoExtractor):
'duration': 172, 'duration': 172,
}, },
'params': { 'params': {
# rtmp download # rtmp download
'skip_download': True, 'skip_download': True,
}, },
}, },
{ {
'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416', 'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416',
@ -52,9 +52,9 @@ class NTVIE(InfoExtractor):
'duration': 1496, 'duration': 1496,
}, },
'params': { 'params': {
# rtmp download # rtmp download
'skip_download': True, 'skip_download': True,
}, },
}, },
{ {
'url': 'http://www.ntv.ru/kino/Koma_film', 'url': 'http://www.ntv.ru/kino/Koma_film',
@ -66,9 +66,9 @@ class NTVIE(InfoExtractor):
'duration': 5592, 'duration': 5592,
}, },
'params': { 'params': {
# rtmp download # rtmp download
'skip_download': True, 'skip_download': True,
}, },
}, },
{ {
'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/', 'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/',
@ -80,33 +80,25 @@ class NTVIE(InfoExtractor):
'duration': 2590, 'duration': 2590,
}, },
'params': { 'params': {
# rtmp download # rtmp download
'skip_download': True, 'skip_download': True,
}, },
}, },
] ]
_VIDEO_ID_REGEXES = [ _VIDEO_ID_REGEXES = [
r'<meta property="og:url" content="http://www\.ntv\.ru/video/(\d+)', r'<meta property="og:url" content="http://www\.ntv\.ru/video/(\d+)',
r'<video embed=[^>]+><id>(\d+)</id>', r'<video embed=[^>]+><id>(\d+)</id>',
r'<video restriction[^>]+><key>(\d+)</key>' r'<video restriction[^>]+><key>(\d+)</key>',
] ]
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') video_id = mobj.group('id')
page = self._download_webpage(url, video_id, 'Downloading page') page = self._download_webpage(url, video_id)
for pattern in self._VIDEO_ID_REGEXES: video_id = self._html_search_regex(self._VIDEO_ID_REGEXES, page, 'video id')
mobj = re.search(pattern, page)
if mobj:
break
if not mobj:
raise ExtractorError('No media links available for %s' % video_id)
video_id = mobj.group(1)
player = self._download_xml('http://www.ntv.ru/vi%s/' % video_id, video_id, 'Downloading video XML') player = self._download_xml('http://www.ntv.ru/vi%s/' % video_id, video_id, 'Downloading video XML')
title = unescapeHTML(player.find('./data/title').text) title = unescapeHTML(player.find('./data/title').text)
@ -124,7 +116,7 @@ class NTVIE(InfoExtractor):
'7': 'video2', '7': 'video2',
} }
app = apps[puid22] if puid22 in apps else apps['4'] app = apps.get(puid22, apps['4'])
formats = [] formats = []
for format_id in ['', 'hi', 'webm']: for format_id in ['', 'hi', 'webm']:
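
The NTV simplification above leans on _html_search_regex accepting a list of patterns and returning the first match. A standalone sketch of that first-match-wins behaviour; the helper name and page snippet are invented:

    import re

    def search_regexes(patterns, text):
        # Try each pattern in order; return the first capture that hits.
        for pattern in patterns:
            mobj = re.search(pattern, text)
            if mobj:
                return mobj.group(1)
        return None

    page = '<video embed=yes><id>750370</id></video>'
    VIDEO_ID_REGEXES = [
        r'<meta property="og:url" content="http://www\.ntv\.ru/video/(\d+)',
        r'<video embed=[^>]+><id>(\d+)</id>',
    ]
    print(search_regexes(VIDEO_ID_REGEXES, page))  # 750370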

View File

@ -0,0 +1,48 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class NuvidIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://m.nuvid.com/video/1310741/',
'md5': 'eab207b7ac4fccfb4e23c86201f11277',
'info_dict': {
'id': '1310741',
'ext': 'mp4',
"title": "Horny babes show their awesome bodeis and",
"age_limit": 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
murl = url.replace('://www.', '://m.')
webpage = self._download_webpage(murl, video_id)
title = self._html_search_regex(
r'<div class="title">\s+<h2[^>]*>([^<]+)</h2>',
webpage, 'title').strip()
url_end = self._html_search_regex(
r'href="(/[^"]+)"[^>]*data-link_type="mp4"',
webpage, 'video_url')
video_url = 'http://m.nuvid.com' + url_end
thumbnail = self._html_search_regex(
r'href="(/thumbs/[^"]+)"[^>]*data-link_type="thumbs"',
webpage, 'thumbnail URL', fatal=False)
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': title,
'thumbnail': thumbnail,
'age_limit': 18,
}

View File

@ -0,0 +1,77 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_iso8601
class NYTimesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nytimes\.com/video/(?:[^/]+/)+(?P<id>\d+)'
_TEST = {
'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263',
'md5': '18a525a510f942ada2720db5f31644c0',
'info_dict': {
'id': '100000002847155',
'ext': 'mov',
'title': 'Verbatim: What Is a Photocopier?',
'description': 'md5:93603dada88ddbda9395632fdc5da260',
'timestamp': 1398631707,
'upload_date': '20140427',
'uploader': 'Brett Weiner',
'duration': 419,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_data = self._download_json(
'http://www.nytimes.com/svc/video/api/v2/video/%s' % video_id, video_id, 'Downloading video JSON')
title = video_data['headline']
description = video_data['summary']
duration = video_data['duration'] / 1000.0
uploader = video_data['byline']
timestamp = parse_iso8601(video_data['publication_date'][:-8])
def get_file_size(file_size):
if isinstance(file_size, int):
return file_size
elif isinstance(file_size, dict):
return int(file_size.get('value', 0))
else:
return 0
formats = [
{
'url': video['url'],
'format_id': video['type'],
'vcodec': video['video_codec'],
'width': video['width'],
'height': video['height'],
'filesize': get_file_size(video['fileSize']),
} for video in video_data['renditions']
]
self._sort_formats(formats)
thumbnails = [
{
'url': 'http://www.nytimes.com/%s' % image['url'],
'resolution': '%dx%d' % (image['width'], image['height']),
} for image in video_data['images']
]
return {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'uploader': uploader,
'duration': duration,
'formats': formats,
'thumbnails': thumbnails,
}

View File

@ -1,10 +1,10 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import datetime
import json import json
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import compat_urllib_parse
class PhotobucketIE(InfoExtractor): class PhotobucketIE(InfoExtractor):
@ -14,6 +14,7 @@ class PhotobucketIE(InfoExtractor):
'file': 'zpsc0c3b9fa.mp4', 'file': 'zpsc0c3b9fa.mp4',
'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99', 'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99',
'info_dict': { 'info_dict': {
'timestamp': 1367669341,
'upload_date': '20130504', 'upload_date': '20130504',
'uploader': 'rachaneronas', 'uploader': 'rachaneronas',
'title': 'Tired of Link Building? Try BacklinkMyDomain.com!', 'title': 'Tired of Link Building? Try BacklinkMyDomain.com!',
@ -32,11 +33,12 @@ class PhotobucketIE(InfoExtractor):
info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);', info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);',
webpage, 'info json') webpage, 'info json')
info = json.loads(info_json) info = json.loads(info_json)
url = compat_urllib_parse.unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url'))
return { return {
'id': video_id, 'id': video_id,
'url': info['downloadUrl'], 'url': url,
'uploader': info['username'], 'uploader': info['username'],
'upload_date': datetime.date.fromtimestamp(info['creationDate']).strftime('%Y%m%d'), 'timestamp': info['creationDate'],
'title': info['title'], 'title': info['title'],
'ext': video_extension, 'ext': video_extension,
'thumbnail': info['thumbUrl'], 'thumbnail': info['thumbUrl'],
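
The new Photobucket code pulls the percent-encoded video URL out of the embed code's file= parameter and reports the raw creationDate epoch as timestamp, letting the framework derive upload_date. Sketch of the decoding step with an invented flashvars fragment; urllib.parse stands in for youtube-dl's compat_urllib_parse on Python 3:

    import re
    from urllib.parse import unquote  # compat_urllib_parse.unquote in youtube-dl

    html = 'flashvars="file=http%3A%2F%2Fvid.photobucket.com%2Fzpsc0c3b9fa.mp4"'
    url = unquote(re.search(r'file=(.+\.mp4)', html).group(1))
    print(url)  # http://vid.photobucket.com/zpsc0c3b9fa.mp4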

View File

@ -6,22 +6,36 @@ import re
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import int_or_none from ..utils import int_or_none
class PodomaticIE(InfoExtractor): class PodomaticIE(InfoExtractor):
IE_NAME = 'podomatic' IE_NAME = 'podomatic'
_VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)' _VALID_URL = r'^(?P<proto>https?)://(?P<channel>[^.]+)\.podomatic\.com/entry/(?P<id>[^?]+)'
_TEST = { _TESTS = [
"url": "http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00", {
"file": "2009-01-02T16_03_35-08_00.mp3", 'url': 'http://scienceteachingtips.podomatic.com/entry/2009-01-02T16_03_35-08_00',
"md5": "84bb855fcf3429e6bf72460e1eed782d", 'md5': '84bb855fcf3429e6bf72460e1eed782d',
"info_dict": { 'info_dict': {
"uploader": "Science Teaching Tips", 'id': '2009-01-02T16_03_35-08_00',
"uploader_id": "scienceteachingtips", 'ext': 'mp3',
"title": "64. When the Moon Hits Your Eye", 'uploader': 'Science Teaching Tips',
"duration": 446, 'uploader_id': 'scienceteachingtips',
} 'title': '64. When the Moon Hits Your Eye',
} 'duration': 446,
}
},
{
'url': 'http://ostbahnhof.podomatic.com/entry/2013-11-15T16_31_21-08_00',
'md5': 'd2cf443931b6148e27638650e2638297',
'info_dict': {
'id': '2013-11-15T16_31_21-08_00',
'ext': 'mp3',
'uploader': 'Ostbahnhof / Techno Mix',
'uploader_id': 'ostbahnhof',
'title': 'Einunddreizig',
'duration': 3799,
}
},
]
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
@ -32,10 +46,12 @@ class PodomaticIE(InfoExtractor):
'?permalink=true&rtmp=0') % '?permalink=true&rtmp=0') %
(mobj.group('proto'), channel, video_id)) (mobj.group('proto'), channel, video_id))
data_json = self._download_webpage( data_json = self._download_webpage(
json_url, video_id, note=u'Downloading video info') json_url, video_id, 'Downloading video info')
data = json.loads(data_json) data = json.loads(data_json)
video_url = data['downloadLink'] video_url = data['downloadLink']
if not video_url:
video_url = '%s/%s' % (data['streamer'].replace('rtmp', 'http'), data['mediaLocation'])
uploader = data['podcast'] uploader = data['podcast']
title = data['title'] title = data['title']
thumbnail = data['imageLocation'] thumbnail = data['imageLocation']
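
The fallback added above rebuilds the stream URL from the rtmp streamer host and the media location whenever downloadLink is empty. Sketched with invented API data:

    data = {'downloadLink': '',
            'streamer': 'rtmp://stream.podomatic.com/x',
            'mediaLocation': 'shows/ep64.mp3'}

    video_url = data['downloadLink']
    if not video_url:
        # Swap the scheme and append the media path.
        video_url = '%s/%s' % (
            data['streamer'].replace('rtmp', 'http'), data['mediaLocation'])
    print(video_url)  # http://stream.podomatic.com/x/shows/ep64.mp3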

View File

@ -45,7 +45,7 @@ class PornHubIE(InfoExtractor):
video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title') video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
video_uploader = self._html_search_regex( video_uploader = self._html_search_regex(
r'(?s)<div class="video-info-row">\s*From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<', r'(?s)From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
webpage, 'uploader', fatal=False) webpage, 'uploader', fatal=False)
thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False) thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
if thumbnail: if thumbnail:

View File

@ -8,8 +8,6 @@ from .common import InfoExtractor
from ..utils import ( from ..utils import (
compat_urllib_parse, compat_urllib_parse,
unified_strdate, unified_strdate,
clean_html,
RegexNotFoundError,
) )
@ -188,16 +186,7 @@ class ProSiebenSat1IE(InfoExtractor):
page = self._download_webpage(url, video_id, 'Downloading page') page = self._download_webpage(url, video_id, 'Downloading page')
def extract(patterns, name, page, fatal=False): clip_id = self._html_search_regex(self._CLIPID_REGEXES, page, 'clip id')
for pattern in patterns:
mobj = re.search(pattern, page)
if mobj:
return clean_html(mobj.group(1))
if fatal:
raise RegexNotFoundError(u'Unable to extract %s' % name)
return None
clip_id = extract(self._CLIPID_REGEXES, 'clip id', page, fatal=True)
access_token = 'testclient' access_token = 'testclient'
client_name = 'kolibri-1.2.5' client_name = 'kolibri-1.2.5'
@ -246,13 +235,12 @@ class ProSiebenSat1IE(InfoExtractor):
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON') urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
title = extract(self._TITLE_REGEXES, 'title', page, fatal=True) title = self._html_search_regex(self._TITLE_REGEXES, page, 'title')
description = extract(self._DESCRIPTION_REGEXES, 'description', page) description = self._html_search_regex(self._DESCRIPTION_REGEXES, page, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(page) thumbnail = self._og_search_thumbnail(page)
upload_date = extract(self._UPLOAD_DATE_REGEXES, 'upload date', page) upload_date = unified_strdate(self._html_search_regex(
if upload_date: self._UPLOAD_DATE_REGEXES, page, 'upload date', fatal=False))
upload_date = unified_strdate(upload_date)
formats = [] formats = []

View File

@ -0,0 +1,49 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
class RTBFIE(InfoExtractor):
_VALID_URL = r'https?://www.rtbf.be/video/[^\?]+\?id=(?P<id>\d+)'
_TEST = {
'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274',
'md5': '799f334ddf2c0a582ba80c44655be570',
'info_dict': {
'id': '1921274',
'ext': 'mp4',
'title': 'Les Diables au coeur (épisode 2)',
'description': 'Football - Diables Rouges',
'duration': 3099,
'timestamp': 1398456336,
'upload_date': '20140425',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage('https://www.rtbf.be/video/embed?id=%s' % video_id, video_id)
data = json.loads(self._html_search_regex(
r'<div class="js-player-embed" data-video="([^"]+)"', page, 'data video'))['data']
video_url = data.get('downloadUrl') or data.get('url')
if data['provider'].lower() == 'youtube':
return self.url_result(video_url, 'Youtube')
return {
'id': video_id,
'url': video_url,
'title': data['title'],
'description': data.get('description') or data.get('subtitle'),
'thumbnail': data['thumbnail']['large'],
'duration': data.get('duration') or data.get('realDuration'),
'timestamp': data['created'],
'view_count': data['viewCount'],
}

View File

@ -43,13 +43,14 @@ class RutubeIE(InfoExtractor):
'http://rutube.ru/api/video/%s/?format=json' % video_id, 'http://rutube.ru/api/video/%s/?format=json' % video_id,
video_id, 'Downloading video JSON') video_id, 'Downloading video JSON')
trackinfo = self._download_json(
'http://rutube.ru/api/play/trackinfo/%s/?format=json' % video_id,
video_id, 'Downloading trackinfo JSON')
# Some videos don't have the author field # Some videos don't have the author field
author = trackinfo.get('author') or {} author = video.get('author') or {}
m3u8_url = trackinfo['video_balancer'].get('m3u8')
options = self._download_json(
'http://rutube.ru/api/play/options/%s/?format=json' % video_id,
video_id, 'Downloading options JSON')
m3u8_url = options['video_balancer'].get('m3u8')
if m3u8_url is None: if m3u8_url is None:
raise ExtractorError('Couldn\'t find m3u8 manifest url') raise ExtractorError('Couldn\'t find m3u8 manifest url')

View File

@ -12,7 +12,12 @@ from ..utils import (
class RUTVIE(InfoExtractor): class RUTVIE(InfoExtractor):
IE_DESC = 'RUTV.RU' IE_DESC = 'RUTV.RU'
_VALID_URL = r'https?://player\.(?:rutv\.ru|vgtrk\.com)/(?:flash2v/container\.swf\?id=|iframe/(?P<type>swf|video|live)/id/)(?P<id>\d+)' _VALID_URL = r'''(?x)
https?://player\.(?:rutv\.ru|vgtrk\.com)/
(?P<path>flash2v/container\.swf\?id=
|iframe/(?P<type>swf|video|live)/id/
|index/iframe/cast_id/)
(?P<id>\d+)'''
_TESTS = [ _TESTS = [
{ {
@ -90,7 +95,7 @@ class RUTVIE(InfoExtractor):
@classmethod @classmethod
def _extract_url(cls, webpage): def _extract_url(cls, webpage):
mobj = re.search( mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.rutv\.ru/iframe/(?:swf|video|live)/id/.+?)\1', webpage) r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.rutv\.ru/(?:iframe/(?:swf|video|live)/id|index/iframe/cast_id)/.+?)\1', webpage)
if mobj: if mobj:
return mobj.group('url') return mobj.group('url')
@ -103,10 +108,16 @@ class RUTVIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') video_id = mobj.group('id')
video_type = mobj.group('type') video_path = mobj.group('path')
if not video_type or video_type == 'swf': if video_path.startswith('flash2v'):
video_type = 'video' video_type = 'video'
elif video_path.startswith('iframe'):
video_type = mobj.group('type')
if video_type == 'swf':
video_type = 'video'
elif video_path.startswith('index/iframe/cast_id'):
video_type = 'live'
json_data = self._download_json( json_data = self._download_json(
'http://player.rutv.ru/iframe/%splay/id/%s' % ('live-' if video_type == 'live' else '', video_id), 'http://player.rutv.ru/iframe/%splay/id/%s' % ('live-' if video_type == 'live' else '', video_id),
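
The widened _VALID_URL above encodes the player type in the URL path, and _real_extract maps it back. A standalone sketch of the dispatch against an invented cast_id embed URL:

    import re

    VALID_URL = r'''(?x)
        https?://player\.(?:rutv\.ru|vgtrk\.com)/
        (?P<path>flash2v/container\.swf\?id=
        |iframe/(?P<type>swf|video|live)/id/
        |index/iframe/cast_id/)
        (?P<id>\d+)'''

    mobj = re.match(VALID_URL, 'http://player.rutv.ru/index/iframe/cast_id/123')
    video_path = mobj.group('path')
    if video_path.startswith('flash2v'):
        video_type = 'video'
    elif video_path.startswith('iframe'):
        video_type = mobj.group('type')
        if video_type == 'swf':
            video_type = 'video'
    elif video_path.startswith('index/iframe/cast_id'):
        video_type = 'live'
    print(video_type, mobj.group('id'))  # live 123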

View File

@ -0,0 +1,56 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class SciVeeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?scivee\.tv/node/(?P<id>\d+)'
_TEST = {
'url': 'http://www.scivee.tv/node/62352',
'md5': 'b16699b74c9e6a120f6772a44960304f',
'info_dict': {
'id': '62352',
'ext': 'mp4',
'title': 'Adam Arkin at the 2014 DOE JGI Genomics of Energy & Environment Meeting',
'description': 'md5:81f1710638e11a481358fab1b11059d7',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
# annotations XML is malformed
annotations = self._download_webpage(
'http://www.scivee.tv/assets/annotations/%s' % video_id, video_id, 'Downloading annotations')
title = self._html_search_regex(r'<title>([^<]+)</title>', annotations, 'title')
description = self._html_search_regex(r'<abstract>([^<]+)</abstract>', annotations, 'abstract', fatal=False)
filesize = int_or_none(self._html_search_regex(
r'<filesize>([^<]+)</filesize>', annotations, 'filesize', fatal=False))
formats = [
{
'url': 'http://www.scivee.tv/assets/audio/%s' % video_id,
'ext': 'mp3',
'format_id': 'audio',
},
{
'url': 'http://www.scivee.tv/assets/video/%s' % video_id,
'ext': 'mp4',
'format_id': 'video',
'filesize': filesize,
},
]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id,
'formats': formats,

View File


@ -0,0 +1,47 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class SlutloadIE(InfoExtractor):
_VALID_URL = r'^https?://(?:\w+\.)?slutload\.com/video/[^/]+/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.slutload.com/video/virginie-baisee-en-cam/TD73btpBqSxc/',
'md5': '0cf531ae8006b530bd9df947a6a0df77',
'info_dict': {
'id': 'TD73btpBqSxc',
'ext': 'mp4',
"title": "virginie baisee en cam",
"age_limit": 18,
'thumbnail': 're:https?://.*?\.jpg'
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<h1><strong>([^<]+)</strong>',
webpage, 'title').strip()
video_url = self._html_search_regex(
r'(?s)<div id="vidPlayer"\s+data-url="([^"]+)"',
webpage, 'video URL')
thumbnail = self._html_search_regex(
r'(?s)<div id="vidPlayer"\s+.*?previewer-file="([^"]+)"',
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'url': video_url,
'title': video_title,
'thumbnail': thumbnail,
'age_limit': 18

View File


@ -25,7 +25,7 @@ class SoundcloudIE(InfoExtractor):
of the stream token and uid of the stream token and uid
""" """
_VALID_URL = r'''^(?:https?://)? _VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/ (?:(?:(?:www\.|m\.)?soundcloud\.com/
(?P<uploader>[\w\d-]+)/ (?P<uploader>[\w\d-]+)/
(?!sets/)(?P<title>[\w\d-]+)/? (?!sets/)(?P<title>[\w\d-]+)/?
@ -94,10 +94,6 @@ class SoundcloudIE(InfoExtractor):
_CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28' _CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'
_IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf' _IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf'
@classmethod
def suitable(cls, url):
return re.match(cls._VALID_URL, url, flags=re.VERBOSE) is not None
def report_resolve(self, video_id): def report_resolve(self, video_id):
"""Report information extraction.""" """Report information extraction."""
self.to_screen('%s: Resolving id' % video_id) self.to_screen('%s: Resolving id' % video_id)
@ -141,11 +137,10 @@ class SoundcloudIE(InfoExtractor):
# We have to retrieve the url # We have to retrieve the url
streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?' streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?'
'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token)) 'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
stream_json = self._download_webpage( format_dict = self._download_json(
streams_url, streams_url,
track_id, 'Downloading track url') track_id, 'Downloading track url')
format_dict = json.loads(stream_json)
for key, stream_url in format_dict.items(): for key, stream_url in format_dict.items():
if key.startswith('http'): if key.startswith('http'):
formats.append({ formats.append({
@ -198,7 +193,7 @@ class SoundcloudIE(InfoExtractor):
full_title = track_id full_title = track_id
elif mobj.group('player'): elif mobj.group('player'):
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
return self.url_result(query['url'][0], ie='Soundcloud') return self.url_result(query['url'][0])
else: else:
# extract uploader (which is in the url) # extract uploader (which is in the url)
uploader = mobj.group('uploader') uploader = mobj.group('uploader')
@ -213,11 +208,11 @@ class SoundcloudIE(InfoExtractor):
url = 'http://soundcloud.com/%s' % resolve_title url = 'http://soundcloud.com/%s' % resolve_title
info_json_url = self._resolv_url(url) info_json_url = self._resolv_url(url)
info_json = self._download_webpage(info_json_url, full_title, 'Downloading info JSON') info = self._download_json(info_json_url, full_title, 'Downloading info JSON')
info = json.loads(info_json)
return self._extract_info_dict(info, full_title, secret_token=token) return self._extract_info_dict(info, full_title, secret_token=token)
class SoundcloudSetIE(SoundcloudIE): class SoundcloudSetIE(SoundcloudIE):
_VALID_URL = r'https?://(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)' _VALID_URL = r'https?://(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
IE_NAME = 'soundcloud:set' IE_NAME = 'soundcloud:set'
@ -232,16 +227,15 @@ class SoundcloudSetIE(SoundcloudIE):
# extract uploader (which is in the url) # extract uploader (which is in the url)
uploader = mobj.group(1) uploader = mobj.group(1)
# extract simple title (uploader + slug of song title) # extract simple title (uploader + slug of song title)
slug_title = mobj.group(2) slug_title = mobj.group(2)
full_title = '%s/sets/%s' % (uploader, slug_title) full_title = '%s/sets/%s' % (uploader, slug_title)
self.report_resolve(full_title) self.report_resolve(full_title)
url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title) url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
resolv_url = self._resolv_url(url) resolv_url = self._resolv_url(url)
info_json = self._download_webpage(resolv_url, full_title) info = self._download_json(resolv_url, full_title)
info = json.loads(info_json)
if 'errors' in info: if 'errors' in info:
for err in info['errors']: for err in info['errors']:
self._downloader.report_error('unable to download video webpage: %s' % compat_str(err['error_message'])) self._downloader.report_error('unable to download video webpage: %s' % compat_str(err['error_message']))
@ -268,26 +262,55 @@ class SoundcloudUserIE(SoundcloudIE):
url = 'http://soundcloud.com/%s/' % uploader url = 'http://soundcloud.com/%s/' % uploader
resolv_url = self._resolv_url(url) resolv_url = self._resolv_url(url)
user_json = self._download_webpage(resolv_url, uploader, user = self._download_json(
'Downloading user info') resolv_url, uploader, 'Downloading user info')
user = json.loads(user_json) base_url = 'http://api.soundcloud.com/users/%s/tracks.json?' % uploader
tracks = [] entries = []
for i in itertools.count(): for i in itertools.count():
data = compat_urllib_parse.urlencode({'offset': i*50, data = compat_urllib_parse.urlencode({
'client_id': self._CLIENT_ID, 'offset': i * 50,
}) 'client_id': self._CLIENT_ID,
tracks_url = 'http://api.soundcloud.com/users/%s/tracks.json?' % user['id'] + data })
response = self._download_webpage(tracks_url, uploader, new_entries = self._download_json(
'Downloading tracks page %s' % (i+1)) base_url + data, uploader, 'Downloading track page %s' % (i + 1))
new_tracks = json.loads(response) entries.extend(self._extract_info_dict(e, quiet=True) for e in new_entries)
tracks.extend(self._extract_info_dict(track, quiet=True) for track in new_tracks) if len(new_entries) < 50:
if len(new_tracks) < 50:
break break
return { return {
'_type': 'playlist', '_type': 'playlist',
'id': compat_str(user['id']), 'id': compat_str(user['id']),
'title': user['username'], 'title': user['username'],
'entries': tracks, 'entries': entries,
}
class SoundcloudPlaylistIE(SoundcloudIE):
_VALID_URL = r'https?://api\.soundcloud\.com/playlists/(?P<id>[0-9]+)'
IE_NAME = 'soundcloud:playlist'
# it's in tests/test_playlists.py
_TESTS = []
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
base_url = '%s//api.soundcloud.com/playlists/%s.json?' % (self.http_scheme(), playlist_id)
data = compat_urllib_parse.urlencode({
'client_id': self._CLIENT_ID,
})
data = self._download_json(
base_url + data, playlist_id, 'Downloading playlist')
entries = [
self._extract_info_dict(t, quiet=True) for t in data['tracks']]
return {
'_type': 'playlist',
'id': playlist_id,
'title': data.get('title'),
'description': data.get('description'),
'entries': entries,
} }
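
SoundcloudUserIE's loop above pages through the tracks API 50 items at a time and stops at the first short page. A standalone sketch of that pagination shape; fetch_page is an invented stand-in for _download_json against the tracks endpoint:

    import itertools

    def fetch_page(offset, _fake_db=list(range(120))):
        # Pretend API: return up to 50 items starting at offset.
        return _fake_db[offset:offset + 50]

    entries = []
    for i in itertools.count():
        new_entries = fetch_page(i * 50)
        entries.extend(new_entries)
        if len(new_entries) < 50:
            break
    print(len(entries))  # 120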

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
import re import re
from .common import InfoExtractor from .common import InfoExtractor
@ -8,78 +10,114 @@ from ..utils import (
class SteamIE(InfoExtractor): class SteamIE(InfoExtractor):
_VALID_URL = r"""http://store\.steampowered\.com/ _VALID_URL = r"""(?x)
(agecheck/)? https?://store\.steampowered\.com/
(?P<urltype>video|app)/ #If the page is only for videos or for a game (agecheck/)?
(?P<gameID>\d+)/? (?P<urltype>video|app)/ #If the page is only for videos or for a game
(?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID (?P<gameID>\d+)/?
""" (?P<videoID>\d*)(?P<extra>\??) # For urltype == video we sometimes get the videoID
|
https?://(?:www\.)?steamcommunity\.com/sharedfiles/filedetails/\?id=(?P<fileID>[0-9]+)
"""
_VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/' _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
_AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970' _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
_TEST = { _TESTS = [{
u"url": u"http://store.steampowered.com/video/105600/", "url": "http://store.steampowered.com/video/105600/",
u"playlist": [ "playlist": [
{ {
u"file": u"81300.flv", "md5": "f870007cee7065d7c76b88f0a45ecc07",
u"md5": u"f870007cee7065d7c76b88f0a45ecc07", "info_dict": {
u"info_dict": { 'id': '81300',
u"title": u"Terraria 1.1 Trailer", 'ext': 'flv',
u'playlist_index': 1, "title": "Terraria 1.1 Trailer",
'playlist_index': 1,
} }
}, },
{ {
u"file": u"80859.flv", "md5": "61aaf31a5c5c3041afb58fb83cbb5751",
u"md5": u"61aaf31a5c5c3041afb58fb83cbb5751", "info_dict": {
u"info_dict": { 'id': '80859',
u"title": u"Terraria Trailer", 'ext': 'flv',
u'playlist_index': 2, "title": "Terraria Trailer",
'playlist_index': 2,
} }
} }
] ],
} 'params': {
'playlistend': 2,
}
@classmethod }, {
def suitable(cls, url): 'url': 'http://steamcommunity.com/sharedfiles/filedetails/?id=242472205',
"""Receives a URL and returns True if suitable for this IE.""" 'info_dict': {
return re.match(cls._VALID_URL, url, re.VERBOSE) is not None 'id': 'WB5DvDOOvAY',
'ext': 'mp4',
'upload_date': '20140329',
'title': 'FRONTIERS - Final Greenlight Trailer',
'description': "The final trailer for the Steam Greenlight launch. Hooray, progress! Here's the official Greenlight page: http://steamcommunity.com/sharedfiles/filedetails/?id=242472205",
'uploader': 'AAD Productions',
'uploader_id': 'AtomicAgeDogGames',
}
}]
def _real_extract(self, url): def _real_extract(self, url):
m = re.match(self._VALID_URL, url, re.VERBOSE) m = re.match(self._VALID_URL, url)
gameID = m.group('gameID') fileID = m.group('fileID')
if fileID:
videourl = self._VIDEO_PAGE_TEMPLATE % gameID videourl = url
webpage = self._download_webpage(videourl, gameID) playlist_id = fileID
else:
gameID = m.group('gameID')
playlist_id = gameID
videourl = self._VIDEO_PAGE_TEMPLATE % playlist_id
webpage = self._download_webpage(videourl, playlist_id)
if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None: if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
videourl = self._AGECHECK_TEMPLATE % gameID videourl = self._AGECHECK_TEMPLATE % playlist_id
self.report_age_confirmation() self.report_age_confirmation()
webpage = self._download_webpage(videourl, gameID) webpage = self._download_webpage(videourl, playlist_id)
self.report_extraction(gameID) if fileID:
game_title = self._html_search_regex(r'<h2 class="pageheader">(.*?)</h2>', playlist_title = self._html_search_regex(
webpage, 'game title') r'<div class="workshopItemTitle">(.+)</div>', webpage, 'title')
mweb = re.finditer(r'''(?x)
'movie_(?P<videoID>[0-9]+)':\s*\{\s*
YOUTUBE_VIDEO_ID:\s*"(?P<youtube_id>[^"]+)",
''', webpage)
videos = [{
'_type': 'url',
'url': vid.group('youtube_id'),
'ie_key': 'Youtube',
} for vid in mweb]
else:
playlist_title = self._html_search_regex(
r'<h2 class="pageheader">(.*?)</h2>', webpage, 'game title')
urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\}," mweb = re.finditer(r'''(?x)
mweb = re.finditer(urlRE, webpage) 'movie_(?P<videoID>[0-9]+)':\s*\{\s*
namesRE = r'<span class="title">(?P<videoName>.+?)</span>' FILENAME:\s*"(?P<videoURL>[\w:/\.\?=]+)"
titles = re.finditer(namesRE, webpage) (,\s*MOVIE_NAME:\s*\"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},
thumbsRE = r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">' ''', webpage)
thumbs = re.finditer(thumbsRE, webpage) titles = re.finditer(
videos = [] r'<span class="title">(?P<videoName>.+?)</span>', webpage)
for vid,vtitle,thumb in zip(mweb,titles,thumbs): thumbs = re.finditer(
video_id = vid.group('videoID') r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">', webpage)
title = vtitle.group('videoName') videos = []
video_url = vid.group('videoURL')
video_thumb = thumb.group('thumbnail') for vid, vtitle, thumb in zip(mweb, titles, thumbs):
if not video_url: video_id = vid.group('videoID')
raise ExtractorError(u'Cannot find video url for %s' % video_id) title = vtitle.group('videoName')
info = { video_url = vid.group('videoURL')
'id':video_id, video_thumb = thumb.group('thumbnail')
'url':video_url, if not video_url:
'ext': 'flv', raise ExtractorError('Cannot find video url for %s' % video_id)
'title': unescapeHTML(title), videos.append({
'thumbnail': video_thumb 'id': video_id,
} 'url': video_url,
videos.append(info) 'ext': 'flv',
return [self.playlist_result(videos, gameID, game_title)] 'title': unescapeHTML(title),
'thumbnail': video_thumb
})
if not videos:
raise ExtractorError('Could not find any videos')
return self.playlist_result(videos, playlist_id, playlist_title)
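
The suitable() override deleted above was only needed because the old pattern required re.VERBOSE; moving the flag into the pattern as (?x) lets the default re.match(_VALID_URL, url) check work unchanged. Minimal illustration with a trimmed copy of the pattern:

    import re

    VALID_URL = r'''(?x)
        https?://store\.steampowered\.com/
        (agecheck/)?
        (?P<urltype>video|app)/
        (?P<gameID>\d+)'''

    m = re.match(VALID_URL, 'http://store.steampowered.com/video/105600/')
    print(m.group('urltype'), m.group('gameID'))  # video 105600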

View File

@ -5,13 +5,16 @@ import re
import json import json
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import int_or_none from ..utils import (
int_or_none,
compat_str,
)
class StreamCZIE(InfoExtractor): class StreamCZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<videoid>.+)' _VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<videoid>.+)'
_TEST = { _TESTS = [{
'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti', 'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti',
'md5': '6d3ca61a8d0633c9c542b92fcb936b0c', 'md5': '6d3ca61a8d0633c9c542b92fcb936b0c',
'info_dict': { 'info_dict': {
@ -22,7 +25,18 @@ class StreamCZIE(InfoExtractor):
'thumbnail': 'http://im.stream.cz/episode/52961d7e19d423f8f06f0100', 'thumbnail': 'http://im.stream.cz/episode/52961d7e19d423f8f06f0100',
'duration': 256, 'duration': 256,
}, },
} }, {
'url': 'http://www.stream.cz/blanik/10002447-tri-roky-pro-mazanka',
'md5': '246272e753e26bbace7fcd9deca0650c',
'info_dict': {
'id': '10002447',
'ext': 'mp4',
'title': 'Kancelář Blaník: Tři roky pro Mazánka',
'description': 'md5:9177695a8b756a0a8ab160de4043b392',
'thumbnail': 'http://im.stream.cz/episode/537f838c50c11f8d21320000',
'duration': 368,
},
}]
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
@ -57,7 +71,7 @@ class StreamCZIE(InfoExtractor):
self._sort_formats(formats) self._sort_formats(formats)
return { return {
'id': str(jsonData['id']), 'id': compat_str(jsonData['episode_id']),
'title': self._og_search_title(webpage), 'title': self._og_search_title(webpage),
'thumbnail': jsonData['episode_image_original_url'].replace('//', 'http://'), 'thumbnail': jsonData['episode_image_original_url'].replace('//', 'http://'),
'formats': formats, 'formats': formats,

View File

@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_duration
class SWRMediathekIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_TESTS = [{
'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6',
'md5': '8c5f6f0172753368547ca8413a7768ac',
'info_dict': {
'id': '849790d0-dab8-11e3-a953-0026b975f2e6',
'ext': 'mp4',
'title': 'SWR odysso',
'description': 'md5:2012e31baad36162e97ce9eb3f157b8a',
'thumbnail': 're:^http:.*\.jpg$',
'duration': 2602,
'upload_date': '20140515',
'uploader': 'SWR Fernsehen',
'uploader_id': '990030',
},
}, {
'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
'md5': 'b10ab854f912eecc5a6b55cd6fc1f545',
'info_dict': {
'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
'ext': 'mp4',
'title': 'Nachtcafé - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen',
'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2',
'thumbnail': 're:http://.*\.jpg',
'duration': 5305,
'upload_date': '20140516',
'uploader': 'SWR Fernsehen',
'uploader_id': '990030',
},
}, {
'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6',
'md5': '4382e4ef2c9d7ce6852535fa867a0dd3',
'info_dict': {
'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6',
'ext': 'mp3',
'title': 'Saša Stanišic: Vor dem Fest',
'description': 'md5:5b792387dc3fbb171eb709060654e8c9',
'thumbnail': 're:http://.*\.jpg',
'duration': 3366,
'upload_date': '20140520',
'uploader': 'SWR 2',
'uploader_id': '284670',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video = self._download_json(
'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id, video_id, 'Downloading video JSON')
attr = video['attr']
media_type = attr['entry_etype']
formats = []
for entry in video['sub']:
if entry['name'] != 'entry_media':
continue
entry_attr = entry['attr']
codec = entry_attr['val0']
quality = int(entry_attr['val1'])
fmt = {
'url': entry_attr['val2'],
'quality': quality,
}
if media_type == 'Video':
fmt.update({
'format_note': ['144p', '288p', '544p'][quality-1],
'vcodec': codec,
})
elif media_type == 'Audio':
fmt.update({
'acodec': codec,
})
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'title': attr['entry_title'],
'description': attr['entry_descl'],
'thumbnail': attr['entry_image_16_9'],
'duration': parse_duration(attr['entry_durat']),
'upload_date': attr['entry_pdatet'][:-4],
'uploader': attr['channel_title'],
'uploader_id': attr['channel_idkey'],
'formats': formats,
}
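
In SWRMediathekIE above the API reports quality as a small integer, which doubles as an index into the resolution labels. Sketched with an invented entry_attr record:

    entry_attr = {'val0': 'h264', 'val1': '2',
                  'val2': 'http://example.com/v.mp4'}

    quality = int(entry_attr['val1'])
    fmt = {
        'url': entry_attr['val2'],
        'quality': quality,
        # quality 1..3 -> label; assumes the API never reports 0 or >3.
        'format_note': ['144p', '288p', '544p'][quality - 1],
        'vcodec': entry_attr['val0'],
    }
    print(fmt['format_note'])  # 288p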

View File

@ -6,9 +6,9 @@ from .common import InfoExtractor
class SyfyIE(InfoExtractor): class SyfyIE(InfoExtractor):
_VALID_URL = r'https?://www\.syfy\.com/videos/.+?vid:(?P<id>\d+)' _VALID_URL = r'https?://www\.syfy\.com/(?:videos/.+?vid:(?P<id>[0-9]+)|(?!videos)(?P<video_name>[^/]+)(?:$|[?#]))'
_TEST = { _TESTS = [{
'url': 'http://www.syfy.com/videos/Robot%20Combat%20League/Behind%20the%20Scenes/vid:2631458', 'url': 'http://www.syfy.com/videos/Robot%20Combat%20League/Behind%20the%20Scenes/vid:2631458',
'md5': 'e07de1d52c7278adbb9b9b1c93a66849', 'md5': 'e07de1d52c7278adbb9b9b1c93a66849',
'info_dict': { 'info_dict': {
@ -18,10 +18,30 @@ class SyfyIE(InfoExtractor):
'description': 'Listen to what insights George Lucas give his daughter Amanda.', 'description': 'Listen to what insights George Lucas give his daughter Amanda.',
}, },
'add_ie': ['ThePlatform'], 'add_ie': ['ThePlatform'],
} }, {
'url': 'http://www.syfy.com/wilwheaton',
'md5': '94dfa54ee3ccb63295b276da08c415f6',
'info_dict': {
'id': '4yoffOOXC767',
'ext': 'flv',
'title': 'The Wil Wheaton Project - Premiering May 27th at 10/9c.',
'description': 'The Wil Wheaton Project premieres May 27th at 10/9c. Don\'t miss it.',
},
'add_ie': ['ThePlatform'],
'skip': 'Blocked outside the US',
}]
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') video_name = mobj.group('video_name')
if video_name:
generic_webpage = self._download_webpage(url, video_name)
video_id = self._search_regex(
r'<iframe.*?class="video_iframe_page"\s+src="/_utils/video/thP_video_controller.php.*?_vid([0-9]+)">',
generic_webpage, 'video ID')
url = 'http://www.syfy.com/videos/%s/%s/vid:%s' % (
video_name, video_name, video_id)
else:
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id) webpage = self._download_webpage(url, video_id)
return self.url_result(self._og_search_video_url(webpage)) return self.url_result(self._og_search_video_url(webpage))

View File

@ -3,9 +3,6 @@ from __future__ import unicode_literals
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class TeamcocoIE(InfoExtractor): class TeamcocoIE(InfoExtractor):

View File

@ -51,16 +51,13 @@ class TEDIE(SubtitlesInfoExtractor):
} }
}, { }, {
'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best', 'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best',
'md5': '49144e345a899b8cb34d315f3b9cfeeb',
'info_dict': { 'info_dict': {
'id': '1972', 'id': '1972',
'ext': 'flv', 'ext': 'mp4',
'title': 'Be passionate. Be courageous. Be your best.', 'title': 'Be passionate. Be courageous. Be your best.',
'uploader': 'Gabby Giffords and Mark Kelly', 'uploader': 'Gabby Giffords and Mark Kelly',
'description': 'md5:d89e1d8ebafdac8e55df4c219ecdbfe9', 'description': 'md5:5174aed4d0f16021b704120360f72b92',
},
'params': {
# rtmp download
'skip_download': True,
}, },
}] }]
@ -97,7 +94,7 @@ class TEDIE(SubtitlesInfoExtractor):
playlist_info = info['playlist'] playlist_info = info['playlist']
playlist_entries = [ playlist_entries = [
self.url_result(u'http://www.ted.com/talks/' + talk['slug'], self.ie_key()) self.url_result('http://www.ted.com/talks/' + talk['slug'], self.ie_key())
for talk in info['talks'] for talk in info['talks']
] ]
return self.playlist_result( return self.playlist_result(
@ -163,7 +160,7 @@ class TEDIE(SubtitlesInfoExtractor):
sub_lang_list[l] = url sub_lang_list[l] = url
return sub_lang_list return sub_lang_list
else: else:
self._downloader.report_warning(u'video doesn\'t have subtitles') self._downloader.report_warning('video doesn\'t have subtitles')
return {} return {}
def _watch_info(self, url, name): def _watch_info(self, url, name):
@ -178,7 +175,10 @@ class TEDIE(SubtitlesInfoExtractor):
title = self._html_search_regex( title = self._html_search_regex(
r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title') r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title')
description = self._html_search_regex( description = self._html_search_regex(
r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>', [
r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>',
r'(?s)<p><strong>About this talk:</strong>\s+(.*?)</p>',
],
webpage, 'description', fatal=False) webpage, 'description', fatal=False)
return { return {

View File

@ -52,7 +52,7 @@ class ThePlatformIE(InfoExtractor):
head = meta.find(_x('smil:head')) head = meta.find(_x('smil:head'))
body = meta.find(_x('smil:body')) body = meta.find(_x('smil:body'))
f4m_node = body.find(_x('smil:seq/smil:video')) f4m_node = body.find(_x('smil:seq//smil:video'))
if f4m_node is not None: if f4m_node is not None:
f4m_url = f4m_node.attrib['src'] f4m_url = f4m_node.attrib['src']
if 'manifest.f4m?' not in f4m_url: if 'manifest.f4m?' not in f4m_url:
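
The one-character change above ('/' to '//') switches find() from direct children to any descendant, so a video element wrapped in an extra container is still located. Illustrated with ElementTree on an invented SMIL-like snippet, namespaces omitted for brevity:

    import xml.etree.ElementTree as ET

    body = ET.fromstring(
        '<body><seq><par><video src="manifest.f4m"/></par></seq></body>')
    print(body.find('seq/video'))              # None: not a direct child
    print(body.find('seq//video').get('src'))  # manifest.f4m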

View File

@ -0,0 +1,60 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveIE
from .discovery import DiscoveryIE
class TlcIE(DiscoveryIE):
IE_NAME = 'tlc.com'
_VALID_URL = r'http://www\.tlc\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
_TEST = {
'url': 'http://www.tlc.com/tv-shows/cake-boss/videos/too-big-to-fly.htm',
'md5': 'c4038f4a9b44d0b5d74caaa64ed2a01a',
'info_dict': {
'id': '853232',
'ext': 'mp4',
'title': 'Cake Boss: Too Big to Fly',
'description': 'Buddy has taken on a high flying task.',
'duration': 119,
},
}
class TlcDeIE(InfoExtractor):
IE_NAME = 'tlc.de'
_VALID_URL = r'http://www\.tlc\.de/sendungen/[^/]+/videos/(?P<title>[^/?]+)'
_TEST = {
'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001',
'info_dict': {
'id': '3235167922001',
'ext': 'mp4',
'title': 'Breaking Amish: Die Welt da draußen',
'uploader': 'Discovery Networks - Germany',
'description': 'Vier Amische und eine Mennonitin wagen in New York'
' den Sprung in ein komplett anderes Leben. Begleitet sie auf'
' ihrem spannenden Weg.',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group('title')
webpage = self._download_webpage(url, title)
iframe_url = self._search_regex(
'<iframe src="(http://www\.tlc\.de/wp-content/.+?)"', webpage,
'iframe url')
# Otherwise we don't get the correct 'BrightcoveExperience' element,
# example: http://www.tlc.de/sendungen/cake-boss/videos/cake-boss-cannoli-drama/
iframe_url = iframe_url.replace('.htm?', '.php?')
iframe = self._download_webpage(iframe_url, title)
return {
'_type': 'url',
'url': BrightcoveIE._extract_brightcove_url(iframe),
'ie': BrightcoveIE.ie_key(),
}
View File
@ -134,7 +134,13 @@ class VevoIE(InfoExtractor):
video_id = mobj.group('id') video_id = mobj.group('id')
json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
video_info = self._download_json(json_url, video_id)['video'] response = self._download_json(json_url, video_id)
video_info = response['video']
if not video_info:
if 'statusMessage' in response:
raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusMessage']), expected=True)
raise ExtractorError('Unable to extract videos')
formats = self._formats_from_json(video_info) formats = self._formats_from_json(video_info)
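The new Vevo code surfaces the service's own statusMessage and marks the failure as expected, so users see a clean error rather than a bug-report prompt. The pattern in isolation (raise_vevo_error is a hypothetical helper; ExtractorError comes from youtube_dl.utils, and response is an already-parsed JSON payload that carried no video):

from youtube_dl.utils import ExtractorError

def raise_vevo_error(ie_name, response):
    # Prefer the API's own message when the payload carries no video.
    if 'statusMessage' in response:
        raise ExtractorError(
            '%s said: %s' % (ie_name, response['statusMessage']),
            expected=True)
    raise ExtractorError('Unable to extract videos')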
View File
@ -0,0 +1,58 @@
from __future__ import unicode_literals
import re
import base64
from .common import InfoExtractor
from ..utils import unified_strdate
class VideoTtIE(InfoExtractor):
ID_NAME = 'video.tt'
IE_DESC = 'video.tt - Your True Tube'
_VALID_URL = r'http://(?:www\.)?video\.tt/(?:video/|watch_video\.php\?v=)(?P<id>[\da-zA-Z]{9})'
_TEST = {
'url': 'http://www.video.tt/watch_video.php?v=amd5YujV8',
'md5': 'b13aa9e2f267effb5d1094443dff65ba',
'info_dict': {
'id': 'amd5YujV8',
'ext': 'flv',
'title': 'Motivational video Change your mind in just 2.50 mins',
'description': '',
'upload_date': '20130827',
'uploader': 'joseph313',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
settings = self._download_json(
'http://www.video.tt/player_control/settings.php?v=%s' % video_id, video_id,
'Downloading video JSON')['settings']
video = settings['video_details']['video']
formats = [
{
'url': base64.b64decode(res['u']).decode('utf-8'),
'ext': 'flv',
'format_id': res['l'],
} for res in settings['res'] if res['u']
]
return {
'id': video_id,
'title': video['title'],
'description': video['description'],
'thumbnail': settings['config']['thumbnail'],
'upload_date': unified_strdate(video['added']),
'uploader': video['owner'],
'view_count': int(video['view_count']),
'comment_count': int(video['comment_count']),
'like_count': int(video['liked']),
'dislike_count': int(video['disliked']),
'formats': formats,
}
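The format URLs in the settings JSON arrive base64-encoded, which is why each res['u'] goes through b64decode above; for instance (the URL is invented):

import base64

encoded = 'aHR0cDovL2V4YW1wbGUuY29tL3YuZmx2'
base64.b64decode(encoded).decode('utf-8')  # -> 'http://example.com/v.flv'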
View File
@ -17,10 +17,39 @@ from ..utils import (
RegexNotFoundError, RegexNotFoundError,
std_headers, std_headers,
unsmuggle_url, unsmuggle_url,
urlencode_postdata,
int_or_none,
) )
class VimeoIE(SubtitlesInfoExtractor): class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
def _login(self):
(username, password) = self._get_login_info()
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
self.report_login()
login_url = 'https://vimeo.com/log_in'
webpage = self._download_webpage(login_url, None, False)
token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
data = urlencode_postdata({
'email': username,
'password': password,
'action': 'login',
'service': 'vimeo',
'token': token,
})
login_request = compat_urllib_request.Request(login_url, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_request.add_header('Cookie', 'xsrft=%s' % token)
self._download_webpage(login_request, None, False, 'Wrong login info')
class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
"""Information extractor for vimeo.com.""" """Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs # _VALID_URL matches Vimeo URLs
@ -33,7 +62,6 @@ class VimeoIE(SubtitlesInfoExtractor):
(?:videos?/)? (?:videos?/)?
(?P<id>[0-9]+) (?P<id>[0-9]+)
/?(?:[?&].*)?(?:[#].*)?$''' /?(?:[?&].*)?(?:[#].*)?$'''
_NETRC_MACHINE = 'vimeo'
IE_NAME = 'vimeo' IE_NAME = 'vimeo'
_TESTS = [ _TESTS = [
{ {
@ -47,40 +75,47 @@ class VimeoIE(SubtitlesInfoExtractor):
"uploader_id": "user7108434", "uploader_id": "user7108434",
"uploader": "Filippo Valsorda", "uploader": "Filippo Valsorda",
"title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
"duration": 10,
}, },
}, },
{ {
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876', 'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'file': '68093876.mp4',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82', 'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)', 'note': 'Vimeo Pro video (#1197)',
'info_dict': { 'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_id': 'openstreetmapus', 'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US', 'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography', 'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'duration': 1595,
}, },
}, },
{ {
'url': 'http://player.vimeo.com/video/54469442', 'url': 'http://player.vimeo.com/video/54469442',
'file': '54469442.mp4',
'md5': '619b811a4417aa4abe78dc653becf511', 'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page', 'note': 'Videos that embed the url in the player page',
'info_dict': { 'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software', 'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software',
'uploader': 'The BLN & Business of Software', 'uploader': 'The BLN & Business of Software',
'uploader_id': 'theblnbusinessofsoftware', 'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
}, },
}, },
{ {
'url': 'http://vimeo.com/68375962', 'url': 'http://vimeo.com/68375962',
'file': '68375962.mp4',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7', 'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password', 'note': 'Video protected with password',
'info_dict': { 'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video', 'title': 'youtube-dl password protected test video',
'upload_date': '20130614', 'upload_date': '20130614',
'uploader_id': 'user18948128', 'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz', 'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
}, },
'params': { 'params': {
'videopassword': 'youtube-dl', 'videopassword': 'youtube-dl',
@ -98,6 +133,7 @@ class VimeoIE(SubtitlesInfoExtractor):
'upload_date': '20131015', 'upload_date': '20131015',
'uploader_id': 'staff', 'uploader_id': 'staff',
'uploader': 'Vimeo Staff', 'uploader': 'Vimeo Staff',
'duration': 62,
} }
}, },
] ]
@ -111,38 +147,21 @@ class VimeoIE(SubtitlesInfoExtractor):
else: else:
return super(VimeoIE, cls).suitable(url) return super(VimeoIE, cls).suitable(url)
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
self.report_login()
login_url = 'https://vimeo.com/log_in'
webpage = self._download_webpage(login_url, None, False)
token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
data = compat_urllib_parse.urlencode({'email': username,
'password': password,
'action': 'login',
'service': 'vimeo',
'token': token,
})
login_request = compat_urllib_request.Request(login_url, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_request.add_header('Cookie', 'xsrft=%s' % token)
self._download_webpage(login_request, None, False, 'Wrong login info')
def _verify_video_password(self, url, video_id, webpage): def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword', None) password = self._downloader.params.get('videopassword', None)
if password is None: if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option') raise ExtractorError('This video is protected by a password, use the --video-password option')
token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token') token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
data = compat_urllib_parse.urlencode({'password': password, data = compat_urllib_parse.urlencode({
'token': token}) 'password': password,
'token': token,
})
# I didn't manage to use the password with https # I didn't manage to use the password with https
if url.startswith('https'): if url.startswith('https'):
pass_url = url.replace('https','http') pass_url = url.replace('https', 'http')
else: else:
pass_url = url pass_url = url
password_request = compat_urllib_request.Request(pass_url+'/password', data) password_request = compat_urllib_request.Request(pass_url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded') password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Cookie', 'xsrft=%s' % token) password_request.add_header('Cookie', 'xsrft=%s' % token)
self._download_webpage(password_request, video_id, self._download_webpage(password_request, video_id,
@ -249,8 +268,9 @@ class VimeoIE(SubtitlesInfoExtractor):
# Extract video description # Extract video description
video_description = None video_description = None
try: try:
video_description = get_element_by_attribute("itemprop", "description", webpage) video_description = get_element_by_attribute("class", "description_wrapper", webpage)
if video_description: video_description = clean_html(video_description) if video_description:
video_description = clean_html(video_description)
except AssertionError as err: except AssertionError as err:
# On some pages like (http://player.vimeo.com/video/54469442) the # On some pages like (http://player.vimeo.com/video/54469442) the
# html tags are not closed, python 2.6 cannot handle it # html tags are not closed, python 2.6 cannot handle it
@ -259,6 +279,9 @@ class VimeoIE(SubtitlesInfoExtractor):
else: else:
raise raise
# Extract video duration
video_duration = int_or_none(config["video"].get("duration"))
# Extract upload date # Extract upload date
video_upload_date = None video_upload_date = None
mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage) mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
@ -296,7 +319,7 @@ class VimeoIE(SubtitlesInfoExtractor):
file_info = {} file_info = {}
if video_url is None: if video_url is None:
video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
%(video_id, sig, timestamp, quality, codec_name.upper()) % (video_id, sig, timestamp, quality, codec_name.upper())
files[key].append({ files[key].append({
'ext': codec_extension, 'ext': codec_extension,
@ -330,6 +353,7 @@ class VimeoIE(SubtitlesInfoExtractor):
'title': video_title, 'title': video_title,
'thumbnail': video_thumbnail, 'thumbnail': video_thumbnail,
'description': video_description, 'description': video_description,
'duration': video_duration,
'formats': formats, 'formats': formats,
'webpage_url': url, 'webpage_url': url,
'view_count': view_count, 'view_count': view_count,
@ -355,7 +379,7 @@ class VimeoChannelIE(InfoExtractor):
video_ids = [] video_ids = []
for pagenum in itertools.count(1): for pagenum in itertools.count(1):
webpage = self._download_webpage( webpage = self._download_webpage(
self._page_url(base_url, pagenum) ,list_id, self._page_url(base_url, pagenum), list_id,
'Downloading page %s' % pagenum) 'Downloading page %s' % pagenum)
video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage)) video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None: if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
@ -371,7 +395,7 @@ class VimeoChannelIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
channel_id = mobj.group('id') channel_id = mobj.group('id')
return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id) return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id)
@ -438,3 +462,25 @@ class VimeoReviewIE(InfoExtractor):
video_id = mobj.group('id') video_id = mobj.group('id')
player_url = 'https://player.vimeo.com/player/' + video_id player_url = 'https://player.vimeo.com/player/' + video_id
return self.url_result(player_url, 'Vimeo', video_id) return self.url_result(player_url, 'Vimeo', video_id)
class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
IE_NAME = 'vimeo:watchlater'
IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
_VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
_LOGIN_REQUIRED = True
_TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
def _real_initialize(self):
self._login()
def _page_url(self, base_url, pagenum):
url = '%s/page:%d/' % (base_url, pagenum)
request = compat_urllib_request.Request(url)
# Set the header to get a partial html page with the ids;
# the normal page doesn't contain them.
request.add_header('X-Requested-With', 'XMLHttpRequest')
return request
def _real_extract(self, url):
return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
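The X-Requested-With trick above can be sketched on its own: per the comment, Vimeo only includes the clip ids in the reduced fragment it serves to XHR-style requests. A hedged sketch (no request is actually sent here; compat_urllib_request is the same compatibility shim the extractor uses):

from youtube_dl.utils import compat_urllib_request

request = compat_urllib_request.Request('https://vimeo.com/home/watchlater/page:2/')
# Pretend to be an in-page XHR call so the server returns the
# id-bearing HTML fragment instead of the full page shell.
request.add_header('X-Requested-With', 'XMLHttpRequest')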
View File
@ -2,6 +2,7 @@ from __future__ import unicode_literals
import re import re
import json import json
import itertools
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import unified_strdate from ..utils import unified_strdate
@ -57,4 +58,34 @@ class VineIE(InfoExtractor):
'comment_count': data['comments']['count'], 'comment_count': data['comments']['count'],
'repost_count': data['reposts']['count'], 'repost_count': data['reposts']['count'],
'formats': formats, 'formats': formats,
} }
class VineUserIE(InfoExtractor):
IE_NAME = 'vine:user'
_VALID_URL = r'(?:https?://)?vine\.co/(?P<user>[^/]+)/?(\?.*)?$'
_VINE_BASE_URL = "https://vine.co/"
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
profile_url = "%sapi/users/profiles/vanity/%s" % (
self._VINE_BASE_URL, user)
profile_data = self._download_json(
profile_url, user, note='Downloading user profile data')
user_id = profile_data['data']['userId']
timeline_data = []
for pagenum in itertools.count(1):
timeline_url = "%sapi/timelines/users/%s?page=%s" % (
self._VINE_BASE_URL, user_id, pagenum)
timeline_page = self._download_json(
timeline_url, user, note='Downloading page %d' % pagenum)
timeline_data.extend(timeline_page['data']['records'])
if timeline_page['data']['nextPage'] is None:
break
entries = [
self.url_result(e['permalinkUrl'], 'Vine') for e in timeline_data]
return self.playlist_result(entries, user)
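The timeline loop above is ordinary page-number pagination: keep fetching until the API reports no next page. Its generic shape, with fetch_page standing in for self._download_json:

import itertools

def collect_records(fetch_page):
    records = []
    for pagenum in itertools.count(1):
        page = fetch_page(pagenum)
        records.extend(page['data']['records'])
        if page['data']['nextPage'] is None:
            break
    return records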
View File
@ -37,7 +37,7 @@ class VKIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': '163339118', 'id': '163339118',
'ext': 'mp4', 'ext': 'mp4',
'uploader': 'Elvira Dzhonik', 'uploader': 'Elya Iskhakova',
'title': 'Dream Theater - Hollow Years Live at Budokan 720*', 'title': 'Dream Theater - Hollow Years Live at Budokan 720*',
'duration': 558, 'duration': 558,
} }
@ -108,7 +108,7 @@ class VKIE(InfoExtractor):
m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page) m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page)
if m_yt is not None: if m_yt is not None:
self.to_screen(u'Youtube video detected') self.to_screen('Youtube video detected')
return self.url_result(m_yt.group(1), 'Youtube') return self.url_result(m_yt.group(1), 'Youtube')
data_json = self._search_regex(r'var vars = ({.*?});', info_page, 'vars') data_json = self._search_regex(r'var vars = ({.*?});', info_page, 'vars')
data = json.loads(data_json) data = json.loads(data_json)
View File
@ -1,47 +1,69 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import re import re
import datetime
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import int_or_none
class VubeIE(InfoExtractor): class VubeIE(InfoExtractor):
IE_NAME = 'vube' IE_NAME = 'vube'
IE_DESC = 'Vube.com' IE_DESC = 'Vube.com'
_VALID_URL = r'http://vube\.com/[^/]+/(?P<id>[\da-zA-Z]{10})' _VALID_URL = r'http://vube\.com/(?:[^/]+/)+(?P<id>[\da-zA-Z]{10})\b'
_TEST = { _TESTS = [
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon', {
'md5': 'db7aba89d4603dadd627e9d1973946fe', 'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
'info_dict': { 'md5': 'db7aba89d4603dadd627e9d1973946fe',
'id': 'YL2qNPkqon', 'info_dict': {
'ext': 'mp4', 'id': 'YL2qNPkqon',
'title': 'Chiara Grispo - Price Tag by Jessie J', 'ext': 'mp4',
'description': 'md5:8ea652a1f36818352428cb5134933313', 'title': 'Chiara Grispo - Price Tag by Jessie J',
'thumbnail': 'http://frame.thestaticvube.com/snap/228x128/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f.jpg', 'description': 'md5:8ea652a1f36818352428cb5134933313',
'uploader': 'Chiara.Grispo', 'thumbnail': 'http://frame.thestaticvube.com/snap/228x128/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f.jpg',
'uploader_id': '1u3hX0znhP', 'uploader': 'Chiara.Grispo',
'upload_date': '20140103', 'uploader_id': '1u3hX0znhP',
'duration': 170.56 'timestamp': 1388743358,
'upload_date': '20140103',
'duration': 170.56
}
},
{
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
'md5': '5d4a52492d76f72712117ce6b0d98d08',
'info_dict': {
'id': 'UeBhTudbfS',
'ext': 'mp4',
'title': 'My 7 year old Sister and I singing "Alive" by Krewella',
'description': 'md5:40bcacb97796339f1690642c21d56f4a',
'thumbnail': 'http://frame.thestaticvube.com/snap/228x128/102265d5a9f-0f17-4f6b-5753-adf08484ee1e.jpg',
'uploader': 'Seraina',
'uploader_id': 'XU9VE2BQ2q',
'timestamp': 1396492438,
'upload_date': '20140403',
'duration': 240.107
}
} }
} ]
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') video_id = mobj.group('id')
video = self._download_json('http://vube.com/api/v2/video/%s' % video_id, video = self._download_json(
video_id, 'Downloading video JSON') 'http://vube.com/api/v2/video/%s' % video_id, video_id, 'Downloading video JSON')
public_id = video['public_id'] public_id = video['public_id']
formats = [{'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (fmt['media_resolution_id'], public_id), formats = [
'height': int(fmt['height']), {
'abr': int(fmt['audio_bitrate']), 'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (fmt['media_resolution_id'], public_id),
'vbr': int(fmt['video_bitrate']), 'height': int(fmt['height']),
'format_id': fmt['media_resolution_id'] 'abr': int(fmt['audio_bitrate']),
} for fmt in video['mtm'] if fmt['transcoding_status'] == 'processed'] 'vbr': int(fmt['video_bitrate']),
'format_id': fmt['media_resolution_id']
} for fmt in video['mtm'] if fmt['transcoding_status'] == 'processed'
]
self._sort_formats(formats) self._sort_formats(formats)
@ -52,16 +74,16 @@ class VubeIE(InfoExtractor):
thumbnail = 'http:' + thumbnail thumbnail = 'http:' + thumbnail
uploader = video['user_alias'] uploader = video['user_alias']
uploader_id = video['user_url_id'] uploader_id = video['user_url_id']
upload_date = datetime.datetime.fromtimestamp(int(video['upload_time'])).strftime('%Y%m%d') timestamp = int(video['upload_time'])
duration = video['duration'] duration = video['duration']
view_count = video['raw_view_count'] view_count = video.get('raw_view_count')
like_count = video['total_likes'] like_count = video.get('total_likes')
dislike_count = video['total_hates'] dislike_count = video.get('total_hates')
comment = self._download_json('http://vube.com/api/video/%s/comment' % video_id, comment = self._download_json(
video_id, 'Downloading video comment JSON') 'http://vube.com/api/video/%s/comment' % video_id, video_id, 'Downloading video comment JSON')
comment_count = comment['total'] comment_count = int_or_none(comment.get('total'))
return { return {
'id': video_id, 'id': video_id,
@ -71,7 +93,7 @@ class VubeIE(InfoExtractor):
'thumbnail': thumbnail, 'thumbnail': thumbnail,
'uploader': uploader, 'uploader': uploader,
'uploader_id': uploader_id, 'uploader_id': uploader_id,
'upload_date': upload_date, 'timestamp': timestamp,
'duration': duration, 'duration': duration,
'view_count': view_count, 'view_count': view_count,
'like_count': like_count, 'like_count': like_count,
View File
@ -0,0 +1,66 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse_urlparse,
parse_duration,
qualities,
)
class VuClipIE(InfoExtractor):
_VALID_URL = r'http://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://m.vuclip.com/w?cid=843902317&fid=63532&z=1007&nvar&frm=index.html&bu=4757321434',
'md5': '92ac9d1ccefec4f0bb474661ab144fcf',
'info_dict': {
'id': '843902317',
'ext': '3gp',
'title': 'Movie Trailer: Noah',
'duration': 139,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
ad_m = re.search(
r'''value="No.*?" onClick="location.href='([^"']+)'"''', webpage)
if ad_m:
urlr = compat_urllib_parse_urlparse(url)
adfree_url = urlr.scheme + '://' + urlr.netloc + ad_m.group(1)
webpage = self._download_webpage(
adfree_url, video_id, note='Download post-ad page')
links_code = self._search_regex(
r'(?s)<div class="social align_c".*?>(.*?)<hr\s*/?>', webpage,
'links')
title = self._html_search_regex(
r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title').strip()
quality_order = qualities(['Reg', 'Hi'])
formats = []
for url, q in re.findall(
r'<a href="(?P<url>[^"]+)".*?>(?P<q>[^<]+)</a>', links_code):
format_id = compat_urllib_parse_urlparse(url).scheme + '-' + q
formats.append({
'format_id': format_id,
'url': url,
'quality': quality_order(q),
})
self._sort_formats(formats)
duration = parse_duration(self._search_regex(
r'\(([0-9:]+)\)</span></h1>', webpage, 'duration', fatal=False))
return {
'id': video_id,
'formats': formats,
'title': title,
'duration': duration,
}
View File
@ -115,6 +115,34 @@ class WDRIE(InfoExtractor):
} }
class WDRMobileIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://mobile-ondemand\.wdr\.de/
.*?/fsk(?P<age_limit>[0-9]+)
/[0-9]+/[0-9]+/
(?P<id>[0-9]+)_(?P<title>[0-9]+)'''
IE_NAME = 'wdr:mobile'
_TEST = {
'url': 'http://mobile-ondemand.wdr.de/CMS2010/mdb/ondemand/weltweit/fsk0/42/421735/421735_4283021.mp4',
'info_dict': {
'title': '4283021',
'id': '421735',
'age_limit': 0,
},
'_skip': 'Will be depublicized shortly'
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
return {
'id': mobj.group('id'),
'title': mobj.group('title'),
'age_limit': int(mobj.group('age_limit')),
'url': url,
'user_agent': 'mobile',
}
class WDRMausIE(InfoExtractor): class WDRMausIE(InfoExtractor):
_VALID_URL = 'http://(?:www\.)?wdrmaus\.de/(?:[^/]+/){,2}(?P<id>[^/?#]+)(?:/index\.php5|(?<!index)\.php5|/(?:$|[?#]))' _VALID_URL = 'http://(?:www\.)?wdrmaus\.de/(?:[^/]+/){,2}(?P<id>[^/?#]+)(?:/index\.php5|(?<!index)\.php5|/(?:$|[?#]))'
IE_DESC = 'Sendung mit der Maus' IE_DESC = 'Sendung mit der Maus'
View File
@ -14,8 +14,8 @@ from ..utils import (
class YahooIE(InfoExtractor): class YahooIE(InfoExtractor):
IE_DESC = 'Yahoo screen' IE_DESC = 'Yahoo screen and movies'
_VALID_URL = r'https?://screen\.yahoo\.com/.*?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html' _VALID_URL = r'https?://(?:screen|movies)\.yahoo\.com/.*?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html'
_TESTS = [ _TESTS = [
{ {
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html', 'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
@ -37,6 +37,16 @@ class YahooIE(InfoExtractor):
'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?', 'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
}, },
}, },
{
'url': 'https://movies.yahoo.com/video/world-loves-spider-man-190819223.html',
'md5': '410b7104aa9893b765bc22787a22f3d9',
'info_dict': {
'id': '516ed8e2-2c4f-339f-a211-7a8b49d30845',
'ext': 'mp4',
'title': 'The World Loves Spider-Man',
'description': '''People all over the world are celebrating the release of \"The Amazing Spider-Man 2.\" We're taking a look at the enthusiastic response Spider-Man has received from viewers all over the world.''',
}
}
] ]
def _real_extract(self, url): def _real_extract(self, url):
@ -44,13 +54,20 @@ class YahooIE(InfoExtractor):
video_id = mobj.group('id') video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id) webpage = self._download_webpage(url, video_id)
items_json = self._search_regex(r'mediaItems: ({.*?})$', items_json = self._search_regex(
webpage, 'items', flags=re.MULTILINE) r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
items = json.loads(items_json) default=None)
info = items['mediaItems']['query']['results']['mediaObj'][0] if items_json is None:
# The 'meta' field is not always in the video webpage, we request it long_id = self._search_regex(
# from another page r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
long_id = info['id'] webpage, 'content ID')
video_id = long_id
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
# The 'meta' field is not always in the video webpage; we request it
# from another page.
long_id = info['id']
return self._get_info(long_id, video_id) return self._get_info(long_id, video_id)
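Because _search_regex is now called with default=None, a missing mediaItems block no longer raises; the code falls through to the CONTENT_ID regex instead. The same two-step fallback with plain re (the page content is invented):

import re

webpage = 'YUI.namespace("Media").CONTENT_ID = "516ed8e2-2c4f-339f-a211-7a8b49d30845";'
m = re.search(r'mediaItems: ({.*?})$', webpage, flags=re.MULTILINE)
if m is None:
    long_id = re.search(
        r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
        webpage).group(1)  # falls back to the content id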
def _get_info(self, long_id, video_id): def _get_info(self, long_id, video_id):
@ -104,7 +121,7 @@ class YahooNewsIE(YahooIE):
IE_NAME = 'yahoo:news' IE_NAME = 'yahoo:news'
_VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html' _VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
_TEST = { _TESTS = [{
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html', 'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
'md5': '67010fdf3a08d290e060a4dd96baa07b', 'md5': '67010fdf3a08d290e060a4dd96baa07b',
'info_dict': { 'info_dict': {
@ -113,10 +130,7 @@ class YahooNewsIE(YahooIE):
'title': 'China Moses Is Crazy About the Blues', 'title': 'China Moses Is Crazy About the Blues',
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0', 'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
}, },
} }]
# Overwrite YahooIE properties we don't want
_TESTS = []
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
View File
@ -210,23 +210,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50}, '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},
# Dash webm # Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'acodec': 'none', 'preference': -40}, '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'acodec': 'none', 'preference': -40}, '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'acodec': 'none', 'preference': -40}, '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'acodec': 'none', 'preference': -40}, '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'acodec': 'none', 'preference': -40}, '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'acodec': 'none', 'preference': -40}, '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH webm', 'preference': -40}, '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH webm', 'preference': -40}, '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH webm', 'preference': -40}, '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH webm', 'preference': -40}, '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH webm', 'preference': -40}, '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH webm', 'preference': -40}, '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH webm', 'preference': -40}, '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
# Dash webm audio # Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH webm audio', 'abr': 48, 'preference': -50}, '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 48, 'preference': -50},
'172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH webm audio', 'abr': 256, 'preference': -50}, '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# RTMP (unnamed) # RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'}, '_rtmp': {'protocol': 'rtmp'},
@ -242,7 +242,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
u"uploader": u"Philipp Hagemeister", u"uploader": u"Philipp Hagemeister",
u"uploader_id": u"phihag", u"uploader_id": u"phihag",
u"upload_date": u"20121002", u"upload_date": u"20121002",
u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ." u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
u"categories": [u'Science & Technology'],
} }
}, },
{ {
@ -252,7 +253,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
u"info_dict": { u"info_dict": {
u"upload_date": u"20120506", u"upload_date": u"20120506",
u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]", u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
u"description": u"md5:5b292926389560516e384ac437c0ec07", u"description": u"md5:fea86fda2d5a5784273df5c7cc994d9f",
u"uploader": u"Icona Pop", u"uploader": u"Icona Pop",
u"uploader_id": u"IconaPop" u"uploader_id": u"IconaPop"
} }
@ -304,7 +305,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
u'id': u'IB3lcPjvWLA', u'id': u'IB3lcPjvWLA',
u'ext': u'm4a', u'ext': u'm4a',
u'title': u'Afrojack - The Spark ft. Spree Wilson', u'title': u'Afrojack - The Spark ft. Spree Wilson',
u'description': u'md5:3199ed45ee8836572865580804d7ac0f', u'description': u'md5:9717375db5a9a3992be4668bbf3bc0a8',
u'uploader': u'AfrojackVEVO', u'uploader': u'AfrojackVEVO',
u'uploader_id': u'AfrojackVEVO', u'uploader_id': u'AfrojackVEVO',
u'upload_date': u'20131011', u'upload_date': u'20131011',
@ -1082,9 +1083,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
break break
if 'token' not in video_info: if 'token' not in video_info:
if 'reason' in video_info: if 'reason' in video_info:
raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0], expected=True) raise ExtractorError(
u'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else: else:
raise ExtractorError(u'"token" parameter not in video info for unknown reason') raise ExtractorError(
u'"token" parameter not in video info for unknown reason',
video_id=video_id)
if 'view_count' in video_info: if 'view_count' in video_info:
view_count = int(video_info['view_count'][0]) view_count = int(video_info['view_count'][0])
@ -1113,7 +1118,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
# title # title
if 'title' in video_info: if 'title' in video_info:
video_title = compat_urllib_parse.unquote_plus(video_info['title'][0]) video_title = video_info['title'][0]
else: else:
self._downloader.report_warning(u'Unable to extract video title') self._downloader.report_warning(u'Unable to extract video title')
video_title = u'_' video_title = u'_'
@ -1132,11 +1137,24 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
# upload date # upload date
upload_date = None upload_date = None
mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL) mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
if mobj is None:
mobj = re.search(
r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded) on (.*?)</strong>',
video_webpage)
if mobj is not None: if mobj is not None:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date) upload_date = unified_strdate(upload_date)
m_cat_container = get_element_by_id("eow-category", video_webpage)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
# description # description
video_description = get_element_by_id("eow-description", video_webpage) video_description = get_element_by_id("eow-description", video_webpage)
if video_description: if video_description:
@ -1343,6 +1361,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'title': video_title, 'title': video_title,
'thumbnail': video_thumbnail, 'thumbnail': video_thumbnail,
'description': video_description, 'description': video_description,
'categories': video_categories,
'subtitles': video_subtitles, 'subtitles': video_subtitles,
'duration': video_duration, 'duration': video_duration,
'age_limit': 18 if age_gate else 0, 'age_limit': 18 if age_gate else 0,
@ -1432,6 +1451,13 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
page = self._download_webpage(url, playlist_id) page = self._download_webpage(url, playlist_id)
more_widget_html = content_html = page more_widget_html = content_html = page
# Check if the playlist exists or is private
if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
raise ExtractorError(
u'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
# Extract the video ids from the playlist pages # Extract the video ids from the playlist pages
ids = [] ids = []
@ -1749,9 +1775,12 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
feed_entries.extend( feed_entries.extend(
self.url_result(video_id, 'Youtube', video_id=video_id) self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in ids) for video_id in ids)
if info['paging'] is None: mobj = re.search(
r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
feed_html)
if mobj is None:
break break
paging = info['paging'] paging = mobj.group('paging')
return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE) return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor): class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
View File
@ -10,6 +10,7 @@ from .common import AudioConversionError, PostProcessor
from ..utils import ( from ..utils import (
check_executable, check_executable,
compat_subprocess_get_DEVNULL, compat_subprocess_get_DEVNULL,
encodeArgument,
encodeFilename, encodeFilename,
PostProcessingError, PostProcessingError,
prepend_extension, prepend_extension,
@ -42,6 +43,9 @@ class FFmpegPostProcessor(PostProcessor):
def _uses_avconv(self): def _uses_avconv(self):
return self._get_executable() == self._exes['avconv'] return self._get_executable() == self._exes['avconv']
def _encode_opts(self, opts):
return [encodeArgument(o) for o in opts]
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts, input_opts=[]): def run_ffmpeg_multiple_files(self, input_paths, out_path, opts, input_opts=[]):
if not self._get_executable(): if not self._get_executable():
raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.') raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
@ -50,7 +54,7 @@ class FFmpegPostProcessor(PostProcessor):
for path in input_paths: for path in input_paths:
files_cmd.extend(['-i', encodeFilename(path, True)]) files_cmd.extend(['-i', encodeFilename(path, True)])
cmd = ([self._get_executable(), '-y'] + cmd = ([self._get_executable(), '-y'] +
input_opts + files_cmd + opts + self._encode_opts(input_opts) + files_cmd + self._encode_opts(opts) +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)]) [encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
if self._downloader.params.get('verbose', False): if self._downloader.params.get('verbose', False):
View File
@ -6,6 +6,7 @@ from .common import PostProcessor
from ..utils import ( from ..utils import (
check_executable, check_executable,
hyphenate_date, hyphenate_date,
subprocess_check_output
) )
@ -57,7 +58,7 @@ class XAttrMetadataPP(PostProcessor):
elif user_has_xattr: elif user_has_xattr:
cmd = ['xattr', '-w', key, value, path] cmd = ['xattr', '-w', key, value, path]
subprocess.check_output(cmd) subprocess_check_output(cmd)
else: else:
# On Unix, and can't find pyxattr, setfattr, or xattr. # On Unix, and can't find pyxattr, setfattr, or xattr.
View File
@ -540,6 +540,16 @@ def encodeFilename(s, for_subprocess=False):
encoding = 'utf-8' encoding = 'utf-8'
return s.encode(encoding, 'ignore') return s.encode(encoding, 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
#assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeOption(optval): def decodeOption(optval):
if optval is None: if optval is None:
return optval return optval
@ -594,13 +604,15 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
class ExtractorError(Exception): class ExtractorError(Exception):
"""Error during info extraction.""" """Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None): def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out). """ tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl. If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
""" """
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True expected = True
if video_id is not None:
msg = video_id + ': ' + msg
if not expected: if not expected:
msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.' msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
super(ExtractorError, self).__init__(msg) super(ExtractorError, self).__init__(msg)
@ -608,6 +620,7 @@ class ExtractorError(Exception):
self.traceback = tb self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause self.cause = cause
self.video_id = video_id
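With the new video_id parameter, the id is prefixed onto the message, so every error line identifies the offending video; for example (assuming the import, with an illustrative id):

from youtube_dl.utils import ExtractorError

try:
    raise ExtractorError('Unable to download webpage',
                         video_id='dQw4w9WgXcQ', expected=True)
except ExtractorError as err:
    assert str(err) == 'dQw4w9WgXcQ: Unable to download webpage'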
def format_traceback(self): def format_traceback(self):
if self.traceback is None: if self.traceback is None:
@ -927,10 +940,11 @@ def _windows_write_string(s, out):
2: -12, 2: -12,
} }
def ucs2_len(s): try:
return sum((2 if ord(c) > 0xffff else 1) for c in s) fileno = out.fileno()
except AttributeError:
fileno = out.fileno() # If the output stream doesn't have a fileno, it's virtual
return False
if fileno not in WIN_OUTPUT_IDS: if fileno not in WIN_OUTPUT_IDS:
return False return False
@ -963,13 +977,25 @@ def _windows_write_string(s, out):
if not_a_console(h): if not_a_console(h):
return False return False
remaining = ucs2_len(s) def next_nonbmp_pos(s):
while remaining > 0: try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW( ret = WriteConsoleW(
h, s, min(remaining, 1024), ctypes.byref(written), None) h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0: if ret == 0:
raise OSError('Failed to write string') raise OSError('Failed to write string')
remaining -= written.value if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True return True
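The rewrite works because WriteConsoleW counts UTF-16 code units, and a character outside the Basic Multilingual Plane occupies two of them; chunks therefore stop just before each non-BMP character, which is then written as its own two-unit call. The position helper in isolation (on a Python build with wide unicode strings):

def next_nonbmp_pos(s):
    try:
        return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
    except StopIteration:
        return len(s)

assert next_nonbmp_pos(u'ab\U0001d550cd') == 2  # write 'ab', then the surrogate pair
assert next_nonbmp_pos(u'plain text') == 10     # no non-BMP character at all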
@ -1240,7 +1266,10 @@ class HEADRequest(compat_urllib_request.Request):
return "HEAD" return "HEAD"
def int_or_none(v, scale=1, default=None): def int_or_none(v, scale=1, default=None, get_attr=None):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
return default if v is None else (int(v) // scale) return default if v is None else (int(v) // scale)
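The new get_attr parameter lets callers pull an attribute off an object before the conversion, with everything collapsing to the default when a step is missing. A few illustrative calls against the definition above (the _Node class is invented):

class _Node(object):
    text = '1595'

int_or_none(_Node(), get_attr='text')  # -> 1595
int_or_none(None, get_attr='text')     # -> None
int_or_none('5400', scale=60)          # -> 90 (scale divides)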
@ -1401,3 +1430,28 @@ US_RATINGS = {
def strip_jsonp(code): def strip_jsonp(code):
return re.sub(r'(?s)^[a-zA-Z_]+\s*\(\s*(.*)\);\s*?\s*$', r'\1', code) return re.sub(r'(?s)^[a-zA-Z_]+\s*\(\s*(.*)\);\s*?\s*$', r'\1', code)
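strip_jsonp unwraps a JSONP response down to the bare JSON payload, e.g.:

strip_jsonp('callback({"id": "abc"});')  # -> '{"id": "abc"}'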
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
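Later entries in the id list rank higher, and unknown ids sort below everything known; VuClipIE above uses exactly this with ['Reg', 'Hi']:

quality_order = qualities(['Reg', 'Hi'])
quality_order('Hi')   # -> 1
quality_order('Reg')  # -> 0
quality_order('HD')   # -> -1 (unknown id)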
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
# Popen has no .args attribute on the old interpreters this
# fallback targets, so reuse the arguments passed in.
raise subprocess.CalledProcessError(ret, args[0], output=output)
return output
View File
@ -1,2 +1,2 @@
__version__ = '2014.04.13' __version__ = '2014.05.19'