Merge branch 'master' into tvpleextractor

Conflicts: youtube_dl/extractor/__init__.py

commit ae74838a11

--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -6,8 +6,8 @@
 
 ---
 
-### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.04.01*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
-- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.04.01**
+### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.04.06*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
+- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.04.06**
 
 ### Before submitting an *issue* make sure you have:
 - [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
@@ -35,7 +35,7 @@ $ youtube-dl -v <your command line>
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
 [debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
-[debug] youtube-dl version 2016.04.01
+[debug] youtube-dl version 2016.04.06
 [debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
 [debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
 [debug] Proxy map: {}
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ README.txt
 youtube-dl.1
 youtube-dl.bash-completion
 youtube-dl.fish
+youtube_dl/extractor/lazy_extractors.py
 youtube-dl
 youtube-dl.exe
 youtube-dl.tar.gz
@@ -140,14 +140,14 @@ After you have ensured this site is distributing it's content legally, you can f
             # TODO more properties (see youtube_dl/extractor/common.py)
         }
     ```
-5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
+5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
 8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
 9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
 10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
-        $ git add youtube_dl/extractor/__init__.py
+        $ git add youtube_dl/extractor/extractors.py
         $ git add youtube_dl/extractor/yourextractor.py
         $ git commit -m '[yourextractor] Add new extractor'
         $ git push origin yourextractor
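As a quick, non-authoritative illustration of the steps above, here is a minimal extractor sketch; `YourExtractorIE`, the page layout and the `meta`/`summary` keys are hypothetical and only mirror the advice in step 8 (optional data via `meta.get(...)` and `fatal=False`):

```python
# Hypothetical sketch of youtube_dl/extractor/yourextractor.py (names made up)
from __future__ import unicode_literals

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Hypothetical intermediate metadata dict embedded in the page
        meta = self._parse_json(self._search_regex(
            r'window\.meta\s*=\s*({.+?});', webpage, 'metadata',
            default='{}'), video_id, fatal=False) or {}

        return {
            'id': video_id,
            # Mandatory fields: id, title and either url or formats
            'title': meta.get('title') or self._og_search_title(webpage),
            'url': meta['video_url'],
            # Optional fields: tolerate missing sources instead of crashing
            'description': meta.get('summary'),
            'thumbnail': self._html_search_regex(
                r'<img[^>]+id="poster"[^>]+src="([^"]+)"', webpage,
                'thumbnail', fatal=False),
        }
```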
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
-all: youtube-dl README.md CONTRIBUTING.md ISSUE_TEMPLATE.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites
+all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites
 
 clean:
-	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 *.avi CONTRIBUTING.md.tmp ISSUE_TEMPLATE.md.tmp youtube-dl youtube-dl.exe
+	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish youtube_dl/extractor/lazy_extractors.py *.dump *.part *.info.json *.mp4 *.flv *.mp3 *.avi CONTRIBUTING.md.tmp ISSUE_TEMPLATE.md.tmp youtube-dl youtube-dl.exe
 	find . -name "*.pyc" -delete
 	find . -name "*.class" -delete
 
@@ -59,7 +59,7 @@ README.md: youtube_dl/*.py youtube_dl/*/*.py
 CONTRIBUTING.md: README.md
 	$(PYTHON) devscripts/make_contributing.py README.md CONTRIBUTING.md
 
-ISSUE_TEMPLATE.md:
+.github/ISSUE_TEMPLATE.md: devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl.md youtube_dl/version.py
 	$(PYTHON) devscripts/make_issue_template.py .github/ISSUE_TEMPLATE_tmpl.md .github/ISSUE_TEMPLATE.md
 
 supportedsites:
@@ -88,6 +88,12 @@ youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
 
 fish-completion: youtube-dl.fish
 
+lazy-extractors: youtube_dl/extractor/lazy_extractors.py
+
+_EXTRACTOR_FILES != find youtube_dl/extractor -iname '*.py' -and -not -iname 'lazy_extractors.py'
+youtube_dl/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
+	$(PYTHON) devscripts/make_lazy_extractors.py $@
+
 youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 	@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
 		--exclude '*.DS_Store' \
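The new `lazy-extractors` target can be exercised directly from the shell; the paths below come from the rules above, the rest is just a usage sketch:

```
# generate the module from the extractor sources and the template
make lazy-extractors

# the generated file is ignored by git (.gitignore) and removed by `make clean`
ls youtube_dl/extractor/lazy_extractors.py
```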
@@ -889,14 +889,14 @@ After you have ensured this site is distributing it's content legally, you can f
             # TODO more properties (see youtube_dl/extractor/common.py)
         }
     ```
-5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
+5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
 7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
 8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
 9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
 10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
-        $ git add youtube_dl/extractor/__init__.py
+        $ git add youtube_dl/extractor/extractors.py
         $ git add youtube_dl/extractor/yourextractor.py
         $ git commit -m '[yourextractor] Add new extractor'
         $ git push origin yourextractor
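Step 6 above describes turning `_TEST` into a `_TESTS` list once a second test case is added; a hedged sketch of what that looks like (placeholder URLs, checksum and values, not taken from this diff):

```python
from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    # A single case would be: _TEST = {...}
    # With several cases, use a list; they run as TestDownload.test_YourExtractor,
    # TestDownload.test_YourExtractor_1, ...
    _TESTS = [{
        'url': 'http://yourextractor.com/watch/42',
        'md5': 'd41d8cd98f00b204e9800998ecf8427e',  # placeholder checksum
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }, {
        'url': 'http://yourextractor.com/watch/43',
        'only_matching': True,
    }]
```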
devscripts/lazy_load_template.py (new file, 19 lines):

# encoding: utf-8
from __future__ import unicode_literals

import re


class LazyLoadExtractor(object):
    _module = None

    @classmethod
    def ie_key(cls):
        return cls.__name__[:-2]

    def __new__(cls, *args, **kwargs):
        mod = __import__(cls._module, fromlist=(cls.__name__,))
        real_cls = getattr(mod, cls.__name__)
        instance = real_cls.__new__(real_cls)
        instance.__init__(*args, **kwargs)
        return instance
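This template is only the head of the generated module: `__new__` swaps the placeholder class for the real extractor the first time it is instantiated, and the generator below also appends `InfoExtractor.suitable` to `LazyLoadExtractor`, so URL matching works without importing any extractor module. A hypothetical generated entry (the `FooIE` name and module are made up) would look roughly like:

```python
class FooIE(LazyLoadExtractor):
    _VALID_URL = 'https?://(?:www\\.)?foo\\.example/watch/(?P<id>[0-9]+)'
    _module = 'youtube_dl.extractor.foo'

# FooIE.suitable(url) only needs _VALID_URL, so nothing is imported yet;
# FooIE() triggers LazyLoadExtractor.__new__, which imports
# youtube_dl.extractor.foo and returns a real FooIE instance.
```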
devscripts/make_lazy_extractors.py (new file, 63 lines):

from __future__ import unicode_literals, print_function

from inspect import getsource
import os
from os.path import dirname as dirn
import sys

print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)

sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))

lazy_extractors_filename = sys.argv[1]
if os.path.exists(lazy_extractors_filename):
    os.remove(lazy_extractors_filename)

from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor

with open('devscripts/lazy_load_template.py', 'rt') as f:
    module_template = f.read()

module_contents = [module_template + '\n' + getsource(InfoExtractor.suitable)]

ie_template = '''
class {name}(LazyLoadExtractor):
    _VALID_URL = {valid_url!r}
    _module = '{module}'
'''

make_valid_template = '''
    @classmethod
    def _make_valid_url(cls):
        return {valid_url!r}
'''


def build_lazy_ie(ie, name):
    valid_url = getattr(ie, '_VALID_URL', None)
    s = ie_template.format(
        name=name,
        valid_url=valid_url,
        module=ie.__module__)
    if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
        s += '\n' + getsource(ie.suitable)
    if hasattr(ie, '_make_valid_url'):
        # search extractors
        s += make_valid_template.format(valid_url=ie._make_valid_url())
    return s

names = []
for ie in list(sorted(_ALL_CLASSES[:-1], key=lambda cls: cls.ie_key())) + _ALL_CLASSES[-1:]:
    name = ie.ie_key() + 'IE'
    src = build_lazy_ie(ie, name)
    module_contents.append(src)
    names.append(name)

module_contents.append(
    '_ALL_CLASSES = [{0}]'.format(', '.join(names)))

module_src = '\n'.join(module_contents) + '\n'

with open(lazy_extractors_filename, 'wt') as f:
    f.write(module_src)
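The script takes the output filename as its only argument and reads `devscripts/lazy_load_template.py` relative to the working directory, so it is meant to be run from the repository root — e.g. the invocation the Makefile and setup.py use:

```
python devscripts/make_lazy_extractors.py youtube_dl/extractor/lazy_extractors.py
```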
@@ -46,7 +46,7 @@ fi
 sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
 
 /bin/echo -e "\n### Committing documentation, templates and youtube_dl/version.py..."
-make README.md CONTRIBUTING.md ISSUE_TEMPLATE.md supportedsites
+make README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md supportedsites
 git add README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md docs/supportedsites.md youtube_dl/version.py
 git commit -m "release $version"
 
@@ -57,6 +57,7 @@
 - **AudioBoom**
 - **audiomack**
 - **audiomack:album**
+- **auroravid**: AuroraVid
 - **Azubu**
 - **AzubuLive**
 - **BaiduVideo**: 百度视频
@@ -92,12 +92,14 @@
 - **BYUtv**
 - **Camdemy**
 - **CamdemyFolder**
+- **CamWithHer**
 - **canalc2.tv**
 - **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
 - **Canvas**
 - **CBC**
 - **CBCPlayer**
 - **CBS**
+- **CBSInteractive**
 - **CBSNews**: CBS News
 - **CBSNewsLiveVideo**: CBS News Live Videos
 - **CBSSports**
@@ -119,7 +122,6 @@
 - **Clyp**
 - **cmt.com**
 - **CNBC**
-- **CNET**
 - **CNN**
 - **CNNArticle**
 - **CNNBlogs**
@@ -419,7 +421,6 @@
 - **Normalboots**
 - **NosVideo**
 - **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
-- **novamov**: NovaMov
 - **nowness**
 - **nowness:playlist**
 - **nowness:series**
@@ -2,5 +2,5 @@
 universal = True
 
 [flake8]
-exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/make_issue_template.py,setup.py,build,.git
+exclude = youtube_dl/extractor/__init__.py,devscripts/buildserver.py,devscripts/lazy_load_template.py,devscripts/make_issue_template.py,setup.py,build,.git
 ignore = E402,E501,E731
--- a/setup.py
+++ b/setup.py
@@ -8,11 +8,12 @@ import warnings
 import sys
 
 try:
-    from setuptools import setup
+    from setuptools import setup, Command
     setuptools_available = True
 except ImportError:
-    from distutils.core import setup
+    from distutils.core import setup, Command
     setuptools_available = False
+from distutils.spawn import spawn
 
 try:
     # This will create an exe that needs Microsoft Visual C++ 2008
@@ -70,6 +71,22 @@ else:
 else:
     params['scripts'] = ['bin/youtube-dl']
 
+class build_lazy_extractors(Command):
+    description = "Build the extractor lazy loading module"
+    user_options = []
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        spawn(
+            [sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'],
+            dry_run=self.dry_run,
+        )
+
 # Get the version from youtube_dl/version.py without importing the package
 exec(compile(open('youtube_dl/version.py').read(),
      'youtube_dl/version.py', 'exec'))
@@ -107,5 +124,6 @@ setup(
         "Programming Language :: Python :: 3.4",
     ],
 
+    cmdclass={'build_lazy_extractors': build_lazy_extractors},
     **params
 )
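With `cmdclass` registered, the same generation step is also available through the setup script; a usage sketch equivalent to the Makefile target:

```
python setup.py build_lazy_extractors
```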
@@ -76,6 +76,10 @@ class TestCompat(unittest.TestCase):
         self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def')
         self.assertEqual(compat_urllib_parse_urlencode({b'abc': 'def'}), 'abc=def')
         self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def')
+        self.assertEqual(compat_urllib_parse_urlencode([('abc', 'def')]), 'abc=def')
+        self.assertEqual(compat_urllib_parse_urlencode([('abc', b'def')]), 'abc=def')
+        self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def')
+        self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def')
 
     def test_compat_shlex_split(self):
         self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
@@ -82,7 +82,7 @@ from .utils import (
     YoutubeDLHandler,
 )
 from .cache import Cache
-from .extractor import get_info_extractor, gen_extractors
+from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
 from .downloader import get_suitable_downloader
 from .downloader.rtmp import rtmpdump_version
 from .postprocessor import (
@@ -378,6 +378,7 @@ class YoutubeDL(object):
     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
         self._ies.append(ie)
+        if not isinstance(ie, type):
             self._ies_instances[ie.ie_key()] = ie
             ie.set_downloader(self)
 
@@ -397,7 +398,7 @@ class YoutubeDL(object):
         """
         Add the InfoExtractors returned by gen_extractors to the end of the list
         """
-        for ie in gen_extractors():
+        for ie in gen_extractor_classes():
             self.add_info_extractor(ie)
 
     def add_post_processor(self, pp):
@@ -661,6 +662,7 @@ class YoutubeDL(object):
             if not ie.suitable(url):
                 continue
 
+            ie = self.get_info_extractor(ie.ie_key())
             if not ie.working():
                 self.report_warning('The program functionality for this site has been marked as broken, '
                                     'and will probably not work.')
@@ -1240,7 +1242,10 @@ class YoutubeDL(object):
             self.list_thumbnails(info_dict)
             return
 
-        if thumbnails and 'thumbnail' not in info_dict:
+        thumbnail = info_dict.get('thumbnail')
+        if thumbnail:
+            info_dict['thumbnail'] = sanitize_url(thumbnail)
+        elif thumbnails:
             info_dict['thumbnail'] = thumbnails[-1]['url']
 
         if 'display_id' not in info_dict and 'id' in info_dict:
@@ -1954,6 +1959,8 @@ class YoutubeDL(object):
         write_string(encoding_str, encoding=None)
 
         self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
+        if _LAZY_LOADER:
+            self._write_string('[debug] Lazy loading extractors enabled' + '\n')
         try:
             sp = subprocess.Popen(
                 ['git', 'rev-parse', '--short', 'HEAD'],
@@ -181,7 +181,8 @@ except ImportError: # Python 2
             if isinstance(e, dict):
                 e = encode_dict(e)
             elif isinstance(e, (list, tuple,)):
-                e = encode_list(e)
+                list_e = encode_list(e)
+                e = tuple(list_e) if isinstance(e, tuple) else list_e
             elif isinstance(e, compat_str):
                 e = e.encode(encoding)
             return e
File diff suppressed because it is too large.
@@ -2,10 +2,14 @@
 from __future__ import unicode_literals
 
 import re
+import functools
 
 from .common import InfoExtractor
 from ..compat import compat_str
-from ..utils import int_or_none
+from ..utils import (
+    int_or_none,
+    OnDemandPagedList,
+)
 
 
 class ACastIE(InfoExtractor):
@@ -26,13 +30,8 @@ class ACastIE(InfoExtractor):
 
     def _real_extract(self, url):
         channel, display_id = re.match(self._VALID_URL, url).groups()
-        embed_page = self._download_webpage(
-            re.sub('(?:www\.)?acast\.com', 'embedcdn.acast.com', url), display_id)
-        cast_data = self._parse_json(self._search_regex(
-            r'window\[\'acast/queries\'\]\s*=\s*([^;]+);', embed_page, 'acast data'),
-            display_id)['GetAcast/%s/%s' % (channel, display_id)]
+        cast_data = self._download_json(
+            'https://embed.acast.com/api/acasts/%s/%s' % (channel, display_id), display_id)
 
         return {
             'id': compat_str(cast_data['id']),
             'display_id': display_id,
@@ -58,15 +57,26 @@ class ACastChannelIE(InfoExtractor):
         'playlist_mincount': 20,
     }
     _API_BASE_URL = 'https://www.acast.com/api/'
+    _PAGE_SIZE = 10
 
     @classmethod
     def suitable(cls, url):
         return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
 
-    def _real_extract(self, url):
-        display_id = self._match_id(url)
-        channel_data = self._download_json(self._API_BASE_URL + 'channels/%s' % display_id, display_id)
-        casts = self._download_json(self._API_BASE_URL + 'channels/%s/acasts' % display_id, display_id)
-        entries = [self.url_result('https://www.acast.com/%s/%s' % (display_id, cast['url']), 'ACast') for cast in casts]
+    def _fetch_page(self, channel_slug, page):
+        casts = self._download_json(
+            self._API_BASE_URL + 'channels/%s/acasts?page=%s' % (channel_slug, page),
+            channel_slug, note='Download page %d of channel data' % page)
+        for cast in casts:
+            yield self.url_result(
+                'https://www.acast.com/%s/%s' % (channel_slug, cast['url']),
+                'ACast', cast['id'])
 
-        return self.playlist_result(entries, compat_str(channel_data['id']), channel_data['name'], channel_data.get('description'))
+    def _real_extract(self, url):
+        channel_slug = self._match_id(url)
+        channel_data = self._download_json(
+            self._API_BASE_URL + 'channels/%s' % channel_slug, channel_slug)
+        entries = OnDemandPagedList(functools.partial(
+            self._fetch_page, channel_slug), self._PAGE_SIZE)
+        return self.playlist_result(entries, compat_str(
+            channel_data['id']), channel_data['name'], channel_data.get('description'))
@@ -33,8 +33,33 @@ class BeegIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
+        webpage = self._download_webpage(url, video_id)
+
+        cpl_url = self._search_regex(
+            r'<script[^>]+src=(["\'])(?P<url>(?:https?:)?//static\.beeg\.com/cpl/\d+\.js.*?)\1',
+            webpage, 'cpl', default=None, group='url')
+
+        beeg_version, beeg_salt = [None] * 2
+
+        if cpl_url:
+            cpl = self._download_webpage(
+                self._proto_relative_url(cpl_url), video_id,
+                'Downloading cpl JS', fatal=False)
+            if cpl:
+                beeg_version = self._search_regex(
+                    r'beeg_version\s*=\s*(\d+)', cpl,
+                    'beeg version', default=None) or self._search_regex(
+                    r'/(\d+)\.js', cpl_url, 'beeg version', default=None)
+                beeg_salt = self._search_regex(
+                    r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg beeg_salt',
+                    default=None, group='beeg_salt')
+
+        beeg_version = beeg_version or '1750'
+        beeg_salt = beeg_salt or 'MIDtGaw96f0N1kMMAM1DE46EC9pmFr'
+
         video = self._download_json(
-            'https://api.beeg.com/api/v6/1738/video/%s' % video_id, video_id)
+            'http://api.beeg.com/api/v6/%s/video/%s' % (beeg_version, video_id),
+            video_id)
 
         def split(o, e):
             def cut(s, x):
@@ -51,7 +76,7 @@ class BeegIE(InfoExtractor):
 
         def decrypt_key(key):
             # Reverse engineered from http://static.beeg.com/cpl/1738.js
-            a = 'GUuyodcfS8FW8gQp4OKLMsZBcX0T7B'
+            a = beeg_salt
             e = compat_urllib_parse_unquote(key)
             o = ''.join([
                 compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21)
@@ -101,5 +126,5 @@ class BeegIE(InfoExtractor):
             'duration': duration,
             'tags': tags,
             'formats': formats,
-            'age_limit': 18,
+            'age_limit': self._rta_search(webpage),
         }
@@ -48,7 +48,7 @@ class BrightcoveLegacyIE(InfoExtractor):
                 'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
                 'timestamp': 1368213670,
                 'upload_date': '20130510',
-                'uploader_id': 1589608506001,
+                'uploader_id': '1589608506001',
             }
         },
         {
@@ -62,7 +62,7 @@ class BrightcoveLegacyIE(InfoExtractor):
                 'uploader': 'Oracle',
                 'timestamp': 1344975024,
                 'upload_date': '20120814',
-                'uploader_id': 1460825906,
+                'uploader_id': '1460825906',
             },
         },
         {
@@ -76,7 +76,7 @@ class BrightcoveLegacyIE(InfoExtractor):
                 'uploader': 'Mashable',
                 'timestamp': 1382041798,
                 'upload_date': '20131017',
-                'uploader_id': 1130468786001,
+                'uploader_id': '1130468786001',
             },
         },
         {
@@ -104,7 +104,7 @@ class BrightcoveLegacyIE(InfoExtractor):
                 'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
                 'timestamp': 1409122195,
                 'upload_date': '20140827',
-                'uploader_id': 710858724001,
+                'uploader_id': '710858724001',
             },
         },
         {
@@ -307,13 +307,14 @@ class BrightcoveLegacyIE(InfoExtractor):
             playlist_title=playlist_info['mediaCollectionDTO']['displayName'])
 
     def _extract_video_info(self, video_info):
+        publisher_id = video_info.get('publisherId')
         info = {
            'id': compat_str(video_info['id']),
            'title': video_info['displayName'].strip(),
            'description': video_info.get('shortDescription'),
            'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
            'uploader': video_info.get('publisherName'),
-           'uploader_id': video_info.get('publisherId'),
+           'uploader_id': compat_str(publisher_id) if publisher_id else None,
            'duration': float_or_none(video_info.get('length'), 1000),
            'timestamp': int_or_none(video_info.get('creationDate'), 1000),
         }
@@ -232,6 +232,24 @@ class InfoExtractor(object):
     episode_number: Number of the video episode within a season, as an integer.
     episode_id: Id of the video episode, as a unicode string.
 
+    The following fields should only be used when the media is a track or a part of
+    a music album:
+
+    track:          Title of the track.
+    track_number:   Number of the track within an album or a disc, as an integer.
+    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
+                    as a unicode string.
+    artist:         Artist(s) of the track.
+    genre:          Genre(s) of the track.
+    album:          Title of the album the track belongs to.
+    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
+    album_artist:   List of all artists appeared on the album (e.g.
+                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
+                    and compilations).
+    disc_number:    Number of the disc or other physical medium the track belongs to,
+                    as an integer.
+    release_year:   Year (YYYY) when the album was released.
+
     Unless mentioned otherwise, the fields should be Unicode strings.
 
     Unless mentioned otherwise, None is equivalent to absence of information.
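To illustrate the new optional music metadata, a hypothetical info dict (all values invented) could now include:

```python
# Hypothetical _real_extract() return value exercising the new track/album fields
info_dict = {
    'id': '12345',
    'title': 'Some Song',
    'url': 'https://cdn.example.com/12345.mp3',
    'ext': 'mp3',
    # new optional fields documented above
    'track': 'Some Song',
    'track_number': 3,
    'track_id': '6.iii',
    'artist': 'Some Artist',
    'genre': 'Electronic',
    'album': 'Some Album',
    'album_type': 'Compilation',
    'album_artist': 'Various Artists',
    'disc_number': 1,
    'release_year': 2016,
}
```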
@@ -825,7 +843,7 @@ class InfoExtractor(object):
         for input in re.findall(r'(?i)<input([^>]+)>', html):
             if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
                 continue
-            name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
+            name = re.search(r'(?:name|id)=(["\'])(?P<value>.+?)\1', input)
             if not name:
                 continue
             value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
@@ -1516,7 +1534,7 @@ class InfoExtractor(object):
                 media_template = representation_ms_info['media_template']
                 media_template = media_template.replace('$RepresentationID$', representation_id)
                 media_template = re.sub(r'\$(Number|Bandwidth)\$', r'%(\1)d', media_template)
-                media_template = re.sub(r'\$(Number|Bandwidth)%(\d+)\$', r'%(\1)\2d', media_template)
+                media_template = re.sub(r'\$(Number|Bandwidth)%([^$]+)\$', r'%(\1)\2', media_template)
                 media_template.replace('$$', '$')
                 representation_ms_info['segment_urls'] = [
                     media_template % {
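The widened pattern above accepts any printf-style specifier inside `$Number%...$`/`$Bandwidth%...$`, not just a plain digit count; a small standalone check with invented values:

```python
import re

media_template = 'chunk_$RepresentationID$_$Number%05d$.m4s'
media_template = media_template.replace('$RepresentationID$', 'video=2800000')
# old pattern: r'\$(Number|Bandwidth)%(\d+)\$' -> r'%(\1)\2d' (digits only)
media_template = re.sub(r'\$(Number|Bandwidth)%([^$]+)\$', r'%(\1)\2', media_template)

print(media_template)                  # chunk_video=2800000_%(Number)05d.m4s
print(media_template % {'Number': 7})  # chunk_video=2800000_00007.m4s
```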
@@ -41,7 +41,9 @@ class DeezerPlaylistIE(InfoExtractor):
                 'Deezer said: %s' % geoblocking_msg, expected=True)
 
         data_json = self._search_regex(
-            r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n', webpage, 'data JSON')
+            (r'__DZR_APP_STATE__\s*=\s*({.+?})\s*</script>',
+             r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n'),
+            webpage, 'data JSON')
         data = json.loads(data_json)
 
         playlist_title = data.get('DATA', {}).get('TITLE')
@@ -17,37 +17,53 @@ class DemocracynowIE(InfoExtractor):
     IE_NAME = 'democracynow'
     _TESTS = [{
         'url': 'http://www.democracynow.org/shows/2015/7/3',
-        'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
+        'md5': '3757c182d3d84da68f5c8f506c18c196',
         'info_dict': {
             'id': '2015-0703-001',
             'ext': 'mp4',
-            'title': 'July 03, 2015 - Democracy Now!',
-            'description': 'A daily independent global news hour with Amy Goodman & Juan González "What to the Slave is 4th of July?": James Earl Jones Reads Frederick Douglass\u2019 Historic Speech : "This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag : "We Shall Overcome": Remembering Folk Icon, Activist Pete Seeger in His Own Words & Songs',
+            'title': 'Daily Show',
         },
     }, {
         'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree',
-        'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
         'info_dict': {
             'id': '2015-0703-001',
             'ext': 'mp4',
             'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag',
             'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21',
         },
+        'params': {
+            'skip_download': True,
+        },
     }]
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
+
         webpage = self._download_webpage(url, display_id)
-        description = self._og_search_description(webpage)
 
         json_data = self._parse_json(self._search_regex(
             r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'),
             display_id)
-        video_id = None
+
+        title = json_data['title']
         formats = []
 
-        default_lang = 'en'
+        video_id = None
+
+        for key in ('file', 'audio', 'video', 'high_res_video'):
+            media_url = json_data.get(key, '')
+            if not media_url:
+                continue
+            media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url))
+            video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn')
+            formats.append({
+                'url': media_url,
+                'vcodec': 'none' if key == 'audio' else None,
+            })
+
+        self._sort_formats(formats)
+
+        default_lang = 'en'
         subtitles = {}
 
         def add_subtitle_item(lang, info_dict):
@@ -67,22 +83,13 @@ class DemocracynowIE(InfoExtractor):
                 'url': compat_urlparse.urljoin(url, subtitle_item['url']),
             })
 
-        for key in ('file', 'audio', 'video'):
-            media_url = json_data.get(key, '')
-            if not media_url:
-                continue
-            media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url))
-            video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn')
-            formats.append({
-                'url': media_url,
-            })
-
-        self._sort_formats(formats)
+        description = self._og_search_description(webpage, default=None)
 
         return {
             'id': video_id or display_id,
-            'title': json_data['title'],
+            'title': title,
             'description': description,
+            'thumbnail': json_data.get('image'),
             'subtitles': subtitles,
             'formats': formats,
         }
youtube_dl/extractor/extractors.py (new file, 991 lines):

# flake8: noqa
from __future__ import unicode_literals

from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .acast import (
    ACastIE,
    ACastChannelIE,
)
from .addanime import AddAnimeIE
from .adobetv import (
    AdobeTVIE,
    AdobeTVShowIE,
    AdobeTVChannelIE,
    AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aenetworks import AENetworksIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .animeondemand import AnimeOnDemandIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import (
    AolIE,
    AolFeaturesIE,
)
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import (
    AppleTrailersIE,
    AppleTrailersSectionIE,
)
from .archiveorg import ArchiveOrgIE
from .ard import (
    ARDIE,
    ARDMediathekIE,
    SportschauIE,
)
from .arte import (
    ArteTvIE,
    ArteTVPlus7IE,
    ArteTVCreativeIE,
    ArteTVConcertIE,
    ArteTVFutureIE,
    ArteTVCinemaIE,
    ArteTVDDCIE,
    ArteTVMagazineIE,
    ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audimedia import AudiMediaIE
from .audioboom import AudioBoomIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE, AzubuLiveIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbc import (
    BBCCoUkIE,
    BBCCoUkArticleIE,
    BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bigflix import BigflixIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .biobiochiletv import BioBioChileTVIE
from .bleacherreport import (
    BleacherReportIE,
    BleacherReportCMSIE,
)
from .blinkx import BlinkxIE
from .bloomberg import BloombergIE
from .bokecc import BokeCCIE
from .bpb import BpbIE
from .br import BRIE
from .bravotv import BravoTVIE
from .breakcom import BreakIE
from .brightcove import (
    BrightcoveLegacyIE,
    BrightcoveNewIE,
)
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
    CamdemyIE,
    CamdemyFolderIE
)
from .camwithher import CamWithHerIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .canvas import CanvasIE
from .cbc import (
    CBCIE,
    CBCPlayerIE,
)
from .cbs import CBSIE
from .cbsinteractive import CBSInteractiveIE
from .cbsnews import (
    CBSNewsIE,
    CBSNewsLiveVideoIE,
)
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .cda import CDAIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
    ChirbitIE,
    ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .clyp import ClypIE
from .cmt import CMTIE
from .cnbc import CNBCIE
from .cnn import (
    CNNIE,
    CNNBlogsIE,
    CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .commonprotocols import RtmpIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .crackle import CrackleIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
    CrunchyrollIE,
    CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .cultureunplugged import CultureUnpluggedIE
from .cwtv import CWTVIE
from .dailymotion import (
    DailymotionIE,
    DailymotionPlaylistIE,
    DailymotionUserIE,
    DailymotionCloudIE,
)
from .daum import (
    DaumIE,
    DaumClipIE,
    DaumPlaylistIE,
    DaumUserIE,
)
from .dbtv import DBTVIE
from .dcn import (
    DCNIE,
    DCNVideoIE,
    DCNLiveIE,
    DCNSeasonIE,
)
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .democracynow import DemocracynowIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dplay import DPlayIE
from .dramafever import (
    DramaFeverIE,
    DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .dropbox import DropboxIE
from .dw import (
    DWIE,
    DWArticleIE,
)
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
    EllenTVIE,
    EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
from .europa import EuropaIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .fczenit import FczenitIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import FKTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .fox import FOXIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import (
    FranceCultureIE,
    FranceCultureEmissionIE,
)
from .franceinter import FranceInterIE
from .francetv import (
    PluzzIE,
    FranceTvInfoIE,
    FranceTVIE,
    GenerationQuoiIE,
    CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funimation import FunimationIE
from .funnyordie import FunnyOrDieIE
from .gameinformer import GameInformerIE
from .gamekings import GamekingsIE
from .gameone import (
    GameOneIE,
    GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import (
    GloboIE,
    GloboArticleIE,
)
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googledrive import GoogleDriveIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .goshgay import GoshgayIE
from .gputechconf import GPUTechConfIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hbo import HBOIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
from .hotstar import HotStarIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import (
    IGNIE,
    OneUPIE,
    PCMagIE,
)
from .imdb import (
    ImdbIE,
    ImdbListIE
)
from .imgur import (
    ImgurIE,
    ImgurAlbumIE,
)
from .ina import InaIE
from .indavideo import (
    IndavideoIE,
    IndavideoEmbedIE,
)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .ivi import (
    IviIE,
    IviCompilationIE
)
from .ivideon import IvideonIE
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jwplatform import JWPlatformIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
|
||||||
|
from .keezmovies import KeezMoviesIE
|
||||||
|
from .khanacademy import KhanAcademyIE
|
||||||
|
from .kickstarter import KickStarterIE
|
||||||
|
from .keek import KeekIE
|
||||||
|
from .konserthusetplay import KonserthusetPlayIE
|
||||||
|
from .kontrtube import KontrTubeIE
|
||||||
|
from .krasview import KrasViewIE
|
||||||
|
from .ku6 import Ku6IE
|
||||||
|
from .kusi import KUSIIE
|
||||||
|
from .kuwo import (
|
||||||
|
KuwoIE,
|
||||||
|
KuwoAlbumIE,
|
||||||
|
KuwoChartIE,
|
||||||
|
KuwoSingerIE,
|
||||||
|
KuwoCategoryIE,
|
||||||
|
KuwoMvIE,
|
||||||
|
)
|
||||||
|
from .la7 import LA7IE
|
||||||
|
from .laola1tv import Laola1TvIE
|
||||||
|
from .lecture2go import Lecture2GoIE
|
||||||
|
from .lemonde import LemondeIE
|
||||||
|
from .leeco import (
|
||||||
|
LeIE,
|
||||||
|
LePlaylistIE,
|
||||||
|
LetvCloudIE,
|
||||||
|
)
|
||||||
|
from .libsyn import LibsynIE
|
||||||
|
from .lifenews import (
|
||||||
|
LifeNewsIE,
|
||||||
|
LifeEmbedIE,
|
||||||
|
)
|
||||||
|
from .limelight import (
|
||||||
|
LimelightMediaIE,
|
||||||
|
LimelightChannelIE,
|
||||||
|
LimelightChannelListIE,
|
||||||
|
)
|
||||||
|
from .liveleak import LiveLeakIE
|
||||||
|
from .livestream import (
|
||||||
|
LivestreamIE,
|
||||||
|
LivestreamOriginalIE,
|
||||||
|
LivestreamShortenerIE,
|
||||||
|
)
|
||||||
|
from .lnkgo import LnkGoIE
|
||||||
|
from .lovehomeporn import LoveHomePornIE
|
||||||
|
from .lrt import LRTIE
|
||||||
|
from .lynda import (
|
||||||
|
LyndaIE,
|
||||||
|
LyndaCourseIE
|
||||||
|
)
|
||||||
|
from .m6 import M6IE
|
||||||
|
from .macgamestore import MacGameStoreIE
|
||||||
|
from .mailru import MailRuIE
|
||||||
|
from .makerschannel import MakersChannelIE
|
||||||
|
from .makertv import MakerTVIE
|
||||||
|
from .malemotion import MalemotionIE
|
||||||
|
from .matchtv import MatchTVIE
|
||||||
|
from .mdr import MDRIE
|
||||||
|
from .metacafe import MetacafeIE
|
||||||
|
from .metacritic import MetacriticIE
|
||||||
|
from .mgoon import MgoonIE
|
||||||
|
from .minhateca import MinhatecaIE
|
||||||
|
from .ministrygrid import MinistryGridIE
|
||||||
|
from .minoto import MinotoIE
|
||||||
|
from .miomio import MioMioIE
|
||||||
|
from .mit import TechTVMITIE, MITIE, OCWMITIE
|
||||||
|
from .mitele import MiTeleIE
|
||||||
|
from .mixcloud import MixcloudIE
|
||||||
|
from .mlb import MLBIE
|
||||||
|
from .mnet import MnetIE
|
||||||
|
from .mpora import MporaIE
|
||||||
|
from .moevideo import MoeVideoIE
|
||||||
|
from .mofosex import MofosexIE
|
||||||
|
from .mojvideo import MojvideoIE
|
||||||
|
from .moniker import MonikerIE
|
||||||
|
from .mooshare import MooshareIE
|
||||||
|
from .morningstar import MorningstarIE
|
||||||
|
from .motherless import MotherlessIE
|
||||||
|
from .motorsport import MotorsportIE
|
||||||
|
from .movieclips import MovieClipsIE
|
||||||
|
from .moviezine import MoviezineIE
|
||||||
|
from .mtv import (
|
||||||
|
MTVIE,
|
||||||
|
MTVServicesEmbeddedIE,
|
||||||
|
MTVIggyIE,
|
||||||
|
MTVDEIE,
|
||||||
|
)
|
||||||
|
from .muenchentv import MuenchenTVIE
|
||||||
|
from .musicplayon import MusicPlayOnIE
|
||||||
|
from .muzu import MuzuTVIE
|
||||||
|
from .mwave import MwaveIE
|
||||||
|
from .myspace import MySpaceIE, MySpaceAlbumIE
|
||||||
|
from .myspass import MySpassIE
|
||||||
|
from .myvi import MyviIE
|
||||||
|
from .myvideo import MyVideoIE
|
||||||
|
from .myvidster import MyVidsterIE
|
||||||
|
from .nationalgeographic import (
|
||||||
|
NationalGeographicIE,
|
||||||
|
NationalGeographicChannelIE,
|
||||||
|
)
|
||||||
|
from .naver import NaverIE
|
||||||
|
from .nba import NBAIE
|
||||||
|
from .nbc import (
|
||||||
|
CSNNEIE,
|
||||||
|
NBCIE,
|
||||||
|
NBCNewsIE,
|
||||||
|
NBCSportsIE,
|
||||||
|
NBCSportsVPlayerIE,
|
||||||
|
MSNBCIE,
|
||||||
|
)
|
||||||
|
from .ndr import (
|
||||||
|
NDRIE,
|
||||||
|
NJoyIE,
|
||||||
|
NDREmbedBaseIE,
|
||||||
|
NDREmbedIE,
|
||||||
|
NJoyEmbedIE,
|
||||||
|
)
|
||||||
|
from .ndtv import NDTVIE
|
||||||
|
from .netzkino import NetzkinoIE
|
||||||
|
from .nerdcubed import NerdCubedFeedIE
|
||||||
|
from .nerdist import NerdistIE
|
||||||
|
from .neteasemusic import (
|
||||||
|
NetEaseMusicIE,
|
||||||
|
NetEaseMusicAlbumIE,
|
||||||
|
NetEaseMusicSingerIE,
|
||||||
|
NetEaseMusicListIE,
|
||||||
|
NetEaseMusicMvIE,
|
||||||
|
NetEaseMusicProgramIE,
|
||||||
|
NetEaseMusicDjRadioIE,
|
||||||
|
)
|
||||||
|
from .newgrounds import NewgroundsIE
|
||||||
|
from .newstube import NewstubeIE
|
||||||
|
from .nextmedia import (
|
||||||
|
NextMediaIE,
|
||||||
|
NextMediaActionNewsIE,
|
||||||
|
AppleDailyIE,
|
||||||
|
)
|
||||||
|
from .nextmovie import NextMovieIE
|
||||||
|
from .nfb import NFBIE
|
||||||
|
from .nfl import NFLIE
|
||||||
|
from .nhl import (
|
||||||
|
NHLIE,
|
||||||
|
NHLNewsIE,
|
||||||
|
NHLVideocenterIE,
|
||||||
|
)
|
||||||
|
from .nick import NickIE
|
||||||
|
from .niconico import NiconicoIE, NiconicoPlaylistIE
|
||||||
|
from .ninegag import NineGagIE
|
||||||
|
from .noco import NocoIE
|
||||||
|
from .normalboots import NormalbootsIE
|
||||||
|
from .nosvideo import NosVideoIE
|
||||||
|
from .nova import NovaIE
|
||||||
|
from .novamov import (
|
||||||
|
AuroraVidIE,
|
||||||
|
CloudTimeIE,
|
||||||
|
NowVideoIE,
|
||||||
|
VideoWeedIE,
|
||||||
|
WholeCloudIE,
|
||||||
|
)
|
||||||
|
from .nowness import (
|
||||||
|
NownessIE,
|
||||||
|
NownessPlaylistIE,
|
||||||
|
NownessSeriesIE,
|
||||||
|
)
|
||||||
|
from .nowtv import (
|
||||||
|
NowTVIE,
|
||||||
|
NowTVListIE,
|
||||||
|
)
|
||||||
|
from .noz import NozIE
|
||||||
|
from .npo import (
|
||||||
|
NPOIE,
|
||||||
|
NPOLiveIE,
|
||||||
|
NPORadioIE,
|
||||||
|
NPORadioFragmentIE,
|
||||||
|
SchoolTVIE,
|
||||||
|
VPROIE,
|
||||||
|
WNLIE
|
||||||
|
)
|
||||||
|
from .npr import NprIE
|
||||||
|
from .nrk import (
|
||||||
|
NRKIE,
|
||||||
|
NRKPlaylistIE,
|
||||||
|
NRKSkoleIE,
|
||||||
|
NRKTVIE,
|
||||||
|
)
|
||||||
|
from .ntvde import NTVDeIE
|
||||||
|
from .ntvru import NTVRuIE
|
||||||
|
from .nytimes import (
|
||||||
|
NYTimesIE,
|
||||||
|
NYTimesArticleIE,
|
||||||
|
)
|
||||||
|
from .nuvid import NuvidIE
|
||||||
|
from .odnoklassniki import OdnoklassnikiIE
|
||||||
|
from .oktoberfesttv import OktoberfestTVIE
|
||||||
|
from .onionstudios import OnionStudiosIE
|
||||||
|
from .ooyala import (
|
||||||
|
OoyalaIE,
|
||||||
|
OoyalaExternalIE,
|
||||||
|
)
|
||||||
|
from .openload import OpenloadIE
|
||||||
|
from .ora import OraTVIE
|
||||||
|
from .orf import (
|
||||||
|
ORFTVthekIE,
|
||||||
|
ORFOE1IE,
|
||||||
|
ORFFM4IE,
|
||||||
|
ORFIPTVIE,
|
||||||
|
)
|
||||||
|
from .pandoratv import PandoraTVIE
|
||||||
|
from .parliamentliveuk import ParliamentLiveUKIE
|
||||||
|
from .patreon import PatreonIE
|
||||||
|
from .pbs import PBSIE
|
||||||
|
from .periscope import PeriscopeIE
|
||||||
|
from .philharmoniedeparis import PhilharmonieDeParisIE
|
||||||
|
from .phoenix import PhoenixIE
|
||||||
|
from .photobucket import PhotobucketIE
|
||||||
|
from .pinkbike import PinkbikeIE
|
||||||
|
from .planetaplay import PlanetaPlayIE
|
||||||
|
from .pladform import PladformIE
|
||||||
|
from .played import PlayedIE
|
||||||
|
from .playfm import PlayFMIE
|
||||||
|
from .plays import PlaysTVIE
|
||||||
|
from .playtvak import PlaytvakIE
|
||||||
|
from .playvid import PlayvidIE
|
||||||
|
from .playwire import PlaywireIE
|
||||||
|
from .pluralsight import (
|
||||||
|
PluralsightIE,
|
||||||
|
PluralsightCourseIE,
|
||||||
|
)
|
||||||
|
from .podomatic import PodomaticIE
|
||||||
|
from .porn91 import Porn91IE
|
||||||
|
from .pornhd import PornHdIE
|
||||||
|
from .pornhub import (
|
||||||
|
PornHubIE,
|
||||||
|
PornHubPlaylistIE,
|
||||||
|
PornHubUserVideosIE,
|
||||||
|
)
|
||||||
|
from .pornotube import PornotubeIE
|
||||||
|
from .pornovoisines import PornoVoisinesIE
|
||||||
|
from .pornoxo import PornoXOIE
|
||||||
|
from .primesharetv import PrimeShareTVIE
|
||||||
|
from .promptfile import PromptFileIE
|
||||||
|
from .prosiebensat1 import ProSiebenSat1IE
|
||||||
|
from .puls4 import Puls4IE
|
||||||
|
from .pyvideo import PyvideoIE
|
||||||
|
from .qqmusic import (
|
||||||
|
QQMusicIE,
|
||||||
|
QQMusicSingerIE,
|
||||||
|
QQMusicAlbumIE,
|
||||||
|
QQMusicToplistIE,
|
||||||
|
QQMusicPlaylistIE,
|
||||||
|
)
|
||||||
|
from .quickvid import QuickVidIE
|
||||||
|
from .r7 import R7IE
|
||||||
|
from .radiode import RadioDeIE
|
||||||
|
from .radiojavan import RadioJavanIE
|
||||||
|
from .radiobremen import RadioBremenIE
|
||||||
|
from .radiofrance import RadioFranceIE
|
||||||
|
from .rai import (
|
||||||
|
RaiTVIE,
|
||||||
|
RaiIE,
|
||||||
|
)
|
||||||
|
from .rbmaradio import RBMARadioIE
|
||||||
|
from .rds import RDSIE
|
||||||
|
from .redtube import RedTubeIE
|
||||||
|
from .regiotv import RegioTVIE
|
||||||
|
from .restudy import RestudyIE
|
||||||
|
from .reverbnation import ReverbNationIE
|
||||||
|
from .revision3 import Revision3IE
|
||||||
|
from .rice import RICEIE
|
||||||
|
from .ringtv import RingTVIE
|
||||||
|
from .ro220 import Ro220IE
|
||||||
|
from .rottentomatoes import RottenTomatoesIE
|
||||||
|
from .roxwel import RoxwelIE
|
||||||
|
from .rtbf import RTBFIE
|
||||||
|
from .rte import RteIE, RteRadioIE
|
||||||
|
from .rtlnl import RtlNlIE
|
||||||
|
from .rtl2 import RTL2IE
|
||||||
|
from .rtp import RTPIE
|
||||||
|
from .rts import RTSIE
|
||||||
|
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
|
||||||
|
from .rtvnh import RTVNHIE
|
||||||
|
from .ruhd import RUHDIE
|
||||||
|
from .ruleporn import RulePornIE
|
||||||
|
from .rutube import (
|
||||||
|
RutubeIE,
|
||||||
|
RutubeChannelIE,
|
||||||
|
RutubeEmbedIE,
|
||||||
|
RutubeMovieIE,
|
||||||
|
RutubePersonIE,
|
||||||
|
)
|
||||||
|
from .rutv import RUTVIE
|
||||||
|
from .ruutu import RuutuIE
|
||||||
|
from .sandia import SandiaIE
|
||||||
|
from .safari import (
|
||||||
|
SafariIE,
|
||||||
|
SafariApiIE,
|
||||||
|
SafariCourseIE,
|
||||||
|
)
|
||||||
|
from .sapo import SapoIE
|
||||||
|
from .savefrom import SaveFromIE
|
||||||
|
from .sbs import SBSIE
|
||||||
|
from .scivee import SciVeeIE
|
||||||
|
from .screencast import ScreencastIE
|
||||||
|
from .screencastomatic import ScreencastOMaticIE
|
||||||
|
from .screenjunkies import ScreenJunkiesIE
|
||||||
|
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
|
||||||
|
from .senateisvp import SenateISVPIE
|
||||||
|
from .servingsys import ServingSysIE
|
||||||
|
from .sexu import SexuIE
|
||||||
|
from .sexykarma import SexyKarmaIE
|
||||||
|
from .shahid import ShahidIE
|
||||||
|
from .shared import SharedIE
|
||||||
|
from .sharesix import ShareSixIE
|
||||||
|
from .sina import SinaIE
|
||||||
|
from .skynewsarabia import (
|
||||||
|
SkyNewsArabiaIE,
|
||||||
|
SkyNewsArabiaArticleIE,
|
||||||
|
)
|
||||||
|
from .slideshare import SlideshareIE
|
||||||
|
from .slutload import SlutloadIE
|
||||||
|
from .smotri import (
|
||||||
|
SmotriIE,
|
||||||
|
SmotriCommunityIE,
|
||||||
|
SmotriUserIE,
|
||||||
|
SmotriBroadcastIE,
|
||||||
|
)
|
||||||
|
from .snagfilms import (
|
||||||
|
SnagFilmsIE,
|
||||||
|
SnagFilmsEmbedIE,
|
||||||
|
)
|
||||||
|
from .snotr import SnotrIE
|
||||||
|
from .sohu import SohuIE
|
||||||
|
from .soundcloud import (
|
||||||
|
SoundcloudIE,
|
||||||
|
SoundcloudSetIE,
|
||||||
|
SoundcloudUserIE,
|
||||||
|
SoundcloudPlaylistIE,
|
||||||
|
SoundcloudSearchIE
|
||||||
|
)
|
||||||
|
from .soundgasm import (
|
||||||
|
SoundgasmIE,
|
||||||
|
SoundgasmProfileIE
|
||||||
|
)
|
||||||
|
from .southpark import (
|
||||||
|
SouthParkIE,
|
||||||
|
SouthParkDeIE,
|
||||||
|
SouthParkDkIE,
|
||||||
|
SouthParkEsIE,
|
||||||
|
SouthParkNlIE
|
||||||
|
)
|
||||||
|
from .spankbang import SpankBangIE
|
||||||
|
from .spankwire import SpankwireIE
|
||||||
|
from .spiegel import SpiegelIE, SpiegelArticleIE
|
||||||
|
from .spiegeltv import SpiegeltvIE
|
||||||
|
from .spike import SpikeIE
|
||||||
|
from .stitcher import StitcherIE
|
||||||
|
from .sport5 import Sport5IE
|
||||||
|
from .sportbox import (
|
||||||
|
SportBoxIE,
|
||||||
|
SportBoxEmbedIE,
|
||||||
|
)
|
||||||
|
from .sportdeutschland import SportDeutschlandIE
|
||||||
|
from .srgssr import (
|
||||||
|
SRGSSRIE,
|
||||||
|
SRGSSRPlayIE,
|
||||||
|
)
|
||||||
|
from .srmediathek import SRMediathekIE
|
||||||
|
from .ssa import SSAIE
|
||||||
|
from .stanfordoc import StanfordOpenClassroomIE
|
||||||
|
from .steam import SteamIE
|
||||||
|
from .streamcloud import StreamcloudIE
|
||||||
|
from .streamcz import StreamCZIE
|
||||||
|
from .streetvoice import StreetVoiceIE
|
||||||
|
from .sunporno import SunPornoIE
|
||||||
|
from .svt import (
|
||||||
|
SVTIE,
|
||||||
|
SVTPlayIE,
|
||||||
|
)
|
||||||
|
from .swrmediathek import SWRMediathekIE
|
||||||
|
from .syfy import SyfyIE
|
||||||
|
from .sztvhu import SztvHuIE
|
||||||
|
from .tagesschau import TagesschauIE
|
||||||
|
from .tapely import TapelyIE
|
||||||
|
from .tass import TassIE
|
||||||
|
from .teachertube import (
|
||||||
|
TeacherTubeIE,
|
||||||
|
TeacherTubeUserIE,
|
||||||
|
)
|
||||||
|
from .teachingchannel import TeachingChannelIE
|
||||||
|
from .teamcoco import TeamcocoIE
|
||||||
|
from .techtalks import TechTalksIE
|
||||||
|
from .ted import TEDIE
|
||||||
|
from .tele13 import Tele13IE
|
||||||
|
from .telebruxelles import TeleBruxellesIE
|
||||||
|
from .telecinco import TelecincoIE
|
||||||
|
from .telegraaf import TelegraafIE
|
||||||
|
from .telemb import TeleMBIE
|
||||||
|
from .teletask import TeleTaskIE
|
||||||
|
from .testurl import TestURLIE
|
||||||
|
from .tf1 import TF1IE
|
||||||
|
from .theintercept import TheInterceptIE
|
||||||
|
from .theonion import TheOnionIE
|
||||||
|
from .theplatform import (
|
||||||
|
ThePlatformIE,
|
||||||
|
ThePlatformFeedIE,
|
||||||
|
)
|
||||||
|
from .thescene import TheSceneIE
|
||||||
|
from .thesixtyone import TheSixtyOneIE
|
||||||
|
from .thestar import TheStarIE
|
||||||
|
from .thisamericanlife import ThisAmericanLifeIE
|
||||||
|
from .thisav import ThisAVIE
|
||||||
|
from .tinypic import TinyPicIE
|
||||||
|
from .tlc import TlcDeIE
|
||||||
|
from .tmz import (
|
||||||
|
TMZIE,
|
||||||
|
TMZArticleIE,
|
||||||
|
)
|
||||||
|
from .tnaflix import (
|
||||||
|
TNAFlixNetworkEmbedIE,
|
||||||
|
TNAFlixIE,
|
||||||
|
EMPFlixIE,
|
||||||
|
MovieFapIE,
|
||||||
|
)
|
||||||
|
from .toggle import ToggleIE
|
||||||
|
from .thvideo import (
|
||||||
|
THVideoIE,
|
||||||
|
THVideoPlaylistIE
|
||||||
|
)
|
||||||
|
from .toutv import TouTvIE
|
||||||
|
from .toypics import ToypicsUserIE, ToypicsIE
|
||||||
|
from .traileraddict import TrailerAddictIE
|
||||||
|
from .trilulilu import TriluliluIE
|
||||||
|
from .trollvids import TrollvidsIE
|
||||||
|
from .trutube import TruTubeIE
|
||||||
|
from .tube8 import Tube8IE
|
||||||
|
from .tubitv import TubiTvIE
|
||||||
|
from .tudou import (
|
||||||
|
TudouIE,
|
||||||
|
TudouPlaylistIE,
|
||||||
|
TudouAlbumIE,
|
||||||
|
)
|
||||||
|
from .tumblr import TumblrIE
|
||||||
|
from .tunein import (
|
||||||
|
TuneInClipIE,
|
||||||
|
TuneInStationIE,
|
||||||
|
TuneInProgramIE,
|
||||||
|
TuneInTopicIE,
|
||||||
|
TuneInShortenerIE,
|
||||||
|
)
|
||||||
|
from .turbo import TurboIE
|
||||||
|
from .tutv import TutvIE
|
||||||
|
from .tv2 import (
|
||||||
|
TV2IE,
|
||||||
|
TV2ArticleIE,
|
||||||
|
)
|
||||||
|
from .tv3 import TV3IE
|
||||||
|
from .tv4 import TV4IE
|
||||||
|
from .tvc import (
|
||||||
|
TVCIE,
|
||||||
|
TVCArticleIE,
|
||||||
|
)
|
||||||
|
from .tvigle import TvigleIE
|
||||||
|
from .tvland import TVLandIE
|
||||||
|
from .tvp import TvpIE, TvpSeriesIE
|
||||||
|
from .tvplay import TVPlayIE
|
||||||
|
from .tweakers import TweakersIE
|
||||||
|
from .twentyfourvideo import TwentyFourVideoIE
|
||||||
|
from .twentymin import TwentyMinutenIE
|
||||||
|
from .twentytwotracks import (
|
||||||
|
TwentyTwoTracksIE,
|
||||||
|
TwentyTwoTracksGenreIE
|
||||||
|
)
|
||||||
|
from .twitch import (
|
||||||
|
TwitchVideoIE,
|
||||||
|
TwitchChapterIE,
|
||||||
|
TwitchVodIE,
|
||||||
|
TwitchProfileIE,
|
||||||
|
TwitchPastBroadcastsIE,
|
||||||
|
TwitchBookmarksIE,
|
||||||
|
TwitchStreamIE,
|
||||||
|
)
|
||||||
|
from .twitter import (
|
||||||
|
TwitterCardIE,
|
||||||
|
TwitterIE,
|
||||||
|
TwitterAmplifyIE,
|
||||||
|
)
|
||||||
|
from .ubu import UbuIE
|
||||||
|
from .udemy import (
|
||||||
|
UdemyIE,
|
||||||
|
UdemyCourseIE
|
||||||
|
)
|
||||||
|
from .udn import UDNEmbedIE
|
||||||
|
from .digiteka import DigitekaIE
|
||||||
|
from .unistra import UnistraIE
|
||||||
|
from .urort import UrortIE
|
||||||
|
from .usatoday import USATodayIE
|
||||||
|
from .ustream import UstreamIE, UstreamChannelIE
|
||||||
|
from .ustudio import UstudioIE
|
||||||
|
from .varzesh3 import Varzesh3IE
|
||||||
|
from .vbox7 import Vbox7IE
|
||||||
|
from .veehd import VeeHDIE
|
||||||
|
from .veoh import VeohIE
|
||||||
|
from .vessel import VesselIE
|
||||||
|
from .vesti import VestiIE
|
||||||
|
from .vevo import VevoIE
|
||||||
|
from .vgtv import (
|
||||||
|
BTArticleIE,
|
||||||
|
BTVestlendingenIE,
|
||||||
|
VGTVIE,
|
||||||
|
)
|
||||||
|
from .vh1 import VH1IE
|
||||||
|
from .vice import (
|
||||||
|
ViceIE,
|
||||||
|
ViceShowIE,
|
||||||
|
)
|
||||||
|
from .viddler import ViddlerIE
|
||||||
|
from .videodetective import VideoDetectiveIE
|
||||||
|
from .videofyme import VideofyMeIE
|
||||||
|
from .videomega import VideoMegaIE
|
||||||
|
from .videomore import (
|
||||||
|
VideomoreIE,
|
||||||
|
VideomoreVideoIE,
|
||||||
|
VideomoreSeasonIE,
|
||||||
|
)
|
||||||
|
from .videopremium import VideoPremiumIE
|
||||||
|
from .videott import VideoTtIE
|
||||||
|
from .vidme import (
|
||||||
|
VidmeIE,
|
||||||
|
VidmeUserIE,
|
||||||
|
VidmeUserLikesIE,
|
||||||
|
)
|
||||||
|
from .vidzi import VidziIE
|
||||||
|
from .vier import VierIE, VierVideosIE
|
||||||
|
from .viewster import ViewsterIE
|
||||||
|
from .viidea import ViideaIE
|
||||||
|
from .vimeo import (
|
||||||
|
VimeoIE,
|
||||||
|
VimeoAlbumIE,
|
||||||
|
VimeoChannelIE,
|
||||||
|
VimeoGroupsIE,
|
||||||
|
VimeoLikesIE,
|
||||||
|
VimeoOndemandIE,
|
||||||
|
VimeoReviewIE,
|
||||||
|
VimeoUserIE,
|
||||||
|
VimeoWatchLaterIE,
|
||||||
|
)
|
||||||
|
from .vimple import VimpleIE
|
||||||
|
from .vine import (
|
||||||
|
VineIE,
|
||||||
|
VineUserIE,
|
||||||
|
)
|
||||||
|
from .viki import (
|
||||||
|
VikiIE,
|
||||||
|
VikiChannelIE,
|
||||||
|
)
|
||||||
|
from .vk import (
|
||||||
|
VKIE,
|
||||||
|
VKUserVideosIE,
|
||||||
|
)
|
||||||
|
from .vlive import VLiveIE
|
||||||
|
from .vodlocker import VodlockerIE
|
||||||
|
from .voicerepublic import VoiceRepublicIE
|
||||||
|
from .voxmedia import VoxMediaIE
|
||||||
|
from .vporn import VpornIE
|
||||||
|
from .vrt import VRTIE
|
||||||
|
from .vube import VubeIE
|
||||||
|
from .vuclip import VuClipIE
|
||||||
|
from .vulture import VultureIE
|
||||||
|
from .walla import WallaIE
|
||||||
|
from .washingtonpost import WashingtonPostIE
|
||||||
|
from .wat import WatIE
|
||||||
|
from .wayofthemaster import WayOfTheMasterIE
|
||||||
|
from .wdr import (
|
||||||
|
WDRIE,
|
||||||
|
WDRMobileIE,
|
||||||
|
WDRMausIE,
|
||||||
|
)
|
||||||
|
from .webofstories import (
|
||||||
|
WebOfStoriesIE,
|
||||||
|
WebOfStoriesPlaylistIE,
|
||||||
|
)
|
||||||
|
from .weibo import WeiboIE
|
||||||
|
from .weiqitv import WeiqiTVIE
|
||||||
|
from .wimp import WimpIE
|
||||||
|
from .wistia import WistiaIE
|
||||||
|
from .worldstarhiphop import WorldStarHipHopIE
|
||||||
|
from .wrzuta import WrzutaIE
|
||||||
|
from .wsj import WSJIE
|
||||||
|
from .xbef import XBefIE
|
||||||
|
from .xboxclips import XboxClipsIE
|
||||||
|
from .xfileshare import XFileShareIE
|
||||||
|
from .xhamster import (
|
||||||
|
XHamsterIE,
|
||||||
|
XHamsterEmbedIE,
|
||||||
|
)
|
||||||
|
from .xminus import XMinusIE
|
||||||
|
from .xnxx import XNXXIE
|
||||||
|
from .xstream import XstreamIE
|
||||||
|
from .xtube import XTubeUserIE, XTubeIE
|
||||||
|
from .xuite import XuiteIE
|
||||||
|
from .xvideos import XVideosIE
|
||||||
|
from .xxxymovies import XXXYMoviesIE
|
||||||
|
from .yahoo import (
|
||||||
|
YahooIE,
|
||||||
|
YahooSearchIE,
|
||||||
|
)
|
||||||
|
from .yam import YamIE
|
||||||
|
from .yandexmusic import (
|
||||||
|
YandexMusicTrackIE,
|
||||||
|
YandexMusicAlbumIE,
|
||||||
|
YandexMusicPlaylistIE,
|
||||||
|
)
|
||||||
|
from .yesjapan import YesJapanIE
|
||||||
|
from .yinyuetai import YinYueTaiIE
|
||||||
|
from .ynet import YnetIE
|
||||||
|
from .youjizz import YouJizzIE
|
||||||
|
from .youku import YoukuIE
|
||||||
|
from .youporn import YouPornIE
|
||||||
|
from .yourupload import YourUploadIE
|
||||||
|
from .youtube import (
|
||||||
|
YoutubeIE,
|
||||||
|
YoutubeChannelIE,
|
||||||
|
YoutubeFavouritesIE,
|
||||||
|
YoutubeHistoryIE,
|
||||||
|
YoutubeLiveIE,
|
||||||
|
YoutubePlaylistIE,
|
||||||
|
YoutubePlaylistsIE,
|
||||||
|
YoutubeRecommendedIE,
|
||||||
|
YoutubeSearchDateIE,
|
||||||
|
YoutubeSearchIE,
|
||||||
|
YoutubeSearchURLIE,
|
||||||
|
YoutubeShowIE,
|
||||||
|
YoutubeSubscriptionsIE,
|
||||||
|
YoutubeTruncatedIDIE,
|
||||||
|
YoutubeTruncatedURLIE,
|
||||||
|
YoutubeUserIE,
|
||||||
|
YoutubeWatchLaterIE,
|
||||||
|
)
|
||||||
|
from .zapiks import ZapiksIE
|
||||||
|
from .zdf import ZDFIE, ZDFChannelIE
|
||||||
|
from .zingmp3 import (
|
||||||
|
ZingMp3SongIE,
|
||||||
|
ZingMp3AlbumIE,
|
||||||
|
)
|
||||||
|
from .zippcast import ZippCastIE
|
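The long import list above is what makes each extractor visible to the rest of the package. As a rough, standalone illustration of that pattern (not youtube-dl's actual registry code — `FooIE`, `BarIE` and `gen_extractor_classes` here are made-up stand-ins), collecting every `*IE` name from the module namespace and asking each class whether it wants a URL looks like this:

```python
import re


# Hypothetical stand-ins for two imported extractor classes.
class FooIE(object):
    _VALID_URL = r'https?://(?:www\.)?foo\.example/watch/(?P<id>\d+)'

    @classmethod
    def suitable(cls, url):
        # True if this extractor wants to handle the URL.
        return re.match(cls._VALID_URL, url) is not None


class BarIE(object):
    _VALID_URL = r'https?://(?:www\.)?bar\.example/v/(?P<id>[a-z0-9]+)'

    @classmethod
    def suitable(cls, url):
        return re.match(cls._VALID_URL, url) is not None


def gen_extractor_classes():
    # Collect every class whose name ends in 'IE' from this module's
    # namespace, mirroring the effect of the import list above.
    return [obj for name, obj in sorted(globals().items())
            if name.endswith('IE') and isinstance(obj, type)]


if __name__ == '__main__':
    url = 'https://foo.example/watch/12345'
    print([ie.__name__ for ie in gen_extractor_classes() if ie.suitable(url)])
    # ['FooIE']
```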
youtube_dl/extractor/gdcvault.py
@@ -159,9 +159,10 @@ class GDCVaultIE(InfoExtractor):
             'title': title,
         }
 
+        PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/player.*?\.html.*?".*?</iframe>'
+
         xml_root = self._html_search_regex(
-            r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>',
-            start_page, 'xml root', default=None)
+            PLAYER_REGEX, start_page, 'xml root', default=None)
         if xml_root is None:
             # Probably need to authenticate
             login_res = self._login(webpage_url, display_id)
@@ -171,18 +172,19 @@ class GDCVaultIE(InfoExtractor):
             start_page = login_res
             # Grab the url from the authenticated page
             xml_root = self._html_search_regex(
-                r'<iframe src="(.*?)player.html.*?".*?</iframe>',
-                start_page, 'xml root')
+                PLAYER_REGEX, start_page, 'xml root')
 
         xml_name = self._html_search_regex(
             r'<iframe src=".*?\?xml=(.+?\.xml).*?".*?</iframe>',
             start_page, 'xml filename', default=None)
         if xml_name is None:
             # Fallback to the older format
-            xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')
+            xml_name = self._html_search_regex(
+                r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>',
+                start_page, 'xml filename')
 
-        xml_description_url = xml_root + 'xml/' + xml_name
-        xml_description = self._download_xml(xml_description_url, display_id)
+        xml_description = self._download_xml(
+            '%s/xml/%s' % (xml_root, xml_name), display_id)
 
         video_title = xml_description.find('./metadata/title').text
         video_formats = self._parse_mp4(xml_description)
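For reference, a small standalone sketch of how the shared `PLAYER_REGEX` picks the player root out of an embed iframe; the sample markup below is made up for illustration, not taken from the site:

```python
import re

PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/player.*?\.html.*?".*?</iframe>'

# Illustrative markup only.
start_page = (
    '<iframe src="http://evt.dispeak.com/ubm/gdc/sf16/player.html'
    '?xml=12345.xml" width="100%"></iframe>'
)

m = re.search(PLAYER_REGEX, start_page)
if m:
    print(m.group('xml_root'))  # http://evt.dispeak.com/ubm/gdc/sf16
```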
youtube_dl/extractor/iqiyi.py
@@ -368,7 +368,10 @@ class IqiyiIE(InfoExtractor):
             auth_req, video_id,
             note='Downloading video authentication JSON',
             errnote='Unable to download video authentication JSON')
-        if auth_result['code'] == 'Q00506':  # requires a VIP account
+
+        if auth_result['code'] == 'Q00505':  # No preview available (不允许试看鉴权失败)
+            raise ExtractorError('This video requires a VIP account', expected=True)
+        if auth_result['code'] == 'Q00506':  # End of preview time (试看结束鉴权失败)
             if do_report_warning:
                 self.report_warning('Needs a VIP account for full video')
             return False
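A hedged sketch of the control flow this hunk introduces — distinguishing "no preview allowed at all" from "preview time is over"; the helper name and messages are illustrative, not youtube-dl API:

```python
def handle_auth_code(code, report_warning=print):
    # 'Q00505': the account cannot preview this VIP video at all.
    if code == 'Q00505':
        raise RuntimeError('This video requires a VIP account')
    # 'Q00506': only a time-limited preview was served.
    if code == 'Q00506':
        report_warning('Needs a VIP account for full video')
        return False
    return True


print(handle_auth_code('Q00506'))  # warns, then prints False
```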
youtube_dl/extractor/novamov.py
@@ -16,7 +16,14 @@ class NovaMovIE(InfoExtractor):
     IE_NAME = 'novamov'
     IE_DESC = 'NovaMov'
 
-    _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video|mobile/#/videos)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})'
+    _VALID_URL_TEMPLATE = r'''(?x)
+                            http://
+                                (?:
+                                    (?:www\.)?%(host)s/(?:file|video|mobile/\#/videos)/|
+                                    (?:(?:embed|www)\.)%(host)s/embed(?:\.php|/)?\?(?:.*?&)?\bv=
+                                )
+                                (?P<id>[a-z\d]{13})
+                            '''
     _VALID_URL = _VALID_URL_TEMPLATE % {'host': 'novamov\.com'}
 
     _HOST = 'www.novamov.com'
@@ -27,17 +34,7 @@ class NovaMovIE(InfoExtractor):
     _DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>'
     _URL_TEMPLATE = 'http://%s/video/%s'
 
-    _TEST = {
-        'url': 'http://www.novamov.com/video/4rurhn9x446jj',
-        'md5': '7205f346a52bbeba427603ba10d4b935',
-        'info_dict': {
-            'id': '4rurhn9x446jj',
-            'ext': 'flv',
-            'title': 'search engine optimization',
-            'description': 'search engine optimization is used to rank the web page in the google search engine'
-        },
-        'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)'
-    }
+    _TEST = None
 
     def _check_existence(self, webpage, video_id):
         if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
@@ -81,7 +78,7 @@ class NovaMovIE(InfoExtractor):
 
         filekey = extract_filekey()
 
-        title = self._html_search_regex(self._TITLE_REGEX, webpage, 'title', fatal=False)
+        title = self._html_search_regex(self._TITLE_REGEX, webpage, 'title')
         description = self._html_search_regex(self._DESCRIPTION_REGEX, webpage, 'description', default='', fatal=False)
 
         api_response = self._download_webpage(
@@ -187,3 +184,29 @@ class CloudTimeIE(NovaMovIE):
     _TITLE_REGEX = r'<div[^>]+class=["\']video_det["\'][^>]*>\s*<strong>([^<]+)</strong>'
 
     _TEST = None
+
+
+class AuroraVidIE(NovaMovIE):
+    IE_NAME = 'auroravid'
+    IE_DESC = 'AuroraVid'
+
+    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'auroravid\.to'}
+
+    _HOST = 'www.auroravid.to'
+
+    _FILE_DELETED_REGEX = r'This file no longer exists on our servers!<'
+
+    _TESTS = [{
+        'url': 'http://www.auroravid.to/video/4rurhn9x446jj',
+        'md5': '7205f346a52bbeba427603ba10d4b935',
+        'info_dict': {
+            'id': '4rurhn9x446jj',
+            'ext': 'flv',
+            'title': 'search engine optimization',
+            'description': 'search engine optimization is used to rank the web page in the google search engine'
+        },
+        'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)'
+    }, {
+        'url': 'http://www.auroravid.to/embed/?v=4rurhn9x446jj',
+        'only_matching': True,
+    }]
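To sanity-check the new verbose `_VALID_URL_TEMPLATE`, here is a minimal standalone sketch that instantiates it for the novamov host and matches the two URL shapes that appear in the test cases above (the URLs themselves come from those tests):

```python
import re

_VALID_URL_TEMPLATE = r'''(?x)
    http://
        (?:
            (?:www\.)?%(host)s/(?:file|video|mobile/\#/videos)/|
            (?:(?:embed|www)\.)%(host)s/embed(?:\.php|/)?\?(?:.*?&)?\bv=
        )
        (?P<id>[a-z\d]{13})
    '''

novamov_url = re.compile(_VALID_URL_TEMPLATE % {'host': r'novamov\.com'})

for url in ('http://www.novamov.com/video/4rurhn9x446jj',
            'http://www.novamov.com/embed/?v=4rurhn9x446jj'):
    m = novamov_url.match(url)
    print(m.group('id') if m else None)  # 4rurhn9x446jj, twice
```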
youtube_dl/extractor/rte.py
@@ -39,9 +39,14 @@ class RteIE(InfoExtractor):
         duration = float_or_none(self._html_search_meta(
             'duration', webpage, 'duration', fatal=False), 1000)
 
-        thumbnail_id = self._search_regex(
-            r'<meta name="thumbnail" content="uri:irus:(.*?)" />', webpage, 'thumbnail')
-        thumbnail = 'http://img.rasset.ie/' + thumbnail_id + '.jpg'
+        thumbnail = None
+        thumbnail_meta = self._html_search_meta('thumbnail', webpage)
+        if thumbnail_meta:
+            thumbnail_id = self._search_regex(
+                r'uri:irus:(.+)', thumbnail_meta,
+                'thumbnail id', fatal=False)
+            if thumbnail_id:
+                thumbnail = 'http://img.rasset.ie/%s.jpg' % thumbnail_id
 
         feeds_url = self._html_search_meta('feeds-prefix', webpage, 'feeds url') + video_id
         json_string = self._download_json(feeds_url, video_id)
youtube_dl/extractor/tnaflix.py
@@ -76,7 +76,11 @@ class TNAFlixNetworkBaseIE(InfoExtractor):
         webpage = self._download_webpage(url, display_id)
 
         cfg_url = self._proto_relative_url(self._html_search_regex(
-            self._CONFIG_REGEX, webpage, 'flashvars.config'), 'http:')
+            self._CONFIG_REGEX, webpage, 'flashvars.config', default=None), 'http:')
+
+        if not cfg_url:
+            inputs = self._hidden_inputs(webpage)
+            cfg_url = 'https://cdn-fck.tnaflix.com/tnaflix/%s.fid?key=%s' % (inputs['vkey'], inputs['nkey'])
 
         cfg_xml = self._download_xml(
             cfg_url, display_id, 'Downloading metadata',
@@ -132,7 +136,7 @@ class TNAFlixNetworkBaseIE(InfoExtractor):
         average_rating = float_or_none(extract_field(self._AVERAGE_RATING_REGEX, 'average rating'))
 
         categories_str = extract_field(self._CATEGORIES_REGEX, 'categories')
-        categories = categories_str.split(', ') if categories_str is not None else []
+        categories = [c.strip() for c in categories_str.split(',')] if categories_str is not None else []
 
         return {
             'id': video_id,
@@ -186,13 +190,14 @@ class TNAFlixIE(TNAFlixNetworkBaseIE):
     _VALID_URL = r'https?://(?:www\.)?tnaflix\.com/[^/]+/(?P<display_id>[^/]+)/video(?P<id>\d+)'
 
     _TITLE_REGEX = r'<title>(.+?) - TNAFlix Porn Videos</title>'
-    _DESCRIPTION_REGEX = r'<h3 itemprop="description">([^<]+)</h3>'
-    _UPLOADER_REGEX = r'(?s)<span[^>]+class="infoTitle"[^>]*>Uploaded By:</span>(.+?)<div'
+    _DESCRIPTION_REGEX = r'<meta[^>]+name="description"[^>]+content="([^"]+)"'
+    _UPLOADER_REGEX = r'<i>\s*Verified Member\s*</i>\s*<h1>(.+?)</h1>'
+    _CATEGORIES_REGEX = r'(?s)<span[^>]*>Categories:</span>(.+?)</div>'
 
     _TESTS = [{
         # anonymous uploader, no categories
         'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
-        'md5': 'ecf3498417d09216374fc5907f9c6ec0',
+        'md5': '7e569419fe6d69543d01e6be22f5f7c4',
         'info_dict': {
             'id': '553878',
             'display_id': 'Carmella-Decesare-striptease',
@@ -201,17 +206,16 @@ class TNAFlixIE(TNAFlixNetworkBaseIE):
             'thumbnail': 're:https?://.*\.jpg$',
             'duration': 91,
             'age_limit': 18,
-            'uploader': 'Anonymous',
-            'categories': [],
+            'categories': ['Porn Stars'],
         }
     }, {
         # non-anonymous uploader, categories
        'url': 'https://www.tnaflix.com/teen-porn/Educational-xxx-video/video6538',
-        'md5': '0f5d4d490dbfd117b8607054248a07c0',
+        'md5': 'fcba2636572895aba116171a899a5658',
         'info_dict': {
             'id': '6538',
             'display_id': 'Educational-xxx-video',
-            'ext': 'mp4',
+            'ext': 'flv',
             'title': 'Educational xxx video',
             'description': 'md5:b4fab8f88a8621c8fabd361a173fe5b8',
             'thumbnail': 're:https?://.*\.jpg$',
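The categories change above swaps an exact `', '` split for a split-and-strip, which tolerates missing or extra whitespace around the commas. A minimal sketch of the difference, with made-up input strings:

```python
def parse_categories(categories_str):
    # Split on bare commas and strip whitespace, instead of assuming ', '.
    if categories_str is None:
        return []
    return [c.strip() for c in categories_str.split(',')]


print(parse_categories('Porn Stars'))        # ['Porn Stars']
print(parse_categories('Teen,  Amateur'))    # ['Teen', 'Amateur']
print(parse_categories(None))                # []
```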
youtube_dl/extractor/vrt.py
@@ -4,7 +4,10 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..utils import float_or_none
+from ..utils import (
+    determine_ext,
+    float_or_none,
+)
 
 
 class VRTIE(InfoExtractor):
@@ -52,6 +55,11 @@ class VRTIE(InfoExtractor):
                 'duration': 661,
             }
         },
+        {
+            # YouTube video
+            'url': 'http://deredactie.be/cm/vrtnieuws/videozone/nieuws/cultuurenmedia/1.2622957',
+            'only_matching': True,
+        },
         {
             'url': 'http://cobra.canvas.be/cm/cobra/videozone/rubriek/film-videozone/1.2377055',
             'only_matching': True,
@@ -66,7 +74,17 @@ class VRTIE(InfoExtractor):
         video_id = self._search_regex(
             r'data-video-id="([^"]+)_[^"]+"', webpage, 'video id', fatal=False)
 
+        src = self._search_regex(
+            r'data-video-src="([^"]+)"', webpage, 'video src', default=None)
+
+        video_type = self._search_regex(
+            r'data-video-type="([^"]+)"', webpage, 'video type', default=None)
+
+        if video_type == 'YouTubeVideo':
+            return self.url_result(src, 'Youtube')
+
         formats = []
+
         mobj = re.search(
             r'data-video-iphone-server="(?P<server>[^"]+)"\s+data-video-iphone-path="(?P<path>[^"]+)"',
             webpage)
@@ -74,11 +92,15 @@ class VRTIE(InfoExtractor):
             formats.extend(self._extract_m3u8_formats(
                 '%s/%s' % (mobj.group('server'), mobj.group('path')),
                 video_id, 'mp4', m3u8_id='hls', fatal=False))
-        mobj = re.search(r'data-video-src="(?P<src>[^"]+)"', webpage)
-        if mobj:
-            formats.extend(self._extract_f4m_formats(
-                '%s/manifest.f4m' % mobj.group('src'),
-                video_id, f4m_id='hds', fatal=False))
+        if src:
+            if determine_ext(src) == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    src, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id='hls', fatal=False))
+            else:
+                formats.extend(self._extract_f4m_formats(
+                    '%s/manifest.f4m' % src, video_id, f4m_id='hds', fatal=False))
 
         if not formats and 'data-video-geoblocking="true"' in webpage:
             self.raise_geo_restricted('This video is only available in Belgium')
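The VRT change dispatches on the extension of `data-video-src`: `.m3u8` goes to the HLS path, anything else falls back to HDS. A self-contained sketch of that dispatch, with a simplified stand-in for youtube-dl's `determine_ext()` (an assumption, not the library's exact implementation) and made-up URLs:

```python
def guess_ext(url, default='unknown_video'):
    # Simplified stand-in for a determine_ext()-style helper: take the text
    # after the last dot of the path part, or fall back to a default.
    guess = url.partition('?')[0].rpartition('.')[-1]
    return guess if guess.isalnum() and len(guess) <= 5 else default


def pick_handler(src):
    return 'hls (m3u8)' if guess_ext(src) == 'm3u8' else 'hds (f4m)'


print(pick_handler('http://example.com/stream/playlist.m3u8?token=abc'))  # hls (m3u8)
print(pick_handler('http://example.com/stream/video-123'))                # hds (f4m)
```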
youtube_dl/extractor/yandexmusic.py
@@ -39,9 +39,14 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
         'info_dict': {
             'id': '4878838',
             'ext': 'mp3',
-            'title': 'Carlo Ambrosio - Gypsy Eyes 1',
+            'title': 'Carlo Ambrosio & Fabio Di Bari, Carlo Ambrosio - Gypsy Eyes 1',
             'filesize': 4628061,
             'duration': 193.04,
+            'track': 'Gypsy Eyes 1',
+            'album': 'Gypsy Soul',
+            'album_artist': 'Carlo Ambrosio',
+            'artist': 'Carlo Ambrosio & Fabio Di Bari, Carlo Ambrosio',
+            'release_year': '2009',
         }
     }
@@ -64,16 +69,45 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
             thumbnail = cover_uri.replace('%%', 'orig')
             if not thumbnail.startswith('http'):
                 thumbnail = 'http://' + thumbnail
-        return {
+
+        track_title = track['title']
+        track_info = {
             'id': track['id'],
             'ext': 'mp3',
             'url': self._get_track_url(track['storageDir'], track['id']),
-            'title': '%s - %s' % (track['artists'][0]['name'], track['title']),
             'filesize': int_or_none(track.get('fileSize')),
             'duration': float_or_none(track.get('durationMs'), 1000),
             'thumbnail': thumbnail,
+            'track': track_title,
         }
+
+        def extract_artist(artist_list):
+            if artist_list and isinstance(artist_list, list):
+                artists_names = [a['name'] for a in artist_list if a.get('name')]
+                if artists_names:
+                    return ', '.join(artists_names)
+
+        albums = track.get('albums')
+        if albums and isinstance(albums, list):
+            album = albums[0]
+            if isinstance(album, dict):
+                year = album.get('year')
+                track_info.update({
+                    'album': album.get('title'),
+                    'album_artist': extract_artist(album.get('artists')),
+                    'release_year': compat_str(year) if year else None,
+                })
+
+        track_artist = extract_artist(track.get('artists'))
+        if track_artist:
+            track_info.update({
+                'artist': track_artist,
+                'title': '%s - %s' % (track_artist, track_title),
+            })
+        else:
+            track_info['title'] = track_title
+        return track_info
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         album_id, track_id = mobj.group('album_id'), mobj.group('id')
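The core of this hunk is joining all artist names instead of taking only the first one. A minimal standalone sketch (the `track` dict below is a made-up sample only loosely shaped like the real API response):

```python
def extract_artist(artist_list):
    # Join all artist names, rather than taking only the first entry.
    if artist_list and isinstance(artist_list, list):
        names = [a['name'] for a in artist_list if a.get('name')]
        if names:
            return ', '.join(names)
    return None


track = {
    'title': 'Gypsy Eyes 1',
    'artists': [{'name': 'Carlo Ambrosio & Fabio Di Bari'}, {'name': 'Carlo Ambrosio'}],
}
artist = extract_artist(track.get('artists'))
print('%s - %s' % (artist, track['title']))
# Carlo Ambrosio & Fabio Di Bari, Carlo Ambrosio - Gypsy Eyes 1
```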
youtube_dl/extractor/youtube.py
@@ -270,7 +270,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                          ))
                          |(?:
                             youtu\.be|                                        # just youtu.be/xxxx
-                            vid\.plus                                         # or vid.plus/xxxx
+                            vid\.plus|                                        # or vid.plus/xxxx
+                            zwearz\.com/watch|                                # or zwearz.com/watch/xxxx
                          )/
                          |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                          )
@@ -758,6 +759,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'url': 'http://vid.plus/FlRa-iH7PGw',
             'only_matching': True,
         },
+        {
+            'url': 'http://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
+            'only_matching': True,
+        },
         {
             # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
             # Also tests cut-off URL expansion in video description (see
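A cut-down sketch of how the alternation works — this is not the full `YoutubeIE._VALID_URL`, just the short-host branch, assuming standard 11-character video IDs:

```python
import re

SHORT_HOSTS = r'''(?x)
    https?://
    (?:
        youtu\.be|            # just youtu.be/xxxx
        vid\.plus|            # or vid.plus/xxxx
        zwearz\.com/watch     # or zwearz.com/watch/xxxx
    )/
    (?P<id>[0-9A-Za-z_-]{11})
'''

for url in ('https://youtu.be/BaW_jenozKc',
            'http://zwearz.com/watch/9lWxNJF-ufM/some-title.html'):
    m = re.match(SHORT_HOSTS, url)
    print(m.group('id') if m else None)  # BaW_jenozKc, 9lWxNJF-ufM
```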
youtube_dl/extractor/zdf.py
@@ -85,6 +85,13 @@ class ZDFIE(InfoExtractor):
         uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
         uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
         upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))
+        subtitles = {}
+        captions_url = doc.find('.//caption/url')
+        if captions_url is not None:
+            subtitles['de'] = [{
+                'url': captions_url.text,
+                'ext': 'ttml',
+            }]
 
         def xml_to_thumbnails(fnode):
             thumbnails = []
@@ -190,6 +197,7 @@ class ZDFIE(InfoExtractor):
         'uploader_id': uploader_id,
         'upload_date': upload_date,
         'formats': formats,
+        'subtitles': subtitles,
     }
 
     def _real_extract(self, url):
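The new `subtitles` entry follows the usual youtube-dl shape of a language code mapping to a list of subtitle dicts. A small self-contained sketch of building that structure (the URL is made up):

```python
def build_subtitles(captions_url):
    # Shape assumed here: {language: [{'url': ..., 'ext': ...}, ...]}.
    subtitles = {}
    if captions_url:
        subtitles['de'] = [{
            'url': captions_url,
            'ext': 'ttml',
        }]
    return subtitles


print(build_subtitles('http://example.com/captions/123.xml'))
print(build_subtitles(None))  # {}
```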
youtube_dl/utils.py
@@ -2131,6 +2131,7 @@ def dfxp2srt(dfxp_data):
     _x = functools.partial(xpath_with_ns, ns_map={
         'ttml': 'http://www.w3.org/ns/ttml',
         'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
+        'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
     })
 
     class TTMLPElementParser(object):
@@ -2157,7 +2158,7 @@ def dfxp2srt(dfxp_data):
 
     dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
     out = []
-    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')
+    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')
 
     if not paras:
         raise ValueError('Invalid dfxp/TTML subtitle')
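The extra `ttaf1_0604` mapping matters because some TTML documents declare the older 2006/04 namespace, and namespaced element lookups only match when the URI is exact. A self-contained sketch using the standard library (the sample document below is made up):

```python
import xml.etree.ElementTree as ET

TTML_SAMPLE = '''<tt xmlns="http://www.w3.org/2006/04/ttaf1">
  <body><div>
    <p begin="00:00:01" end="00:00:03">Hello</p>
    <p begin="00:00:03" end="00:00:05">World</p>
  </div></body>
</tt>'''

NS_MAP = {
    'ttml': 'http://www.w3.org/ns/ttml',
    'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
    'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
}

root = ET.fromstring(TTML_SAMPLE)
# Try each known namespace in turn, then fall back to unqualified <p>.
paras = (root.findall('.//{%s}p' % NS_MAP['ttml'])
         or root.findall('.//{%s}p' % NS_MAP['ttaf1'])
         or root.findall('.//{%s}p' % NS_MAP['ttaf1_0604'])
         or root.findall('.//p'))
print([p.text for p in paras])  # ['Hello', 'World']
```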
youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2016.04.01'
+__version__ = '2016.04.06'