Merge branch 'master' into Soundcloud-issue-12878
commit c8353846c3

.travis.yml (11 changed lines)
@@ -14,11 +14,14 @@ env:
   - YTDL_TEST_SET=core
   - YTDL_TEST_SET=download
 matrix:
+  include:
+    - env: JYTHON=true; YTDL_TEST_SET=core
+    - env: JYTHON=true; YTDL_TEST_SET=download
   fast_finish: true
   allow_failures:
     - env: YTDL_TEST_SET=download
+    - env: JYTHON=true; YTDL_TEST_SET=core
+    - env: JYTHON=true; YTDL_TEST_SET=download
+before_install:
+  - if [ "$JYTHON" == "true" ]; then ./devscripts/install_jython.sh; export PATH="$HOME/jython/bin:$PATH"; fi
 script: ./devscripts/run_tests.sh
-notifications:
-  email:
-    - filippo.valsorda@gmail.com
-    - yasoob.khld@gmail.com
ChangeLog
@@ -1,3 +1,10 @@
+version <unreleased>
+
+Extractors
+* [openload] Fix extraction (#15166)
+* [rtve.es:alacarta] Fix extraction of some new URLs
+
+
 version 2017.12.31
 
 Core
devscripts/install_jython.sh (new executable file, 5 lines)
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+wget http://central.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar
+java -jar jython-installer-2.7.1.jar -s -d "$HOME/jython"
+$HOME/jython/bin/jython -m pip install nose
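The new JYTHON=true jobs in the Travis matrix run devscripts/install_jython.sh in before_install; the script downloads the Jython 2.7.1 installer, installs it under $HOME/jython and adds nose via Jython's pip, and the job then puts $HOME/jython/bin on PATH. As a hedged illustration (not part of this commit), a quick post-install sanity check could look like the sketch below, using only the install location the script above defines:

import os
import subprocess

# Hypothetical sanity check, not part of this commit: confirm the interpreter
# installed by devscripts/install_jython.sh actually runs.
jython = os.path.expanduser('~/jython/bin/jython')

# 'jython --version' reports the version string and exits 0 on a healthy install.
proc = subprocess.run([jython, '--version'],
                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
print(proc.stdout.decode().strip())
assert proc.returncode == 0, 'Jython installation looks broken'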
youtube_dl/extractor/acast.py
@@ -8,7 +8,7 @@ from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
     int_or_none,
-    parse_iso8601,
+    unified_timestamp,
     OnDemandPagedList,
 )
 
@@ -32,7 +32,7 @@ class ACastIE(InfoExtractor):
     }, {
         # test with multiple blings
         'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
-        'md5': '55c0097badd7095f494c99a172f86501',
+        'md5': 'e87d5b8516cd04c0d81b6ee1caca28d0',
         'info_dict': {
             'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
             'ext': 'mp3',
@@ -40,23 +40,24 @@ class ACastIE(InfoExtractor):
             'timestamp': 1477346700,
             'upload_date': '20161024',
             'description': 'md5:4f81f6d8cf2e12ee21a321d8bca32db4',
-            'duration': 2797,
+            'duration': 2766,
         }
     }]
 
     def _real_extract(self, url):
         channel, display_id = re.match(self._VALID_URL, url).groups()
         cast_data = self._download_json(
-            'https://embed.acast.com/api/acasts/%s/%s' % (channel, display_id), display_id)
+            'https://play-api.acast.com/splash/%s/%s' % (channel, display_id), display_id)
+        e = cast_data['result']['episode']
         return {
-            'id': compat_str(cast_data['id']),
+            'id': compat_str(e['id']),
             'display_id': display_id,
-            'url': [b['audio'] for b in cast_data['blings'] if b['type'] == 'BlingAudio'][0],
-            'title': cast_data['name'],
-            'description': cast_data.get('description'),
-            'thumbnail': cast_data.get('image'),
-            'timestamp': parse_iso8601(cast_data.get('publishingDate')),
-            'duration': int_or_none(cast_data.get('duration')),
+            'url': e['mediaUrl'],
+            'title': e['name'],
+            'description': e.get('description'),
+            'thumbnail': e.get('image'),
+            'timestamp': unified_timestamp(e.get('publishingDate')),
+            'duration': int_or_none(e.get('duration')),
         }
 
 
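The acast extractor now queries the play-api.acast.com splash endpoint and reads all metadata from the result.episode object instead of picking the audio URL out of the 'blings' list. The sketch below shows the data access the new code performs; only the field names come from the patch, the JSON shape and values are illustrative stand-ins rather than a captured API response, and the example date is chosen to reproduce the test expectation of 1477346700.

from youtube_dl.utils import int_or_none, unified_timestamp

# Illustrative splash response: field names taken from the patch above,
# values made up for the example.
cast_data = {
    'result': {
        'episode': {
            'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
            'name': 'Example episode',
            'mediaUrl': 'https://media.example.invalid/episode.mp3',
            'publishingDate': '2016-10-24T22:05:00Z',
            'duration': 2766,
        }
    }
}

e = cast_data['result']['episode']
info = {
    'id': e['id'],
    'url': e['mediaUrl'],
    'title': e['name'],
    # unified_timestamp accepts a wide range of date formats,
    # including ISO 8601 strings like the one above.
    'timestamp': unified_timestamp(e.get('publishingDate')),  # -> 1477346700
    'duration': int_or_none(e.get('duration')),
}
print(info)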
youtube_dl/extractor/lynda.py
@@ -244,8 +244,9 @@ class LyndaIE(LyndaBaseIE):
     def _get_subtitles(self, video_id):
         url = 'https://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
         subs = self._download_json(url, None, False)
-        if subs:
-            return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
+        fixed_subs = self._fix_subtitles(subs)
+        if fixed_subs:
+            return {'en': [{'ext': 'srt', 'data': fixed_subs}]}
         else:
             return {}
 
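The Lynda subtitles change converts the transcript first and only returns an 'en' track when _fix_subtitles actually produced data, instead of keying the decision on the raw JSON. A small self-contained sketch of the same guard is below; the fixer here is a simplified stand-in, not the real _fix_subtitles, which this patch leaves untouched.

def _fix_subtitles(subs):
    # Simplified stand-in fixer: returns an SRT-ish string, or '' when the
    # transcript JSON contains nothing usable.
    return '\n'.join(
        '%d\n%s' % (i, s['Caption'])
        for i, s in enumerate(subs, 1)
        if s.get('Caption'))


def get_subtitles(subs):
    fixed_subs = _fix_subtitles(subs)
    if fixed_subs:  # the new check: look at the converted data, not the raw JSON
        return {'en': [{'ext': 'srt', 'data': fixed_subs}]}
    return {}


print(get_subtitles([{'Caption': ''}]))    # {} (previously: an empty subtitle track)
print(get_subtitles([{'Caption': 'Hi'}]))  # {'en': [{'ext': 'srt', 'data': '1\nHi'}]}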
youtube_dl/extractor/openload.py
@@ -333,7 +333,11 @@ class OpenloadIE(InfoExtractor):
         webpage, _ = phantom.get(page_url, html=webpage, video_id=video_id, headers=headers)
 
         decoded_id = (get_element_by_id('streamurl', webpage) or
-                      get_element_by_id('streamuri', webpage))
+                      get_element_by_id('streamuri', webpage) or
+                      get_element_by_id('streamurj', webpage))
 
+        if not decoded_id:
+            raise ExtractorError('Can\'t find stream URL', video_id=video_id)
+
         video_url = 'https://openload.co/stream/%s?mime=true' % decoded_id
 
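openload.py now also checks a streamurj element for the decoded stream ID and raises ExtractorError up front when none of the elements yields anything, rather than formatting None into the stream URL. The chain of or-ed lookups can equivalently be written as a loop; a hedged sketch follows (the helper name find_decoded_id is made up, while get_element_by_id and ExtractorError are the real youtube_dl.utils helpers):

from youtube_dl.utils import ExtractorError, get_element_by_id


def find_decoded_id(webpage, video_id):
    # Try each known element id in order; the extractor currently checks
    # 'streamurl', 'streamuri' and, new in this commit, 'streamurj'.
    for element_id in ('streamurl', 'streamuri', 'streamurj'):
        decoded_id = get_element_by_id(element_id, webpage)
        if decoded_id:
            return decoded_id
    raise ExtractorError('Can\'t find stream URL', video_id=video_id)


# usage: video_url = 'https://openload.co/stream/%s?mime=true' % find_decoded_id(webpage, video_id)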
youtube_dl/extractor/rtve.py
@@ -31,6 +31,9 @@ def _decrypt_url(png):
     hash_index = data.index('#')
     alphabet_data = data[:hash_index]
     url_data = data[hash_index + 1:]
+    if url_data[0] == 'H' and url_data[3] == '%':
+        # remove useless HQ%% at the start
+        url_data = url_data[4:]
 
     alphabet = []
     e = 0
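In rtve.py, _decrypt_url now drops a leading 'HQ%%' marker from the encoded URL data before the alphabet decoding runs; some newer alacarta URLs carry that prefix, which the decoder does not expect. A tiny standalone illustration of just that guard (the sample strings are made up):

def strip_hq_prefix(url_data):
    # Mirrors the new guard in _decrypt_url: drop a leading "HQ%%" marker,
    # leave anything else untouched.
    if url_data[0] == 'H' and url_data[3] == '%':
        # remove useless HQ%% at the start
        return url_data[4:]
    return url_data


# Illustrative inputs only; real url_data comes out of the PNG metadata.
print(strip_hq_prefix('HQ%%ZYXW1234'))  # -> 'ZYXW1234'
print(strip_hq_prefix('ZYXW1234'))      # unchanged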
youtube_dl/extractor/twitch.py
@@ -358,9 +358,16 @@ class TwitchPlaylistBaseIE(TwitchBaseIE):
                 break
             offset += limit
         return self.playlist_result(
-            [self.url_result(entry) for entry in orderedSet(entries)],
+            [self._make_url_result(entry) for entry in orderedSet(entries)],
             channel_id, channel_name)
 
+    def _make_url_result(self, url):
+        try:
+            video_id = 'v%s' % TwitchVodIE._match_id(url)
+            return self.url_result(url, TwitchVodIE.ie_key(), video_id=video_id)
+        except AssertionError:
+            return self.url_result(url)
+
     def _extract_playlist_page(self, response):
         videos = response.get('videos')
         return [video['url'] for video in videos] if videos else []
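The Twitch playlist base class now routes each entry through the new _make_url_result helper: when TwitchVodIE._match_id recognises the URL, the entry becomes a url_result pinned to TwitchVodIE with a 'v'-prefixed video id, and because _match_id asserts that _VALID_URL matched, any other URL raises AssertionError and falls back to a plain url_result. A simplified standalone sketch of that control flow (the regex below is a cut-down stand-in, not the real TwitchVodIE._VALID_URL, and the tuples stand in for url_result dicts):

import re

# Cut-down stand-in for TwitchVodIE._VALID_URL; the real pattern is broader.
_VALID_URL = r'https?://(?:www\.)?twitch\.tv/(?:[^/]+/v|videos)/(?P<id>\d+)'


def match_id(url):
    # InfoExtractor._match_id asserts that _VALID_URL matched; that assert is
    # the AssertionError the new helper catches.
    m = re.match(_VALID_URL, url)
    assert m
    return m.group('id')


def make_url_result(url):
    try:
        video_id = 'v%s' % match_id(url)     # e.g. 'v123456' for a VOD link
        return ('TwitchVod', url, video_id)  # stands in for url_result(url, TwitchVodIE.ie_key(), ...)
    except AssertionError:
        return (None, url, None)             # plain url_result(url) fallback


print(make_url_result('https://www.twitch.tv/videos/123456'))
print(make_url_result('https://clips.twitch.tv/SomeClipName'))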