cleanup code, flake8 passed

Pablo Castorino 2019-01-31 19:33:36 -03:00
parent 13d441b1b1
commit 18b5708685


@@ -5,12 +5,12 @@ from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     urlencode_postdata,
-    compat_str,
     ExtractorError,
 )


 class ContarBaseIE(InfoExtractor):
     _NETRC_MACHINE = 'contar'
     _API_BASE = 'https://api.cont.ar/api/v2/'
@@ -22,16 +22,16 @@ class ContarBaseIE(InfoExtractor):
             raise ExtractorError(
                 '%s said: %s' % (self.IE_NAME, error), expected=True)

-    def _call_api(self, path, video_id, headers = {}, note='Downloading JSON metadata'):
+    def _call_api(self, path, video_id, headers={}, note='Downloading JSON metadata'):
         if self._auth_token:
             headers['Authorization'] = 'Bearer ' + self._auth_token
         result = self._download_json(
             self._API_BASE + path, video_id, headers=headers, note=note)
         self._handle_errors(result)
         return result['data']

     def _real_initialize(self):
         email, password = self._get_login_info()
         if email is None:
@@ -42,21 +42,19 @@ class ContarBaseIE(InfoExtractor):
                 'email': email,
                 'password': password,
             }))

         self._handle_errors(result)
         self._auth_token = result['token']

-    def _get_video_info(self, video, video_id, base = {}):
-        #print(json.dumps(video, indent=4, sort_keys=True))
-        #print "id = %s S%sE%s" % (video.get('id'), season.get('name') , video.get('episode'))
+    def _get_video_info(self, video, video_id, base={}):
         formats = self._get_formats(video.get('streams', []), video.get('id'))
         subtitles = self._get_subtitles(video['subtitles'].get('data', []), video.get('id'))
         serie_info = base.get('serie_info') or self._get_serie_info(video.get('serie'))
-        season_number = base.get('season_number') or self._get_season_number(serie_info, video.get('id'));
+        season_number = base.get('season_number') or self._get_season_number(serie_info, video.get('id'))
         episode_number = video.get('episode')

         info = {
             'id': video.get('id'),
             'title': video.get('name'),
@@ -70,34 +68,33 @@ class ContarBaseIE(InfoExtractor):
             'duration': int_or_none(video.get('length')),
             'thumbnail': video.get('posterImage'),
             'release_year': int_or_none(serie_info.get('year')),
-            #'timestamp': timestamp,
+            # 'timestamp': timestamp,
             'formats': formats,
             'subtitles': subtitles,
         }

         return info

     def _get_serie_info(self, serie_id, headers={}):
         serie = self._call_api('serie/' + serie_id, serie_id, headers=headers, note='Downloading Serie JSON metadata')
         return serie

     def _get_season_number(self, serie_info, video_id):
         for season in serie_info['seasons'].get('data', []):
-            #print(json.dumps(season, indent=4, sort_keys=True))
             season_number = season.get('name')
             for episode in season['videos'].get('data', []):
                 if episode.get('id') == video_id:
                     return season_number
         return None

     def _get_subtitles(self, subtitles, video_id):
         subs = {}
         for sub in subtitles:
             lang = sub.get('lang').lower()
-            subs[lang] = [{ 'url': sub.get('url'), 'ext': 'srt'}]
+            subs[lang] = [{'url': sub.get('url'), 'ext': 'srt'}]
         return subs

     def _get_formats(self, videos, video_id):
         formats = []
         for stream in videos:
@@ -105,18 +102,18 @@ class ContarBaseIE(InfoExtractor):
             type = stream.get('type')
             if (type == 'HLS'):
                 formats.extend(self._extract_m3u8_formats(stream_url,
                     video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls',
                     fatal=False))
             elif (type == 'DASH'):
                 formats.extend(self._extract_mpd_formats(
                     stream_url, video_id, mpd_id='dash', fatal=False))
         self._sort_formats(formats)
         return formats


 class ContarIE(ContarBaseIE):
     _UUID_RE = r'[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}'
     _VALID_URL = r'https?://(?:www\.)?cont\.ar/watch/(?P<id>%s)' % _UUID_RE
     _TEST = {
@@ -142,17 +139,17 @@ class ContarIE(ContarBaseIE):
             'format': 'hls-4755-1'
         }
     }

     def _real_extract(self, url):
         video_id = self._match_id(url)
         video = self._call_api('videos/' + video_id, video_id, headers={'Referer': url})
-        info = self._get_video_info(video, video_id);
+        info = self._get_video_info(video, video_id)

         return info


 class ContarSerieIE(ContarBaseIE):
     _UUID_RE = r'[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}'
     _VALID_URL = r'https?://(?:www\.)?cont\.ar/serie/(?P<id>%s)' % _UUID_RE
     _TEST = {
@@ -161,7 +158,7 @@ class ContarSerieIE(ContarBaseIE):
             'id': '353247d5-da97-4cb6-8571-c4fbab28c643',
             'title': 'Vidas de Radio',
             'description': 'Ana Gerschenson conduce el ciclo que repasa historias de grandes personalidades que le dieron vida al medio; marcaron una época de la Argentina y de tu vida, esas voces amigas que estuvieron siempre y son Vidas De Radio.'
-            #'thumbnail': r're:^https?://.*\.jpg$',
+            # 'thumbnail': r're:^https?://.*\.jpg$',
             # TODO more properties, either as:
             # * A value
             # * MD5 checksum; start the string with md5:
@@ -206,32 +203,30 @@ class ContarSerieIE(ContarBaseIE):
             'format': 'bestvideo',
         }
     }

     def _real_extract(self, url):
         serie_id = self._match_id(url)
         serie_info = self._get_serie_info(serie_id, headers={'Referer': url})
-        seasons = []
         entries = []
         base = {}
         base['serie_info'] = serie_info

         for season in serie_info['seasons'].get('data', []):
-            #print(json.dumps(season, indent=4, sort_keys=True))
             base['season_number'] = season.get('name')
             for episode in season['videos'].get('data', []):
-                info = self._get_video_info(episode, serie_id, base);
+                info = self._get_video_info(episode, serie_id, base)
                 entries.append(info)

         return self.playlist_result(
             entries, serie_id,
             serie_info.get('name'), serie_info.get('story_large'))


 class ContarChannelIE(ContarBaseIE):
     _UUID_RE = r'[\d]{1,}'
     _VALID_URL = r'https?://(?:www\.)?cont\.ar/channel/(?P<id>%s)' % _UUID_RE
     _TEST = {
_TEST = { _TEST = {
@ -252,23 +247,24 @@ class ContarChannelIE(ContarBaseIE):
'skip_download': True 'skip_download': True
} }
} }
def _real_extract(self, url): def _real_extract(self, url):
list_id = self._match_id(url) list_id = self._match_id(url)
channel_info = self._call_api('channel/info/' + list_id, list_id, headers={'Referer': url}, note='Downloading Channel Info JSON metadata') channel_info = self._call_api('channel/info/' + list_id, list_id, headers={'Referer': url}, note='Downloading Channel Info JSON metadata')
list = self._call_api('channel/series/' + list_id, list_id, headers={'Referer': url}, note='Downloading Channel List JSON metadata') list = self._call_api('channel/series/' + list_id, list_id, headers={'Referer': url}, note='Downloading Channel List JSON metadata')
entries = [] entries = []
for video in list: for video in list:
if (video.get('type') == 'SERIE'): if (video.get('type') == 'SERIE'):
url = 'www.cont.ar/serie/%s' % video.get('uuid') url = 'www.cont.ar/serie/%s' % video.get('uuid')
entries.append(self.url_result(url, video_id=video.get('uuid'), video_title=video.get('name'))) entries.append(self.url_result(url, video_id=video.get('uuid'), video_title=video.get('name')))
return self.playlist_result( return self.playlist_result(
entries, list_id, channel_info.get('name'), channel_info.get('description')) entries, list_id, channel_info.get('name'), channel_info.get('description'))
class ContarBrowseIE(ContarBaseIE): class ContarBrowseIE(ContarBaseIE):
_UUID_RE = r'[\d]{1,}' _UUID_RE = r'[\d]{1,}'
_VALID_URL = r'https?://(?:www\.)?cont\.ar/browse/genre/(?P<id>%s)' % _UUID_RE _VALID_URL = r'https?://(?:www\.)?cont\.ar/browse/genre/(?P<id>%s)' % _UUID_RE
_TEST = { _TEST = {
@@ -288,19 +284,18 @@ class ContarBrowseIE(ContarBaseIE):
             'skip_download': True
         }
     }

     def _real_extract(self, url):
         list_id = self._match_id(url)
         list = self._call_api('full/section/' + list_id, list_id, headers={'Referer': url})
         entries = []

         for video in list['videos'].get('data', []):
             if (video.get('type') == 'SERIE'):
                 url = 'www.cont.ar/serie/%s' % video.get('uuid')
                 entries.append(self.url_result(url, video_id=video.get('uuid'), video_title=video.get('name')))

         return self.playlist_result(
             entries, list_id,
             list.get('title'))
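
Usage note (not part of the commit): once the module is registered in youtube_dl/extractor/extractors.py, the ContarIE extractor above can be exercised through youtube-dl's Python API. The sketch below assumes that registration and uses a placeholder watch UUID; login, when needed, is read from ~/.netrc under the 'contar' machine name declared by _NETRC_MACHINE.

    # Minimal sketch: fetch metadata for a cont.ar watch URL via youtube-dl.
    # The UUID is a placeholder; credentials come from ~/.netrc ('contar').
    import youtube_dl

    ydl_opts = {
        'usenetrc': True,  # pick up the 'contar' login from ~/.netrc
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        # download=False returns the info dict only; set True to fetch streams.
        info = ydl.extract_info(
            'https://www.cont.ar/watch/00000000-0000-0000-0000-000000000000',
            download=False)
        print(info.get('title'), info.get('season_number'), info.get('episode_number'))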