Merge pull request #108 from ytdl-org/master

[pull] master from ytdl-org:master
pull[bot] authored 2019-10-05 15:41:36 +00:00, committed by GitHub
commit 7e8a24580f
2 changed files with 26 additions and 35 deletions

youtube_dl/extractor/pornhub.py

@@ -403,6 +403,15 @@ class PornHubUserIE(PornHubPlaylistBaseIE):
 class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
+    @staticmethod
+    def _has_more(webpage):
+        return re.search(
+            r'''(?x)
+                <li[^>]+\bclass=["\']page_next|
+                <link[^>]+\brel=["\']next|
+                <button[^>]+\bid=["\']moreDataBtn
+            ''', webpage) is not None
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         host = mobj.group('host')
@@ -411,13 +420,11 @@ class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
         page = int_or_none(self._search_regex(
             r'\bpage=(\d+)', url, 'page', default=None))
 
-        page_url = self._make_page_url(url)
-
         entries = []
         for page_num in (page, ) if page is not None else itertools.count(1):
             try:
                 webpage = self._download_webpage(
-                    page_url, item_id, 'Downloading page %d' % page_num,
+                    url, item_id, 'Downloading page %d' % page_num,
                     query={'page': page_num})
             except ExtractorError as e:
                 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
@@ -547,18 +554,6 @@ class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
                 if PornHubIE.suitable(url) or PornHubUserIE.suitable(url) or PornHubUserVideosUploadIE.suitable(url)
                 else super(PornHubPagedVideoListIE, cls).suitable(url))
 
-    def _make_page_url(self, url):
-        return url
-
-    @staticmethod
-    def _has_more(webpage):
-        return re.search(
-            r'''(?x)
-                <li[^>]+\bclass=["\']page_next|
-                <link[^>]+\brel=["\']next|
-                <button[^>]+\bid=["\']moreDataBtn
-            ''', webpage) is not None
-
 
 class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
     _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos/upload)'
@@ -572,11 +567,3 @@ class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
         'url': 'https://www.pornhub.com/model/zoe_ph/videos/upload',
         'only_matching': True,
     }]
-
-    def _make_page_url(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        return '%s/ajax' % mobj.group('url')
-
-    @staticmethod
-    def _has_more(webpage):
-        return True
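
The pornhub.py hunks above consolidate pagination: the `_has_more` check moves into the shared `PornHubPagedPlaylistBaseIE`, the per-class `_make_page_url` hooks (plain URL vs. an `/ajax` suffix) disappear, and every paged extractor now re-requests the original URL with an incrementing `page` query parameter until a 404 or a missing next-page marker ends the listing. A minimal standalone sketch of that loop, assuming a `fetch(url, query)` callable in place of `InfoExtractor._download_webpage` (`fetch` and `collect_pages` are placeholder names, not youtube-dl API):

    import itertools
    import re

    def _has_more(webpage):
        # The same next-page markers the patch hoists into the base class.
        return re.search(
            r'''(?x)
                <li[^>]+\bclass=["\']page_next|
                <link[^>]+\brel=["\']next|
                <button[^>]+\bid=["\']moreDataBtn
            ''', webpage) is not None

    def collect_pages(url, fetch, page=None):
        # fetch(url, query) stands in for _download_webpage(); returning
        # None models the HTTP 404 the real code catches as ExtractorError.
        pages = []
        for page_num in (page,) if page is not None else itertools.count(1):
            webpage = fetch(url, query={'page': page_num})
            if webpage is None:
                break
            pages.append(webpage)
            if page is not None or not _has_more(webpage):
                break
        return pages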

youtube_dl/extractor/redtube.py

@@ -6,6 +6,7 @@ from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     int_or_none,
+    merge_dicts,
     str_to_int,
     unified_strdate,
     url_or_none,
@@ -45,11 +46,14 @@ class RedTubeIE(InfoExtractor):
         if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
             raise ExtractorError('Video %s has been removed' % video_id, expected=True)
 
-        title = self._html_search_regex(
-            (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
-             r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
-            webpage, 'title', group='title',
-            default=None) or self._og_search_title(webpage)
+        info = self._search_json_ld(webpage, video_id, default={})
+
+        if not info.get('title'):
+            info['title'] = self._html_search_regex(
+                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
+                 r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
+                webpage, 'title', group='title',
+                default=None) or self._og_search_title(webpage)
 
         formats = []
         sources = self._parse_json(
@@ -88,28 +92,28 @@ class RedTubeIE(InfoExtractor):
 
         thumbnail = self._og_search_thumbnail(webpage)
         upload_date = unified_strdate(self._search_regex(
-            r'<span[^>]+>ADDED ([^<]+)<',
-            webpage, 'upload date', fatal=False))
+            r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',
+            webpage, 'upload date', default=None))
         duration = int_or_none(self._og_search_property(
             'video:duration', webpage, default=None) or self._search_regex(
             r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
         view_count = str_to_int(self._search_regex(
             (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
-             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)'),
-            webpage, 'view count', fatal=False))
+             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)',
+             r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'),
+            webpage, 'view count', default=None))
 
         # No self-labeling, but they describe themselves as
         # "Home of Videos Porno"
         age_limit = 18
 
-        return {
+        return merge_dicts(info, {
             'id': video_id,
             'ext': 'mp4',
-            'title': title,
             'thumbnail': thumbnail,
             'upload_date': upload_date,
             'duration': duration,
             'view_count': view_count,
             'age_limit': age_limit,
             'formats': formats,
-        }
+        })
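
The redtube.py change prefers structured metadata: `_search_json_ld` is consulted first, the title regexes become a fallback, and the final `merge_dicts(info, {...})` lets the JSON-LD fields win, since youtube-dl's `merge_dicts` gives earlier arguments precedence. A simplified sketch of that precedence rule (the real helper in youtube_dl/utils.py also special-cases empty strings between compat string types):

    def merge_dicts(*dicts):
        # Simplified: the first non-None value for a key wins; later dicts
        # can only fill keys that are still missing or empty.
        merged = {}
        for d in dicts:
            for k, v in d.items():
                if v is None:
                    continue
                if k not in merged or (merged[k] == '' and v != ''):
                    merged[k] = v
        return merged

    info = {'title': 'Title from JSON-LD', 'upload_date': None}
    scraped = {'title': 'Title from regex', 'upload_date': '20191005', 'age_limit': 18}
    print(merge_dicts(info, scraped))
    # {'title': 'Title from JSON-LD', 'upload_date': '20191005', 'age_limit': 18}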