Merge pull request #108 from ytdl-org/master
[pull] master from ytdl-org:master
commit 7e8a24580f
youtube_dl/extractor/pornhub.py

@@ -403,6 +403,15 @@ class PornHubUserIE(PornHubPlaylistBaseIE):
 
 
 class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
+    @staticmethod
+    def _has_more(webpage):
+        return re.search(
+            r'''(?x)
+                <li[^>]+\bclass=["\']page_next|
+                <link[^>]+\brel=["\']next|
+                <button[^>]+\bid=["\']moreDataBtn
+            ''', webpage) is not None
+
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         host = mobj.group('host')
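A minimal, standalone sketch of the pagination check this hunk moves into the base class: the verbose regex is copied from the diff, while the `has_more` wrapper name and the sample page snippets are only for illustration.

```python
import re

# The same verbose pattern the shared _has_more() helper uses: any of these
# markers means another page of results is available.
_HAS_MORE_RE = r'''(?x)
    <li[^>]+\bclass=["\']page_next|
    <link[^>]+\brel=["\']next|
    <button[^>]+\bid=["\']moreDataBtn
'''


def has_more(webpage):
    # Mirrors the staticmethod: truthy only when a "next page" marker is found.
    return re.search(_HAS_MORE_RE, webpage) is not None


# Hypothetical page snippets, just to exercise the check.
print(has_more('<li class="page_next"><a href="?page=2">Next</a></li>'))  # True
print(has_more('<div class="pagination">last page</div>'))                # False
```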
@@ -411,13 +420,11 @@ class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
         page = int_or_none(self._search_regex(
             r'\bpage=(\d+)', url, 'page', default=None))
 
-        page_url = self._make_page_url(url)
-
         entries = []
         for page_num in (page, ) if page is not None else itertools.count(1):
             try:
                 webpage = self._download_webpage(
-                    page_url, item_id, 'Downloading page %d' % page_num,
+                    url, item_id, 'Downloading page %d' % page_num,
                     query={'page': page_num})
             except ExtractorError as e:
                 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
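The loop above now passes the original URL straight to `_download_webpage` with a `page` query parameter instead of a precomputed `page_url`. A rough sketch of that paging pattern, using `requests` purely for illustration (the extractor itself uses `_download_webpage`), with the stop conditions filled in plausibly since the hunk does not show all of them:

```python
import itertools

import requests  # illustration only; not what youtube-dl uses internally


def fetch_pages(url, has_more, page=None):
    """Collect page HTML the way the refactored loop does: either the single
    page that was explicitly requested, or pages 1, 2, 3, ... until a 404 or
    until has_more() finds no "next page" marker (assumed stop conditions)."""
    pages = []
    for page_num in (page,) if page is not None else itertools.count(1):
        resp = requests.get(url, params={'page': page_num})
        if resp.status_code == 404:
            break  # a 404 on a later page is treated as "no more pages"
        resp.raise_for_status()
        pages.append(resp.text)
        if page is not None or not has_more(resp.text):
            break
    return pages
```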
@@ -547,18 +554,6 @@ class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
                 if PornHubIE.suitable(url) or PornHubUserIE.suitable(url) or PornHubUserVideosUploadIE.suitable(url)
                 else super(PornHubPagedVideoListIE, cls).suitable(url))
 
-    def _make_page_url(self, url):
-        return url
-
-    @staticmethod
-    def _has_more(webpage):
-        return re.search(
-            r'''(?x)
-                <li[^>]+\bclass=["\']page_next|
-                <link[^>]+\brel=["\']next|
-                <button[^>]+\bid=["\']moreDataBtn
-            ''', webpage) is not None
-
 
 class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
     _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub\.(?:com|net))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos/upload)'
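The context lines above also show the `suitable()` dispatch idiom: the paged-list extractor steps aside whenever a more specific extractor already claims the URL. A simplified sketch with hypothetical class names and an example.com URL pattern:

```python
import re


class SingleVideoIE(object):
    _VALID_URL = r'https?://(?:www\.)?example\.com/view_video\.php\?viewkey=\w+'

    @classmethod
    def suitable(cls, url):
        return re.match(cls._VALID_URL, url) is not None


class PagedVideoListIE(object):
    # Catch-all pattern; defers to the more specific extractor above.
    _VALID_URL = r'https?://(?:www\.)?example\.com/.+'

    @classmethod
    def suitable(cls, url):
        return (False
                if SingleVideoIE.suitable(url)
                else re.match(cls._VALID_URL, url) is not None)


print(PagedVideoListIE.suitable('https://www.example.com/view_video.php?viewkey=abc'))  # False
print(PagedVideoListIE.suitable('https://www.example.com/model/foo/videos'))            # True
```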
@@ -572,11 +567,3 @@ class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
         'url': 'https://www.pornhub.com/model/zoe_ph/videos/upload',
         'only_matching': True,
     }]
-
-    def _make_page_url(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        return '%s/ajax' % mobj.group('url')
-
-    @staticmethod
-    def _has_more(webpage):
-        return True
youtube_dl/extractor/redtube.py

@@ -6,6 +6,7 @@ from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     int_or_none,
+    merge_dicts,
     str_to_int,
     unified_strdate,
     url_or_none,
@@ -45,11 +46,14 @@ class RedTubeIE(InfoExtractor):
         if any(s in webpage for s in ['video-deleted-info', '>This video has been removed']):
             raise ExtractorError('Video %s has been removed' % video_id, expected=True)
 
-        title = self._html_search_regex(
-            (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
-             r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
-            webpage, 'title', group='title',
-            default=None) or self._og_search_title(webpage)
+        info = self._search_json_ld(webpage, video_id, default={})
+
+        if not info.get('title'):
+            info['title'] = self._html_search_regex(
+                (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
+                 r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
+                webpage, 'title', group='title',
+                default=None) or self._og_search_title(webpage)
 
         formats = []
         sources = self._parse_json(
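This hunk switches title extraction to a structured-data-first approach: `_search_json_ld` is tried before the old HTML/JS regexes. A self-contained sketch of that fallback pattern, with plain `json`/`re` standing in for youtube-dl's `_search_json_ld` and `_html_search_regex` helpers and a made-up page snippet:

```python
import json
import re


def extract_title(webpage):
    # First preference: the page's JSON-LD block (schema.org uses "name" for the title).
    info = {}
    m = re.search(
        r'<script[^>]+type=["\']application/ld\+json["\'][^>]*>(.+?)</script>',
        webpage, re.DOTALL)
    if m:
        try:
            info = json.loads(m.group(1))
        except ValueError:
            info = {}
    if info.get('name'):
        return info['name']
    # Fallback: the same kind of heading regex the old code relied on (simplified).
    m = re.search(
        r'<h(\d)[^>]+class="(?:video_title_text|videoTitle)[^"]*">(?P<title>.+?)</h\1>',
        webpage)
    return m.group('title') if m else None


html = ('<script type="application/ld+json">'
        '{"@type": "VideoObject", "name": "Sample clip"}</script>')
print(extract_title(html))  # Sample clip
```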
@@ -88,28 +92,28 @@ class RedTubeIE(InfoExtractor):
 
         thumbnail = self._og_search_thumbnail(webpage)
         upload_date = unified_strdate(self._search_regex(
-            r'<span[^>]+>ADDED ([^<]+)<',
-            webpage, 'upload date', fatal=False))
+            r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',
+            webpage, 'upload date', default=None))
         duration = int_or_none(self._og_search_property(
             'video:duration', webpage, default=None) or self._search_regex(
             r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
         view_count = str_to_int(self._search_regex(
             (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
-             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)'),
-            webpage, 'view count', fatal=False))
+             r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)',
+             r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'),
+            webpage, 'view count', default=None))
 
         # No self-labeling, but they describe themselves as
         # "Home of Videos Porno"
         age_limit = 18
 
-        return {
+        return merge_dicts(info, {
             'id': video_id,
             'ext': 'mp4',
-            'title': title,
             'thumbnail': thumbnail,
             'upload_date': upload_date,
             'duration': duration,
             'view_count': view_count,
             'age_limit': age_limit,
             'formats': formats,
-        }
+        })
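The final `return` now goes through `merge_dicts(info, {...})`, so fields already found via JSON-LD (notably the title, which is why `'title': title` is dropped from the literal dict) take precedence over the scraped values. A simplified re-implementation, assuming the usual youtube-dl semantics of earlier dicts winning and `None` values never overwriting anything:

```python
def merge_dicts_sketch(*dicts):
    """Simplified take on youtube-dl's merge_dicts: earlier dicts win, None
    values are skipped, and an empty string can still be filled in later."""
    merged = {}
    for d in dicts:
        for k, v in d.items():
            if v is None:
                continue
            if k not in merged or (merged[k] == '' and v != ''):
                merged[k] = v
    return merged


info = {'title': 'Title from JSON-LD', 'timestamp': 1580000000}
print(merge_dicts_sketch(info, {
    'id': '12345',   # hypothetical values, just for the demo
    'ext': 'mp4',
    'title': None,   # would not clobber the JSON-LD title anyway
    'age_limit': 18,
}))
# {'title': 'Title from JSON-LD', 'timestamp': 1580000000,
#  'id': '12345', 'ext': 'mp4', 'age_limit': 18}
```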