Diffstat (limited to 'youtube_dl/extractor')
-rw-r--r--  youtube_dl/extractor/__init__.py    |  4
-rw-r--r--  youtube_dl/extractor/beeg.py        | 53
-rw-r--r--  youtube_dl/extractor/crunchyroll.py | 97
-rw-r--r--  youtube_dl/extractor/eporner.py     | 55
-rw-r--r--  youtube_dl/extractor/hornbunny.py   | 44
-rw-r--r--  youtube_dl/extractor/sunporno.py    | 68
-rw-r--r--  youtube_dl/extractor/tudou.py       | 48
-rw-r--r--  youtube_dl/extractor/youtube.py     | 49
8 files changed, 387 insertions(+), 31 deletions(-)
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index a306035f1..9f43bb8f4 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -24,6 +24,7 @@ from .auengine import AUEngineIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
 from .bbccouk import BBCCoUkIE
+from .beeg import BeegIE
 from .bilibili import BiliBiliIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
@@ -86,6 +87,7 @@ from .ellentv import (
 from .elpais import ElPaisIE
 from .empflix import EmpflixIE
 from .engadget import EngadgetIE
+from .eporner import EpornerIE
 from .escapist import EscapistIE
 from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
@@ -135,6 +137,7 @@ from .grooveshark import GroovesharkIE
 from .hark import HarkIE
 from .helsinki import HelsinkiIE
 from .hentaistigma import HentaiStigmaIE
+from .hornbunny import HornBunnyIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
 from .howstuffworks import HowStuffWorksIE
@@ -323,6 +326,7 @@ from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
 from .streamcz import StreamCZIE
+from .sunporno import SunPornoIE
 from .swrmediathek import SWRMediathekIE
 from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
diff --git a/youtube_dl/extractor/beeg.py b/youtube_dl/extractor/beeg.py
new file mode 100644
index 000000000..c2692cfdc
--- /dev/null
+++ b/youtube_dl/extractor/beeg.py
@@ -0,0 +1,53 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class BeegIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://beeg.com/5416503',
+        'md5': '634526ae978711f6b748fe0dd6c11f57',
+        'info_dict': {
+            'id': '5416503',
+            'ext': 'mp4',
+            'title': 'Sultry Striptease',
+            'description': 'md5:6db3c6177972822aaba18652ff59c773',
+            'categories': list,  # NSFW
+            'thumbnail': 're:https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(
+            r"'480p'\s*:\s*'([^']+)'", webpage, 'video URL')
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
+
+        description = self._html_search_regex(
+            r'<meta name="description" content="([^"]*)"',
+            webpage, 'description', fatal=False)
+        thumbnail = self._html_search_regex(
+            r'\'previewer.url\'\s*:\s*"([^"]*)"',
+            webpage, 'thumbnail', fatal=False)
+
+        categories_str = self._html_search_regex(
+            r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
+        categories = categories_str.split(',')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+        }
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 026a9177e..4903764f7 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -5,6 +5,7 @@ import re
 import json
 import base64
 import zlib
+import xml.etree.ElementTree
 
 from hashlib import sha1
 from math import pow, sqrt, floor
@@ -17,6 +18,7 @@ from ..utils import (
     intlist_to_bytes,
     unified_strdate,
     clean_html,
+    urlencode_postdata,
 )
 from ..aes import (
     aes_cbc_decrypt,
@@ -51,6 +53,26 @@ class CrunchyrollIE(InfoExtractor):
         '1080': ('80', '108'),
     }
 
+    def _login(self):
+        (username, password) = self._get_login_info()
+        if username is None:
+            return
+        self.report_login()
+        login_url = 'https://www.crunchyroll.com/?a=formhandler'
+        data = urlencode_postdata({
+            'formname': 'RpcApiUser_Login',
+            'name': username,
+            'password': password,
+        })
+        login_request = compat_urllib_request.Request(login_url, data)
+        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        self._download_webpage(login_request, None, False, 'Wrong login info')
+
+
+    def _real_initialize(self):
+        self._login()
+
+
     def _decrypt_subtitles(self, data, iv, id):
         data = bytes_to_intlist(data)
         iv = bytes_to_intlist(iv)
@@ -97,6 +119,75 @@ class CrunchyrollIE(InfoExtractor):
             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
         return output
 
+    def _convert_subtitles_to_ass(self, subtitles):
+        output = ''
+
+        def ass_bool(strvalue):
+            assvalue = '0'
+            if strvalue == '1':
+                assvalue = '-1'
+            return assvalue
+
+        sub_root = xml.etree.ElementTree.fromstring(subtitles)
+        if not sub_root:
+            return output
+
+        output = '[Script Info]\n'
+        output += 'Title: %s\n' % sub_root.attrib["title"]
+        output += 'ScriptType: v4.00+\n'
+        output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
+        output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
+        output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+        output += """ScaledBorderAndShadow: yes
+
+[V4+ Styles]
+Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
+"""
+        for style in sub_root.findall('./styles/style'):
+            output += 'Style: ' + style.attrib["name"]
+            output += ',' + style.attrib["font_name"]
+            output += ',' + style.attrib["font_size"]
+            output += ',' + style.attrib["primary_colour"]
+            output += ',' + style.attrib["secondary_colour"]
+            output += ',' + style.attrib["outline_colour"]
+            output += ',' + style.attrib["back_colour"]
+            output += ',' + ass_bool(style.attrib["bold"])
+            output += ',' + ass_bool(style.attrib["italic"])
+            output += ',' + ass_bool(style.attrib["underline"])
+            output += ',' + ass_bool(style.attrib["strikeout"])
+            output += ',' + style.attrib["scale_x"]
+            output += ',' + style.attrib["scale_y"]
+            output += ',' + style.attrib["spacing"]
+            output += ',' + style.attrib["angle"]
+            output += ',' + style.attrib["border_style"]
+            output += ',' + style.attrib["outline"]
+            output += ',' + style.attrib["shadow"]
+            output += ',' + style.attrib["alignment"]
+            output += ',' + style.attrib["margin_l"]
+            output += ',' + style.attrib["margin_r"]
+            output += ',' + style.attrib["margin_v"]
+            output += ',' + style.attrib["encoding"]
+            output += '\n'
+
+        output += """
+[Events]
+Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
+"""
+        for event in sub_root.findall('./events/event'):
+            output += 'Dialogue: 0'
+            output += ',' + event.attrib["start"]
+            output += ',' + event.attrib["end"]
+            output += ',' + event.attrib["style"]
+            output += ',' + event.attrib["name"]
+            output += ',' + event.attrib["margin_l"]
+            output += ',' + event.attrib["margin_r"]
+            output += ',' + event.attrib["margin_v"]
+            output += ',' + event.attrib["effect"]
+            output += ',' + event.attrib["text"]
+            output += '\n'
+
+        return output
+
     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
@@ -158,6 +249,7 @@ class CrunchyrollIE(InfoExtractor):
             })
 
         subtitles = {}
+        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
             sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
                                               video_id, note='Downloading subtitles for '+sub_name)
@@ -174,7 +266,10 @@ class CrunchyrollIE(InfoExtractor):
             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
             if not lang_code:
                 continue
-            subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+            if sub_format == 'ass':
+                subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle)
+            else:
+                subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
 
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/eporner.py b/youtube_dl/extractor/eporner.py
new file mode 100644
index 000000000..4c2c074cb
--- /dev/null
+++ b/youtube_dl/extractor/eporner.py
@@ -0,0 +1,55 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    str_to_int,
+)
+
+
+class EpornerIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<title_dash>[\w-]+)/?'
+    _TEST = {
+        'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
+        'md5': '3b427ae4b9d60619106de3185c2987cd',
+        'info_dict': {
+            'id': '95008',
+            'ext': 'flv',
+            'title': 'Infamous Tiffany Teen Strip Tease Video',
+            'duration': 194,
+            'view_count': int,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(
+            r'<title>(.*?) - EPORNER', webpage, 'title')
+
+        redirect_code = self._html_search_regex(
+            r'<script type="text/javascript" src="/config5/%s/([a-f\d]+)/">' % video_id,
+            webpage, 'redirect_code')
+        redirect_url = 'http://www.eporner.com/config5/%s/%s' % (video_id, redirect_code)
+        webpage2 = self._download_webpage(redirect_url, video_id)
+        video_url = self._html_search_regex(
+            r'file: "(.*?)",', webpage2, 'video_url')
+
+        duration = parse_duration(self._search_regex(
+            r'class="mbtim">([0-9:]+)</div>', webpage, 'duration',
+            fatal=False))
+        view_count = str_to_int(self._search_regex(
+            r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
+            webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'duration': duration,
+            'view_count': view_count,
+        }
diff --git a/youtube_dl/extractor/hornbunny.py b/youtube_dl/extractor/hornbunny.py
new file mode 100644
index 000000000..a42fba0cb
--- /dev/null
+++ b/youtube_dl/extractor/hornbunny.py
@@ -0,0 +1,44 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+class HornBunnyIE(InfoExtractor):
+    _VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
+    _TEST = {
+        'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
+        'md5': '95e40865aedd08eff60272b704852ad7',
+        'info_dict': {
+            'id': '5227',
+            'ext': 'flv',
+            'title': 'panty slut jerk off instruction',
+            'duration': 550
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+        title = self._html_search_regex(r'class="title">(.*?)</h2>', webpage, 'title')
+        redirect_url = self._html_search_regex(r'pg&settings=(.*?)\|0"\);', webpage, 'title')
+        webpage2 = self._download_webpage(redirect_url, video_id)
+        video_url = self._html_search_regex(r'flvMask:(.*?);', webpage2, 'video_url')
+
+        mobj = re.search(r'<strong>Runtime:</strong> (?P<minutes>\d+):(?P<seconds>\d+)</div>', webpage)
+        duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
+
+        view_count = self._html_search_regex(r'<strong>Views:</strong> (\d+)</div>', webpage, 'view count', fatal=False)
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'ext': 'flv',
+            'duration': duration,
+            'view_count': int_or_none(view_count),
+        }
diff --git a/youtube_dl/extractor/sunporno.py b/youtube_dl/extractor/sunporno.py
new file mode 100644
index 000000000..c7a46eb71
--- /dev/null
+++ b/youtube_dl/extractor/sunporno.py
@@ -0,0 +1,68 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    parse_duration,
+    int_or_none,
+    qualities,
+    determine_ext,
+)
+
+
+class SunPornoIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sunporno\.com/videos/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://www.sunporno.com/videos/807778/',
+        'md5': '6457d3c165fd6de062b99ef6c2ff4c86',
+        'info_dict': {
+            'id': '807778',
+            'ext': 'flv',
+            'title': 'md5:0a400058e8105d39e35c35e7c5184164',
+            'description': 'md5:a31241990e1bd3a64e72ae99afb325fb',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 302,
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(r'<title>([^<]+)</title>', webpage, 'title')
+        description = self._html_search_meta('description', webpage, 'description')
+        thumbnail = self._html_search_regex(
+            r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
+
+        duration = parse_duration(self._search_regex(
+            r'<span>Duration: (\d+:\d+)</span>', webpage, 'duration', fatal=False))
+
+        view_count = int_or_none(self._html_search_regex(
+            r'<span class="views">(\d+)</span>', webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._html_search_regex(
+            r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False))
+
+        formats = []
+        quality = qualities(['mp4', 'flv'])
+        for video_url in re.findall(r'<source src="([^"]+)"', webpage):
+            video_ext = determine_ext(video_url)
+            formats.append({
+                'url': video_url,
+                'format_id': video_ext,
+                'quality': quality(video_ext),
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'view_count': view_count,
+            'comment_count': comment_count,
+            'formats': formats,
+        }
diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index 23816599c..a85065121 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -1,5 +1,7 @@
 # coding: utf-8
 
+from __future__ import unicode_literals
+
 import re
 import json
 
@@ -9,22 +11,21 @@ from .common import InfoExtractor
 class TudouIE(InfoExtractor):
     _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?'
     _TESTS = [{
-        u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
-        u'file': u'159448201.f4v',
-        u'md5': u'140a49ed444bd22f93330985d8475fcb',
-        u'info_dict': {
-            u"title": u"卡马乔国足开大脚长传冲吊集锦"
+        'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
+        'md5': '140a49ed444bd22f93330985d8475fcb',
+        'info_dict': {
+            'id': '159448201',
+            'ext': 'f4v',
+            'title': '卡马乔国足开大脚长传冲吊集锦',
+            'thumbnail': 're:^https?://.*\.jpg$',
         }
-    },
-    {
-        u'url': u'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
-        u'file': u'todo.mp4',
-        u'md5': u'todo.mp4',
-        u'info_dict': {
-            u'title': u'todo.mp4',
+    }, {
+        'url': 'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html',
+        'info_dict': {
+            'title': 'todo.mp4',
         },
-        u'add_ie': [u'Youku'],
-        u'skip': u'Only works from China'
+        'add_ie': ['Youku'],
+        'skip': 'Only works from China'
     }]
 
     def _url_for_id(self, id, quality = None):
@@ -44,14 +45,14 @@
         if m and m.group(1):
             return {
                 '_type': 'url',
-                'url': u'youku:' + m.group(1),
+                'url': 'youku:' + m.group(1),
                 'ie_key': 'Youku'
             }
 
         title = self._search_regex(
-            r",kw:\s*['\"](.+?)[\"']", webpage, u'title')
+            r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
         thumbnail_url = self._search_regex(
-            r",pic:\s*[\"'](.+?)[\"']", webpage, u'thumbnail URL', fatal=False)
+            r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)
 
         segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
         segments = json.loads(segs_json)
@@ -69,12 +70,13 @@
             part_id = part['k']
             final_url = self._url_for_id(part_id, quality)
             ext = (final_url.split('?')[0]).split('.')[-1]
-            part_info = {'id': part_id,
-                         'url': final_url,
-                         'ext': ext,
-                         'title': title,
-                         'thumbnail': thumbnail_url,
-                         }
+            part_info = {
+                'id': '%s' % part_id,
+                'url': final_url,
+                'ext': ext,
+                'title': title,
+                'thumbnail': thumbnail_url,
+            }
             result.append(part_info)
         return result
 
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 13676c49f..78f3b7e7b 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -316,6 +316,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 u"upload_date": u"20121002",
                 u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
                 u"categories": [u'Science & Technology'],
+                'like_count': int,
+                'dislike_count': int,
             }
         },
         {
@@ -784,7 +786,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
             upload_date = unified_strdate(upload_date)
 
-        m_cat_container = get_element_by_id("eow-category", video_webpage)
+        m_cat_container = self._search_regex(
+            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
+            video_webpage, 'categories', fatal=False)
         if m_cat_container:
             category = self._html_search_regex(
                 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
@@ -1430,12 +1434,6 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
             paging = mobj.group('paging')
         return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
 
-class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
-    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
-    _FEED_NAME = 'subscriptions'
-    _PLAYLIST_TITLE = u'Youtube Subscriptions'
-
 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
     IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
     _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
@@ -1468,6 +1466,43 @@ class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
         return self.url_result(playlist_id, 'YoutubePlaylist')
 
 
+class YoutubeSubscriptionsIE(YoutubePlaylistIE):
+    IE_NAME = u'youtube:subscriptions'
+    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
+    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
+
+    def _real_extract(self, url):
+        title = u'Youtube Subscriptions'
+        page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)
+
+        # The extraction process is the same as for playlists, but the regex
+        # for the video ids doesn't contain an index
+        ids = []
+        more_widget_html = content_html = page
+
+        for page_num in itertools.count(1):
+            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
+            new_ids = orderedSet(matches)
+            ids.extend(new_ids)
+
+            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+            if not mobj:
+                break
+
+            more = self._download_json(
+                'https://youtube.com/%s' % mobj.group('more'), title,
+                'Downloading page #%s' % page_num,
+                transform_source=uppercase_escape)
+            content_html = more['content_html']
+            more_widget_html = more['load_more_widget_html']
+
+        return {
+            '_type': 'playlist',
+            'title': title,
+            'entries': self._ids_to_results(ids),
+        }
+
+
 class YoutubeTruncatedURLIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_url'
     IE_DESC = False  # Do not list