author     Remita Amine <remitamine@gmail.com>    2018-11-28 19:53:22 +0100
committer  Remita Amine <remitamine@gmail.com>    2018-11-28 19:53:45 +0100
commit     ca01d178844129bd4b6ed74740fbd30e7f84c1c2 (patch)
tree       8a163e48b6b8471ae315dae9ce122e689fdd001d /youtube_dl
parent     d19600df07128c73ef7242af7e1cd8c819951aba (diff)
[vimeo] Add support for VHX (Vimeo OTT) (#14835)
Diffstat (limited to 'youtube_dl')
-rw-r--r--   youtube_dl/extractor/extractors.py |  1
-rw-r--r--   youtube_dl/extractor/vimeo.py      | 85
2 files changed, 85 insertions(+), 1 deletion(-)
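
Once this commit is in place, the new extractor is reachable through youtube-dl's regular Python entry point. The sketch below is illustrative only: the embed URL (video id 123456) is a placeholder that merely matches the _VALID_URL pattern added in vimeo.py, not a real VHX video.

import youtube_dl

# Placeholder URL in the shape accepted by the new VHXEmbedIE._VALID_URL;
# the numeric id is hypothetical.
url = 'https://embed.vhx.tv/videos/123456'

with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    # extract_info(download=False) returns the info dict built by _real_extract()
    info = ydl.extract_info(url, download=False)
    print(info['title'], info.get('duration'), len(info.get('formats', [])))
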
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 60e6175b1..cd91c0fcb 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -1303,6 +1303,7 @@ from .vimeo import (
     VimeoReviewIE,
     VimeoUserIE,
     VimeoWatchLaterIE,
+    VHXEmbedIE,
 )
 from .vimple import VimpleIE
 from .vine import (
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 88f4d9979..6353c6831 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -14,10 +14,13 @@ from ..compat import (
 from ..utils import (
     determine_ext,
     ExtractorError,
+    js_to_json,
     InAdvancePagedList,
     int_or_none,
     merge_dicts,
     NO_DEFAULT,
+    parse_filesize,
+    qualities,
     RegexNotFoundError,
     sanitized_Request,
     smuggle_url,
@@ -27,7 +30,6 @@ from ..utils import (
     unsmuggle_url,
     urlencode_postdata,
     unescapeHTML,
-    parse_filesize,
 )
@@ -1063,3 +1065,84 @@ class VimeoLikesIE(InfoExtractor):
             'description': description,
             'entries': pl,
         }
+
+
+class VHXEmbedIE(InfoExtractor):
+    IE_NAME = 'vhx:embed'
+    _VALID_URL = r'https?://embed\.vhx\.tv/videos/(?P<id>\d+)'
+
+    def _call_api(self, video_id, access_token, path='', query=None):
+        return self._download_json(
+            'https://api.vhx.tv/videos/' + video_id + path, video_id, headers={
+                'Authorization': 'Bearer ' + access_token,
+            }, query=query)
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        credentials = self._parse_json(self._search_regex(
+            r'(?s)credentials\s*:\s*({.+?}),', webpage,
+            'config'), video_id, js_to_json)
+        access_token = credentials['access_token']
+
+        query = {}
+        for k, v in credentials.items():
+            if k in ('authorization', 'authUserToken', 'ticket') and v and v != 'undefined':
+                if k == 'authUserToken':
+                    query['auth_user_token'] = v
+                else:
+                    query[k] = v
+        files = self._call_api(video_id, access_token, '/files', query)
+
+        formats = []
+        for f in files:
+            href = try_get(f, lambda x: x['_links']['source']['href'])
+            if not href:
+                continue
+            method = f.get('method')
+            if method == 'hls':
+                formats.extend(self._extract_m3u8_formats(
+                    href, video_id, 'mp4', 'm3u8_native',
+                    m3u8_id='hls', fatal=False))
+            elif method == 'dash':
+                formats.extend(self._extract_mpd_formats(
+                    href, video_id, mpd_id='dash', fatal=False))
+            else:
+                fmt = {
+                    'filesize': int_or_none(try_get(f, lambda x: x['size']['bytes'])),
+                    'format_id': 'http',
+                    'preference': 1,
+                    'url': href,
+                    'vcodec': f.get('codec'),
+                }
+                quality = f.get('quality')
+                if quality:
+                    fmt.update({
+                        'format_id': 'http-' + quality,
+                        'height': int_or_none(self._search_regex(r'(\d+)p', quality, 'height', default=None)),
+                    })
+                formats.append(fmt)
+        self._sort_formats(formats)
+
+        video_data = self._call_api(video_id, access_token)
+        title = video_data.get('title') or video_data['name']
+
+        q = qualities(['small', 'medium', 'large', 'source'])
+        thumbnails = []
+        for thumbnail_id, thumbnail_url in video_data.get('thumbnail', {}).items():
+            thumbnails.append({
+                'id': thumbnail_id,
+                'url': thumbnail_url,
+                'preference': q(thumbnail_id),
+            })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': video_data.get('description'),
+            'duration': int_or_none(try_get(video_data, lambda x: x['duration']['seconds'])),
+            'formats': formats,
+            'thumbnails': thumbnails,
+            'timestamp': unified_timestamp(video_data.get('created_at')),
+            'view_count': int_or_none(video_data.get('plays_count')),
+        }
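
The new class lands without a _TEST entry. As a reference point, here is a minimal sketch of what one could look like under youtube-dl's usual _TEST convention; the URL, id and title are placeholders rather than values taken from this commit.

    _TEST = {
        # hypothetical embed URL and metadata, for illustration only
        'url': 'https://embed.vhx.tv/videos/123456',
        'info_dict': {
            'id': '123456',
            'ext': 'mp4',
            'title': 'placeholder title',
        },
        'params': {
            # the files API needs credentials scraped from the embed page,
            # so skip the actual media download when exercising the sketch
            'skip_download': True,
        },
    }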