about summary refs log tree commit diff
path: root/youtube_dl/extractor/tbs.py
diff options
context:
space:
mode:
author Remita Amine <remitamine@gmail.com> 2018-05-30 13:21:07 +0100
committer Remita Amine <remitamine@gmail.com> 2018-05-30 13:21:07 +0100
commit e0d42dd4b270d06a953822c091afefd946bd93f2 (patch)
tree cb4becca6b092335404b00454346c367cc4b4b42 /youtube_dl/extractor/tbs.py
parent a07879d6b2edc474b0595a29932726fa7aa14b3a (diff)
download youtube-dl-e0d42dd4b270d06a953822c091afefd946bd93f2.tar.xz
[teamcoco] Fix extraction for full episodes(closes #16573)
Diffstat (limited to 'youtube_dl/extractor/tbs.py')
-rw-r--r-- youtube_dl/extractor/tbs.py | 61
1 files changed, 19 insertions, 42 deletions
diff --git a/youtube_dl/extractor/tbs.py b/youtube_dl/extractor/tbs.py
index edc31729d..784f8ed66 100644
--- a/youtube_dl/extractor/tbs.py
+++ b/youtube_dl/extractor/tbs.py
@@ -4,6 +4,10 @@ from __future__ import unicode_literals
import re
from .turner import TurnerBaseIE
+from ..compat import (
+ compat_urllib_parse_urlparse,
+ compat_parse_qs,
+)
from ..utils import (
float_or_none,
int_or_none,
@@ -38,48 +42,22 @@ class TBSIE(TurnerBaseIE):
def _real_extract(self, url):
site, display_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, display_id)
- video_data = self._parse_json(self._search_regex(
+ drupal_settings = self._parse_json(self._search_regex(
r'<script[^>]+?data-drupal-selector="drupal-settings-json"[^>]*?>({.+?})</script>',
- webpage, 'drupal setting'), display_id)['turner_playlist'][0]
+ webpage, 'drupal setting'), display_id)
+ video_data = drupal_settings['turner_playlist'][0]
media_id = video_data['mediaID']
title = video_data['title']
+ tokenizer_query = compat_parse_qs(compat_urllib_parse_urlparse(
+ drupal_settings['ngtv_token_url']).query)
- streams_data = self._download_json(
- 'http://medium.ngtv.io/media/%s/tv' % media_id,
- media_id)['media']['tv']
- duration = None
- chapters = []
- formats = []
- for supported_type in ('unprotected', 'bulkaes'):
- stream_data = streams_data.get(supported_type, {})
- m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
- if not m3u8_url:
- continue
- if stream_data.get('playlistProtection') == 'spe':
- m3u8_url = self._add_akamai_spe_token(
- 'http://token.vgtf.net/token/token_spe',
- m3u8_url, media_id, {
- 'url': url,
- 'site_name': site[:3].upper(),
- 'auth_required': video_data.get('authRequired') == '1',
- })
- formats.extend(self._extract_m3u8_formats(
- m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
-
- duration = float_or_none(stream_data.get('totalRuntime') or video_data.get('duration'))
-
- if not chapters:
- for chapter in stream_data.get('contentSegments', []):
- start_time = float_or_none(chapter.get('start'))
- duration = float_or_none(chapter.get('duration'))
- if start_time is None or duration is None:
- continue
- chapters.append({
- 'start_time': start_time,
- 'end_time': start_time + duration,
- })
- self._sort_formats(formats)
+ info = self._extract_ngtv_info(
+ media_id, tokenizer_query, {
+ 'url': url,
+ 'site_name': site[:3].upper(),
+ 'auth_required': video_data.get('authRequired') == '1',
+ })
thumbnails = []
for image_id, image in video_data.get('images', {}).items():
@@ -98,15 +76,14 @@ class TBSIE(TurnerBaseIE):
})
thumbnails.append(i)
- return {
+ info.update({
'id': media_id,
'title': title,
'description': strip_or_none(video_data.get('descriptionNoTags') or video_data.get('shortDescriptionNoTags')),
- 'duration': duration,
+ 'duration': float_or_none(video_data.get('duration')) or info.get('duration'),
'timestamp': int_or_none(video_data.get('created')),
'season_number': int_or_none(video_data.get('season')),
'episode_number': int_or_none(video_data.get('episode')),
- 'cahpters': chapters,
'thumbnails': thumbnails,
- 'formats': formats,
- }
+ })
+ return info