-rw-r--r--   test/helper.py                        31
-rw-r--r--   test/test_InfoExtractor.py             3
-rw-r--r--   test/test_download.py                 22
-rw-r--r--   test/test_jsinterp.py                  8
-rw-r--r--   test/test_youtube_signature.py         4
-rwxr-xr-x   youtube_dl/YoutubeDL.py               49
-rw-r--r--   youtube_dl/extractor/common.py        64
-rw-r--r--   youtube_dl/extractor/extractors.py    18
-rw-r--r--   youtube_dl/extractor/mixcloud.py      30
-rw-r--r--   youtube_dl/extractor/orf.py         1103
-rw-r--r--   youtube_dl/extractor/palcomp3.py       9
-rw-r--r--   youtube_dl/extractor/vidlii.py        59
-rw-r--r--   youtube_dl/jsinterp.py                85
-rw-r--r--   youtube_dl/utils.py                    2
14 files changed, 898 insertions, 589 deletions
diff --git a/test/helper.py b/test/helper.py
index 5b7e3dfe2..6f2129eff 100644
--- a/test/helper.py
+++ b/test/helper.py
@@ -5,9 +5,9 @@ import hashlib
import json
import os.path
import re
-import types
import ssl
import sys
+import types
import unittest
import youtube_dl.extractor
@@ -181,18 +181,18 @@ def expect_value(self, got, expected, field):
op, _, expected_num = expected.partition(':')
expected_num = int(expected_num)
if op == 'mincount':
- assert_func = assertGreaterEqual
+ assert_func = self.assertGreaterEqual
msg_tmpl = 'Expected %d items in field %s, but only got %d'
elif op == 'maxcount':
- assert_func = assertLessEqual
+ assert_func = self.assertLessEqual
msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
elif op == 'count':
- assert_func = assertEqual
+ assert_func = self.assertEqual
msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
else:
assert False
assert_func(
- self, len(got), expected_num,
+ len(got), expected_num,
msg_tmpl % (expected_num, field, len(got)))
return
self.assertEqual(
@@ -262,27 +262,6 @@ def assertRegexpMatches(self, text, regexp, msg=None):
self.assertTrue(m, msg)
-def assertGreaterEqual(self, got, expected, msg=None):
- if not (got >= expected):
- if msg is None:
- msg = '%r not greater than or equal to %r' % (got, expected)
- self.assertTrue(got >= expected, msg)
-
-
-def assertLessEqual(self, got, expected, msg=None):
- if not (got <= expected):
- if msg is None:
- msg = '%r not less than or equal to %r' % (got, expected)
- self.assertTrue(got <= expected, msg)
-
-
-def assertEqual(self, got, expected, msg=None):
- if not (got == expected):
- if msg is None:
- msg = '%r not equal to %r' % (got, expected)
- self.assertTrue(got == expected, msg)
-
-
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
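
The removed free-standing helpers predate unittest.TestCase growing
assertGreaterEqual/assertLessEqual (Python 2.7/3.1); the tests now call the
bound methods directly. On an interpreter that lacked them, a shim could be
attached once instead, roughly like this (a hypothetical sketch, not part of
the patch):

    import unittest

    def _assert_greater_equal(self, got, expected, msg=None):
        # same semantics as the removed helper, usable as a bound method
        if msg is None:
            msg = '%r not greater than or equal to %r' % (got, expected)
        self.assertTrue(got >= expected, msg)

    if not hasattr(unittest.TestCase, 'assertGreaterEqual'):
        unittest.TestCase.assertGreaterEqual = _assert_greater_equal
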
diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py
index d55d6ad54..09100a1d6 100644
--- a/test/test_InfoExtractor.py
+++ b/test/test_InfoExtractor.py
@@ -153,6 +153,9 @@ class TestInfoExtractor(unittest.TestCase):
'''
search = self.ie._search_nextjs_data(html, 'testID')
self.assertEqual(search['props']['pageProps']['video']['id'], 'testid')
+ search = self.ie._search_nextjs_data(
+ 'no next.js data here, move along', 'testID', default={'status': 0})
+ self.assertEqual(search['status'], 0)
def test_search_nuxt_data(self):
html = '''
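
The new default parameter makes _search_nextjs_data usable on pages that may
lack a __NEXT_DATA__ block. A rough interactive sketch of both paths (FakeYDL
comes from test/helper.py; the HTML here is made up):

    from test.helper import FakeYDL
    from youtube_dl.extractor.common import InfoExtractor

    ie = InfoExtractor(FakeYDL())
    html = '<script id="__NEXT_DATA__" type="application/json">{"props": 1}</script>'
    assert ie._search_nextjs_data(html, 'x')['props'] == 1
    # without a match, a supplied default suppresses the usual extraction error
    assert ie._search_nextjs_data('no data', 'x', default={'status': 0})['status'] == 0
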
diff --git a/test/test_download.py b/test/test_download.py
index df8b370cf..f7d6a23bc 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -9,8 +9,6 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
- assertGreaterEqual,
- assertLessEqual,
expect_warnings,
get_params,
gettestcases,
@@ -36,12 +34,20 @@ from youtube_dl.utils import (
ExtractorError,
error_to_compat_str,
format_bytes,
+ IDENTITY,
+ preferredencoding,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
RETRIES = 3
+# Some unittest APIs require actual str
+if not isinstance('TEST', str):
+ _encode_str = lambda s: s.encode(preferredencoding())
+else:
+ _encode_str = IDENTITY
+
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
@@ -102,7 +108,7 @@ def generator(test_case, tname):
def print_skipping(reason):
print('Skipping %s: %s' % (test_case['name'], reason))
- self.skipTest(reason)
+ self.skipTest(_encode_str(reason))
if not ie.working():
print_skipping('IE marked as not _WORKING')
@@ -187,16 +193,14 @@ def generator(test_case, tname):
expect_info_dict(self, res_dict, test_case.get('info_dict', {}))
if 'playlist_mincount' in test_case:
- assertGreaterEqual(
- self,
+ self.assertGreaterEqual(
len(res_dict['entries']),
test_case['playlist_mincount'],
'Expected at least %d in playlist %s, but got only %d' % (
test_case['playlist_mincount'], test_case['url'],
len(res_dict['entries'])))
if 'playlist_maxcount' in test_case:
- assertLessEqual(
- self,
+ self.assertLessEqual(
len(res_dict['entries']),
test_case['playlist_maxcount'],
'Expected at most %d in playlist %s, but got %d' % (
@@ -243,8 +247,8 @@ def generator(test_case, tname):
if params.get('test'):
expected_minsize = max(expected_minsize, 10000)
got_fsize = os.path.getsize(tc_filename)
- assertGreaterEqual(
- self, got_fsize, expected_minsize,
+ self.assertGreaterEqual(
+ got_fsize, expected_minsize,
'Expected %s to be at least %s, but it\'s only %s ' %
(tc_filename, format_bytes(expected_minsize),
format_bytes(got_fsize)))
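
The _encode_str shim is needed because some Python 2 unittest APIs (here
skipTest) want the native str (bytes) type, while unicode_literals makes the
in-file literals unicode. A condensed sketch of the pattern; IDENTITY is a
plain pass-through helper from youtube_dl.utils:

    from youtube_dl.utils import IDENTITY, preferredencoding

    # under `from __future__ import unicode_literals`, 'TEST' is unicode on
    # Python 2, so this check selects the encoding wrapper only there
    if not isinstance('TEST', str):
        _encode_str = lambda s: s.encode(preferredencoding())
    else:
        _encode_str = IDENTITY

    assert _encode_str('reason') == 'reason'  # no-op on Python 3
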
diff --git a/test/test_jsinterp.py b/test/test_jsinterp.py
index 91b12f544..da8e98020 100644
--- a/test/test_jsinterp.py
+++ b/test/test_jsinterp.py
@@ -577,9 +577,11 @@ class TestJSInterpreter(unittest.TestCase):
def test_unary_operators(self):
jsi = JSInterpreter('function f(){return 2 - - - 2;}')
self.assertEqual(jsi.call_function('f'), 0)
- # fails
- # jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
- # self.assertEqual(jsi.call_function('f'), 0)
+ jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
+ self.assertEqual(jsi.call_function('f'), 0)
+ # https://github.com/ytdl-org/youtube-dl/issues/32815
+ jsi = JSInterpreter('function f(){return 0 - 7 * - 6;}')
+ self.assertEqual(jsi.call_function('f'), 42)
""" # fails so far
def test_packed(self):
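
The re-enabled cases exercise chained unary operators and a unary minus used
as an operand of '*': since unary minus binds tighter than multiplication,
0 - 7 * - 6 is 0 - (7 * (-6)) = 42. This can be checked directly against the
interpreter with the same API the tests use:

    from youtube_dl.jsinterp import JSInterpreter

    jsi = JSInterpreter('function f(){return 0 - 7 * - 6;}')
    assert jsi.call_function('f') == 42  # unary minus binds tighter than '*'

    jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
    assert jsi.call_function('f') == 0   # -(+(-(-2))) == -2, and 2 + -2 == 0
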
diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
index f45dfec7c..cafba7a5c 100644
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -158,6 +158,10 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js',
'_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ',
),
+ (
+ 'https://www.youtube.com/s/player/590f65a6/player_ias.vflset/en_US/base.js',
+ '1tm7-g_A9zsI8_Lay_', 'xI4Vem4Put_rOg',
+ ),
]
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 6f2aba5ac..dad44435f 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -1039,8 +1039,8 @@ class YoutubeDL(object):
elif result_type in ('playlist', 'multi_video'):
# Protect from infinite recursion due to recursively nested playlists
# (see https://github.com/ytdl-org/youtube-dl/issues/27833)
- webpage_url = ie_result['webpage_url']
- if webpage_url in self._playlist_urls:
+ webpage_url = ie_result.get('webpage_url') # not all pl/mv have this
+ if webpage_url and webpage_url in self._playlist_urls:
self.to_screen(
'[download] Skipping already downloaded playlist: %s'
% ie_result.get('title') or ie_result.get('id'))
@@ -1048,6 +1048,10 @@ class YoutubeDL(object):
self._playlist_level += 1
self._playlist_urls.add(webpage_url)
+ new_result = dict((k, v) for k, v in extra_info.items() if k not in ie_result)
+ if new_result:
+ new_result.update(ie_result)
+ ie_result = new_result
try:
return self.__process_playlist(ie_result, download)
finally:
@@ -1593,6 +1597,28 @@ class YoutubeDL(object):
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
+ def _fill_common_fields(self, info_dict, final=True):
+
+ for ts_key, date_key in (
+ ('timestamp', 'upload_date'),
+ ('release_timestamp', 'release_date'),
+ ):
+ if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
+ # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+ # see http://bugs.python.org/issue1646728)
+ try:
+ upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
+ info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
+ except (ValueError, OverflowError, OSError):
+ pass
+
+ # Auto generate title fields corresponding to the *_number fields when missing
+ # in order to always have clean titles. This is very common for TV series.
+ if final:
+ for field in ('chapter', 'season', 'episode'):
+ if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
+ info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
@@ -1660,24 +1686,7 @@ class YoutubeDL(object):
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
- for ts_key, date_key in (
- ('timestamp', 'upload_date'),
- ('release_timestamp', 'release_date'),
- ):
- if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
- # Working around out-of-range timestamp values (e.g. negative ones on Windows,
- # see http://bugs.python.org/issue1646728)
- try:
- upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
- info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d'))
- except (ValueError, OverflowError, OSError):
- pass
-
- # Auto generate title fields corresponding to the *_number fields when missing
- # in order to always have clean titles. This is very common for TV series.
- for field in ('chapter', 'season', 'episode'):
- if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
- info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+ self._fill_common_fields(info_dict)
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)
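
Hoisting this logic into _fill_common_fields lets the same timestamp-to-date
derivation be applied to playlist and multi_video results, which
process_video_result never touches (the reworked ORF extractor below calls it
via self._downloader._fill_common_fields(result)). The date derivation in
isolation, as a minimal sketch:

    import datetime

    def timestamp_to_date(ts):
        # mirrors the hardening above: out-of-range values (e.g. negative
        # timestamps on Windows, see bpo-1646728) must not crash extraction
        try:
            return datetime.datetime.utcfromtimestamp(ts).strftime('%Y%m%d')
        except (ValueError, OverflowError, OSError):
            return None

    assert timestamp_to_date(1717041587) == '20240530'
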
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 7fae9e57b..9b0016d07 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1169,10 +1169,10 @@ class InfoExtractor(object):
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
- netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
+ netrc_machine = netrc_machine or self._NETRC_MACHINE
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
@@ -1180,7 +1180,7 @@ class InfoExtractor(object):
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
- except (IOError, netrc.NetrcParseError) as err:
+ except (AttributeError, IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
@@ -1490,14 +1490,18 @@ class InfoExtractor(object):
return dict((k, v) for k, v in info.items() if v is not None)
def _search_nextjs_data(self, webpage, video_id, **kw):
- nkw = dict((k, v) for k, v in kw.items() if k in ('transform_source', 'fatal'))
- kw.pop('transform_source', None)
- next_data = self._search_regex(
- r'''<script[^>]+\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>(?P<nd>[^<]+)</script>''',
- webpage, 'next.js data', group='nd', **kw)
- if not next_data:
- return {}
- return self._parse_json(next_data, video_id, **nkw)
+ # ..., *, transform_source=None, fatal=True, default=NO_DEFAULT
+
+ # TODO: remove this backward compat
+ default = kw.get('default', NO_DEFAULT)
+ if default == '{}':
+ kw['default'] = {}
+ kw = compat_kwargs(kw)
+
+ return self._search_json(
+ r'''<script\s[^>]*?\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>''',
+ webpage, 'next.js data', video_id, end_pattern='</script>',
+ **kw)
def _search_nuxt_data(self, webpage, video_id, *args, **kwargs):
"""Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function"""
@@ -3029,7 +3033,6 @@ class InfoExtractor(object):
transform_source=transform_source, default=None)
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
-
# allow passing `transform_source` through to _find_jwplayer_data()
transform_source = kwargs.pop('transform_source', None)
kwfind = compat_kwargs({'transform_source': transform_source}) if transform_source else {}
@@ -3296,12 +3299,16 @@ class InfoExtractor(object):
return ret
@classmethod
- def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
- """ Merge two subtitle dictionaries, language by language. """
- ret = dict(subtitle_dict1)
- for lang in subtitle_dict2:
- ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
- return ret
+ def _merge_subtitles(cls, subtitle_dict1, *subtitle_dicts, **kwargs):
+ """ Merge subtitle dictionaries, language by language. """
+
+ # ..., *, target=None
+ target = kwargs.get('target')
+ if target is None:
+ target = dict(subtitle_dict1)
+ else:
+ subtitle_dicts = (subtitle_dict1,) + subtitle_dicts
+
+ for subtitle_dict in subtitle_dicts:
+ for lang in subtitle_dict:
+ target[lang] = cls._merge_subtitle_items(target.get(lang, []), subtitle_dict[lang])
+ return target
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False)
@@ -3334,6 +3341,29 @@ class InfoExtractor(object):
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
+ def _yes_playlist(self, playlist_id, video_id, *args, **kwargs):
+ # smuggled_data=None, *, playlist_label='playlist', video_label='video'
+ smuggled_data = args[0] if len(args) == 1 else kwargs.get('smuggled_data')
+ playlist_label = kwargs.get('playlist_label', 'playlist')
+ video_label = kwargs.get('video_label', 'video')
+
+ if not playlist_id or not video_id:
+ return not video_id
+
+ no_playlist = (smuggled_data or {}).get('force_noplaylist')
+ if no_playlist is not None:
+ return not no_playlist
+
+ video_id = '' if video_id is True else ' ' + video_id
+ noplaylist = self.get_param('noplaylist')
+ self.to_screen(
+ 'Downloading just the {0}{1} because of --no-playlist'.format(video_label, video_id)
+ if noplaylist else
+ 'Downloading {0}{1} - add --no-playlist to download just the {2}{3}'.format(
+ playlist_label, '' if playlist_id is True else ' ' + playlist_id,
+ video_label, video_id))
+ return not noplaylist
+
class SearchInfoExtractor(InfoExtractor):
"""
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 03d035a27..3da5f8020 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -898,21 +898,13 @@ from .ooyala import (
)
from .ora import OraTVIE
from .orf import (
- ORFTVthekIE,
- ORFFM4IE,
+ ORFONIE,
+ ORFONLiveIE,
ORFFM4StoryIE,
- ORFOE1IE,
- ORFOE3IE,
- ORFNOEIE,
- ORFWIEIE,
- ORFBGLIE,
- ORFOOEIE,
- ORFSTMIE,
- ORFKTNIE,
- ORFSBGIE,
- ORFTIRIE,
- ORFVBGIE,
ORFIPTVIE,
+ ORFPodcastIE,
+ ORFRadioIE,
+ ORFRadioCollectionIE,
)
from .outsidetv import OutsideTVIE
from .packtpub import (
diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py
index 69319857d..2b5e2c15c 100644
--- a/youtube_dl/extractor/mixcloud.py
+++ b/youtube_dl/extractor/mixcloud.py
@@ -1,3 +1,4 @@
+# coding: utf-8
from __future__ import unicode_literals
import itertools
@@ -10,7 +11,7 @@ from ..compat import (
compat_ord,
compat_str,
compat_urllib_parse_unquote,
- compat_zip
+ compat_zip as zip,
)
from ..utils import (
int_or_none,
@@ -24,7 +25,7 @@ class MixcloudBaseIE(InfoExtractor):
def _call_api(self, object_type, object_fields, display_id, username, slug=None):
lookup_key = object_type + 'Lookup'
return self._download_json(
- 'https://www.mixcloud.com/graphql', display_id, query={
+ 'https://app.mixcloud.com/graphql', display_id, query={
'query': '''{
%s(lookup: {username: "%s"%s}) {
%s
@@ -44,7 +45,7 @@ class MixcloudIE(MixcloudBaseIE):
'ext': 'm4a',
'title': 'Cryptkeeper',
'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
- 'uploader': 'Daniel Holbach',
+ 'uploader': 'dholbach', # was: 'Daniel Holbach',
'uploader_id': 'dholbach',
'thumbnail': r're:https?://.*\.jpg',
'view_count': int,
@@ -57,7 +58,7 @@ class MixcloudIE(MixcloudBaseIE):
'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat',
'ext': 'mp3',
'title': 'Caribou 7 inch Vinyl Mix & Chat',
- 'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
+ 'description': r're:Last week Dan Snaith aka Caribou swung by the Brownswood.{136}',
'uploader': 'Gilles Peterson Worldwide',
'uploader_id': 'gillespeterson',
'thumbnail': 're:https?://.*',
@@ -65,6 +66,23 @@ class MixcloudIE(MixcloudBaseIE):
'timestamp': 1422987057,
'upload_date': '20150203',
},
+ 'params': {
+ 'skip_download': '404 not found',
+ },
+ }, {
+ 'url': 'https://www.mixcloud.com/gillespeterson/carnival-m%C3%BAsica-popular-brasileira-mix/',
+ 'info_dict': {
+ 'id': 'gillespeterson_carnival-música-popular-brasileira-mix',
+ 'ext': 'm4a',
+ 'title': 'Carnival Música Popular Brasileira Mix',
+ 'description': r're:Gilles was recently in Brazil to play at Boiler Room.{208}',
+ 'timestamp': 1454347174,
+ 'upload_date': '20160201',
+ 'uploader': 'Gilles Peterson Worldwide',
+ 'uploader_id': 'gillespeterson',
+ 'thumbnail': 're:https?://.*',
+ 'view_count': int,
+ },
}, {
'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/',
'only_matching': True,
@@ -76,10 +94,10 @@ class MixcloudIE(MixcloudBaseIE):
"""Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
return ''.join([
compat_chr(compat_ord(ch) ^ compat_ord(k))
- for ch, k in compat_zip(ciphertext, itertools.cycle(key))])
+ for ch, k in zip(ciphertext, itertools.cycle(key))])
def _real_extract(self, url):
- username, slug = re.match(self._VALID_URL, url).groups()
+ username, slug = self._match_valid_url(url).groups()
username, slug = compat_urllib_parse_unquote(username), compat_urllib_parse_unquote(slug)
track_id = '%s_%s' % (username, slug)
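
The renamed zip import keeps _decrypt_xor_cipher readable: XOR with a cycled
key is its own inverse, so one routine both encrypts and decrypts. The same
idea outside the extractor, with a made-up key and payload:

    import itertools

    def xor_cipher(text, key):
        # XOR each character with the cycled key; applying the function
        # twice with the same key restores the original input
        return ''.join(
            chr(ord(ch) ^ ord(k))
            for ch, k in zip(text, itertools.cycle(key)))

    scrambled = xor_cipher('https://example.com/stream.m4a', 'not-the-real-key')
    assert xor_cipher(scrambled, 'not-the-real-key') == 'https://example.com/stream.m4a'
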
diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py
index 8d537d7ae..1ee78edbc 100644
--- a/youtube_dl/extractor/orf.py
+++ b/youtube_dl/extractor/orf.py
@@ -1,407 +1,394 @@
# coding: utf-8
from __future__ import unicode_literals
+import base64
+import functools
import re
from .common import InfoExtractor
-from ..compat import compat_str
+from .youtube import YoutubeIE
from ..utils import (
clean_html,
determine_ext,
+ ExtractorError,
float_or_none,
- HEADRequest,
int_or_none,
- orderedSet,
- remove_end,
- str_or_none,
+ merge_dicts,
+ mimetype2ext,
+ parse_age_limit,
+ parse_iso8601,
strip_jsonp,
- unescapeHTML,
+ txt_or_none,
unified_strdate,
+ update_url_query,
url_or_none,
)
-
-
-class ORFTVthekIE(InfoExtractor):
- IE_NAME = 'orf:tvthek'
- IE_DESC = 'ORF TVthek'
- _VALID_URL = r'https?://tvthek\.orf\.at/(?:[^/]+/)+(?P<id>\d+)'
+from ..traversal import T, traverse_obj
+
+k_float_or_none = functools.partial(float_or_none, scale=1000)
+
+
+class ORFRadioBase(InfoExtractor):
+ STATION_INFO = {
+ 'fm4': ('fm4', 'fm4', 'orffm4'),
+ 'noe': ('noe', 'oe2n', 'orfnoe'),
+ 'wien': ('wie', 'oe2w', 'orfwie'),
+ 'burgenland': ('bgl', 'oe2b', 'orfbgl'),
+ 'ooe': ('ooe', 'oe2o', 'orfooe'),
+ 'steiermark': ('stm', 'oe2st', 'orfstm'),
+ 'kaernten': ('ktn', 'oe2k', 'orfktn'),
+ 'salzburg': ('sbg', 'oe2s', 'orfsbg'),
+ 'tirol': ('tir', 'oe2t', 'orftir'),
+ 'vorarlberg': ('vbg', 'oe2v', 'orfvbg'),
+ 'oe3': ('oe3', 'oe3', 'orfoe3'),
+ 'oe1': ('oe1', 'oe1', 'orfoe1'),
+ }
+ _ID_NAMES = ('id', 'guid', 'program')
+
+ @classmethod
+ def _get_item_id(cls, data):
+ return traverse_obj(data, *cls._ID_NAMES, expected_type=txt_or_none)
+
+ @classmethod
+ def _get_api_payload(cls, data, expected_id, in_payload=False):
+ if expected_id not in traverse_obj(data, ('payload',)[:1 if in_payload else 0] + (cls._ID_NAMES, T(txt_or_none))):
+ raise ExtractorError('Unexpected API data result', video_id=expected_id)
+ return data['payload']
+
+ @staticmethod
+ def _extract_podcast_upload(data):
+ return traverse_obj(data, {
+ 'url': ('enclosures', 0, 'url'),
+ 'ext': ('enclosures', 0, 'type', T(mimetype2ext)),
+ 'filesize': ('enclosures', 0, 'length', T(int_or_none)),
+ 'title': ('title', T(txt_or_none)),
+ 'description': ('description', T(clean_html)),
+ 'timestamp': (('published', 'postDate'), T(parse_iso8601)),
+ 'duration': ('duration', T(k_float_or_none)),
+ 'series': ('podcast', 'title'),
+ 'uploader': ((('podcast', 'author'), 'station'), T(txt_or_none)),
+ 'uploader_id': ('podcast', 'channel', T(txt_or_none)),
+ }, get_all=False)
+
+ @classmethod
+ def _entries(cls, data, station, item_type=None):
+ if item_type in ('upload', 'podcast-episode'):
+ yield merge_dicts({
+ 'id': cls._get_item_id(data),
+ 'ext': 'mp3',
+ 'vcodec': 'none',
+ }, cls._extract_podcast_upload(data), rev=True)
+ return
+
+ loop_station = cls.STATION_INFO[station][1]
+ for info in traverse_obj(data, ((('streams', Ellipsis), 'stream'), T(lambda v: v if v['loopStreamId'] else None))):
+ item_id = info['loopStreamId']
+ host = info.get('host') or 'loopstream01.apa.at'
+ yield merge_dicts({
+ 'id': item_id.replace('.mp3', ''),
+ 'ext': 'mp3',
+ 'url': update_url_query('https://{0}/'.format(host), {
+ 'channel': loop_station,
+ 'id': item_id,
+ }),
+ 'vcodec': 'none',
+ # '_old_archive_ids': [make_archive_id(old_ie, video_id)],
+ }, traverse_obj(data, {
+ 'title': ('title', T(txt_or_none)),
+ 'description': ('subtitle', T(clean_html)),
+ 'uploader': 'station',
+ 'series': ('programTitle', T(txt_or_none)),
+ }), traverse_obj(info, {
+ 'duration': (('duration',
+ (None, T(lambda x: x['end'] - x['start']))),
+ T(k_float_or_none), any),
+ 'timestamp': (('start', 'startISO'), T(parse_iso8601), any),
+ }))
+
+
+class ORFRadioIE(ORFRadioBase):
+ IE_NAME = 'orf:sound'
+ _STATION_RE = '|'.join(map(re.escape, ORFRadioBase.STATION_INFO.keys()))
+
+ _VALID_URL = (
+ r'https?://sound\.orf\.at/radio/(?P<station>{0})/sendung/(?P<id>\d+)(?:/(?P<show>\w+))?'.format(_STATION_RE),
+ r'https?://(?P<station>{0})\.orf\.at/player/(?P<date>\d{{8}})/(?P<id>\d+)'.format(_STATION_RE),
+ )
_TESTS = [{
- 'url': 'http://tvthek.orf.at/program/Aufgetischt/2745173/Aufgetischt-Mit-der-Steirischen-Tafelrunde/8891389',
+ 'url': 'https://sound.orf.at/radio/ooe/sendung/37802/guten-morgen-oberoesterreich-am-feiertag',
+ 'info_dict': {
+ 'id': '37802',
+ 'title': 'Guten Morgen Oberösterreich am Feiertag',
+ 'description': 'Oberösterreichs meistgehörte regionale Frühsendung.\nRegionale Nachrichten zu jeder halben Stunde.\nModeration: Wolfgang Lehner\nNachrichten: Stephan Schnabl',
+ },
'playlist': [{
- 'md5': '2942210346ed779588f428a92db88712',
+ 'md5': 'f9ff8517dd681b642a2c900e2c9e6085',
'info_dict': {
- 'id': '8896777',
- 'ext': 'mp4',
- 'title': 'Aufgetischt: Mit der Steirischen Tafelrunde',
- 'description': 'md5:c1272f0245537812d4e36419c207b67d',
- 'duration': 2668,
- 'upload_date': '20141208',
- },
+ 'id': '2024-05-30_0559_tl_66_7DaysThu1_443862',
+ 'ext': 'mp3',
+ 'title': 'Guten Morgen Oberösterreich am Feiertag',
+ 'description': 'Oberösterreichs meistgehörte regionale Frühsendung.\nRegionale Nachrichten zu jeder halben Stunde.\nModeration: Wolfgang Lehner\nNachrichten: Stephan Schnabl',
+ 'timestamp': 1717041587,
+ 'upload_date': '20240530',
+ 'uploader': 'ooe',
+ 'duration': 14413.0,
+ }
}],
- 'skip': 'Blocked outside of Austria / Germany',
+ 'skip': 'Shows from ORF Sound are only available for 30 days.'
}, {
- 'url': 'http://tvthek.orf.at/topic/Im-Wandel-der-Zeit/8002126/Best-of-Ingrid-Thurnher/7982256',
+ 'url': 'https://oe1.orf.at/player/20240531/758136',
+ 'md5': '2397717aaf3ae9c22a4f090ee3b8d374',
'info_dict': {
- 'id': '7982259',
- 'ext': 'mp4',
- 'title': 'Best of Ingrid Thurnher',
- 'upload_date': '20140527',
- 'description': 'Viele Jahre war Ingrid Thurnher das "Gesicht" der ZIB 2. Vor ihrem Wechsel zur ZIB 2 im Jahr 1995 moderierte sie unter anderem "Land und Leute", "Österreich-Bild" und "Niederösterreich heute".',
- },
- 'params': {
- 'skip_download': True, # rtsp downloads
+ 'id': '2024-05-31_1905_tl_51_7DaysFri35_2413387',
+ 'ext': 'mp3',
+ 'title': '"Who Cares?"',
+ 'description': 'Europas größte Netzkonferenz re:publica 2024',
+ 'timestamp': 1717175100,
+ 'upload_date': '20240531',
+ 'uploader': 'oe1',
+ 'duration': 1500,
},
- 'skip': 'Blocked outside of Austria / Germany',
- }, {
- 'url': 'http://tvthek.orf.at/topic/Fluechtlingskrise/10463081/Heimat-Fremde-Heimat/13879132/Senioren-betreuen-Migrantenkinder/13879141',
- 'only_matching': True,
- }, {
- 'url': 'http://tvthek.orf.at/profile/Universum/35429',
- 'only_matching': True,
+ 'skip': 'Shows from ORF Sound are only available for 30 days.'
}]
def _real_extract(self, url):
- playlist_id = self._match_id(url)
- webpage = self._download_webpage(url, playlist_id)
+ m = self._match_valid_url(url)
+ station, show_id = m.group('station', 'id')
+ api_station, _, _ = self.STATION_INFO[station]
+ if 'date' in m.groupdict():
+ data = self._download_json(
+ 'https://audioapi.orf.at/{0}/json/4.0/broadcast/{1}/{2}?_o={3}.orf.at'.format(
+ api_station, show_id, m.group('date'), station), show_id)
+ show_id = data['id']
+ else:
+ data = self._download_json(
+ 'https://audioapi.orf.at/{0}/api/json/5.0/broadcast/{1}?_o=sound.orf.at'.format(
+ api_station, show_id), show_id)
- data_jsb = self._parse_json(
- self._search_regex(
- r'<div[^>]+class=(["\']).*?VideoPlaylist.*?\1[^>]+data-jsb=(["\'])(?P<json>.+?)\2',
- webpage, 'playlist', group='json'),
- playlist_id, transform_source=unescapeHTML)['playlist']['videos']
+ data = self._get_api_payload(data, show_id, in_payload=True)
- entries = []
- for sd in data_jsb:
- video_id, title = sd.get('id'), sd.get('title')
- if not video_id or not title:
- continue
- video_id = compat_str(video_id)
- formats = []
- for fd in sd['sources']:
- src = url_or_none(fd.get('src'))
- if not src:
- continue
- format_id_list = []
- for key in ('delivery', 'quality', 'quality_string'):
- value = fd.get(key)
- if value:
- format_id_list.append(value)
- format_id = '-'.join(format_id_list)
- ext = determine_ext(src)
- if ext == 'm3u8':
- m3u8_formats = self._extract_m3u8_formats(
- src, video_id, 'mp4', m3u8_id=format_id, fatal=False)
- if any('/geoprotection' in f['url'] for f in m3u8_formats):
- self.raise_geo_restricted()
- formats.extend(m3u8_formats)
- elif ext == 'f4m':
- formats.extend(self._extract_f4m_formats(
- src, video_id, f4m_id=format_id, fatal=False))
- elif ext == 'mpd':
- formats.extend(self._extract_mpd_formats(
- src, video_id, mpd_id=format_id, fatal=False))
- else:
- formats.append({
- 'format_id': format_id,
- 'url': src,
- 'protocol': fd.get('protocol'),
- })
-
- # Check for geoblocking.
- # There is a property is_geoprotection, but that's always false
- geo_str = sd.get('geoprotection_string')
- if geo_str:
- try:
- http_url = next(
- f['url']
- for f in formats
- if re.match(r'^https?://.*\.mp4$', f['url']))
- except StopIteration:
- pass
- else:
- req = HEADRequest(http_url)
- self._request_webpage(
- req, video_id,
- note='Testing for geoblocking',
- errnote=((
- 'This video seems to be blocked outside of %s. '
- 'You may want to try the streaming-* formats.')
- % geo_str),
- fatal=False)
-
- self._check_formats(formats, video_id)
- self._sort_formats(formats)
+ # site sends ISO8601 GMT date-times with separate TZ offset, ignored
+ # TODO: should `..._date` be calculated relative to TZ?
- subtitles = {}
- for sub in sd.get('subtitles', []):
- sub_src = sub.get('src')
- if not sub_src:
- continue
- subtitles.setdefault(sub.get('lang', 'de-AT'), []).append({
- 'url': sub_src,
- })
-
- upload_date = unified_strdate(sd.get('created_date'))
+ return merge_dicts(
+ {'_type': 'multi_video'},
+ self.playlist_result(
+ self._entries(data, station), show_id,
+ txt_or_none(data.get('title')),
+ clean_html(data.get('subtitle'))))
- thumbnails = []
- preview = sd.get('preview_image_url')
- if preview:
- thumbnails.append({
- 'id': 'preview',
- 'url': preview,
- 'preference': 0,
- })
- image = sd.get('image_full_url')
- if not image and len(data_jsb) == 1:
- image = self._og_search_thumbnail(webpage)
- if image:
- thumbnails.append({
- 'id': 'full',
- 'url': image,
- 'preference': 1,
- })
- entries.append({
- '_type': 'video',
- 'id': video_id,
- 'title': title,
- 'formats': formats,
- 'subtitles': subtitles,
- 'description': sd.get('description'),
- 'duration': int_or_none(sd.get('duration_in_seconds')),
- 'upload_date': upload_date,
- 'thumbnails': thumbnails,
- })
-
- return {
- '_type': 'playlist',
- 'entries': entries,
- 'id': playlist_id,
- }
-
-
-class ORFRadioIE(InfoExtractor):
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- show_date = mobj.group('date')
- show_id = mobj.group('show')
+class ORFRadioCollectionIE(ORFRadioBase):
+ IE_NAME = 'orf:collection'
+ _VALID_URL = r'https?://sound\.orf\.at/collection/(?P<coll_id>\d+)(?:/(?P<item_id>\d+))?'
- data = self._download_json(
- 'http://audioapi.orf.at/%s/api/json/current/broadcast/%s/%s'
- % (self._API_STATION, show_id, show_date), show_id)
-
- entries = []
- for info in data['streams']:
- loop_stream_id = str_or_none(info.get('loopStreamId'))
- if not loop_stream_id:
- continue
- title = str_or_none(data.get('title'))
- if not title:
- continue
- start = int_or_none(info.get('start'), scale=1000)
- end = int_or_none(info.get('end'), scale=1000)
- duration = end - start if end and start else None
- entries.append({
- 'id': loop_stream_id.replace('.mp3', ''),
- 'url': 'https://loopstream01.apa.at/?channel=%s&id=%s' % (self._LOOP_STATION, loop_stream_id),
- 'title': title,
- 'description': clean_html(data.get('subtitle')),
- 'duration': duration,
- 'timestamp': start,
+ _TESTS = [{
+ 'url': 'https://sound.orf.at/collection/4/61908/was-das-uberschreiten-des-15-limits-bedeutet',
+ 'info_dict': {
+ 'id': '2577582',
+ },
+ 'playlist': [{
+ 'md5': '5789cec7d75575ff58d19c0428c80eb3',
+ 'info_dict': {
+ 'id': '2024-06-06_1659_tl_54_7DaysThu6_153926',
'ext': 'mp3',
- 'series': data.get('programTitle'),
- })
-
- return {
- '_type': 'playlist',
- 'id': show_id,
- 'title': data.get('title'),
- 'description': clean_html(data.get('subtitle')),
- 'entries': entries,
- }
-
-
-class ORFFM4IE(ORFRadioIE):
- IE_NAME = 'orf:fm4'
- IE_DESC = 'radio FM4'
- _VALID_URL = r'https?://(?P<station>fm4)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>4\w+)'
- _API_STATION = 'fm4'
- _LOOP_STATION = 'fm4'
-
- _TEST = {
- 'url': 'http://fm4.orf.at/player/20170107/4CC',
- 'md5': '2b0be47375432a7ef104453432a19212',
+ 'title': 'Klimakrise: Was das Überschreiten des 1,5°-Limits bedeutet',
+ 'timestamp': 1717686674,
+ 'upload_date': '20240606',
+ 'uploader': 'fm4',
+ },
+ }],
+ 'skip': 'Shows from ORF Sound are only available for 30 days.'
+ }, {
+ # persistent playlist (FM4 Highlights)
+ 'url': 'https://sound.orf.at/collection/4/',
'info_dict': {
- 'id': '2017-01-07_2100_tl_54_7DaysSat18_31295',
- 'ext': 'mp3',
- 'title': 'Solid Steel Radioshow',
- 'description': 'Die Mixshow von Coldcut und Ninja Tune.',
- 'duration': 3599,
- 'timestamp': 1483819257,
- 'upload_date': '20170107',
+ 'id': '4',
},
- 'skip': 'Shows from ORF radios are only available for 7 days.',
- 'only_matching': True,
- }
-
-
-class ORFNOEIE(ORFRadioIE):
- IE_NAME = 'orf:noe'
- IE_DESC = 'Radio Niederösterreich'
- _VALID_URL = r'https?://(?P<station>noe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'noe'
- _LOOP_STATION = 'oe2n'
-
- _TEST = {
- 'url': 'https://noe.orf.at/player/20200423/NGM',
- 'only_matching': True,
- }
-
-
-class ORFWIEIE(ORFRadioIE):
- IE_NAME = 'orf:wien'
- IE_DESC = 'Radio Wien'
- _VALID_URL = r'https?://(?P<station>wien)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'wie'
- _LOOP_STATION = 'oe2w'
-
- _TEST = {
- 'url': 'https://wien.orf.at/player/20200423/WGUM',
- 'only_matching': True,
- }
-
-
-class ORFBGLIE(ORFRadioIE):
- IE_NAME = 'orf:burgenland'
- IE_DESC = 'Radio Burgenland'
- _VALID_URL = r'https?://(?P<station>burgenland)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'bgl'
- _LOOP_STATION = 'oe2b'
-
- _TEST = {
- 'url': 'https://burgenland.orf.at/player/20200423/BGM',
- 'only_matching': True,
- }
-
-
-class ORFOOEIE(ORFRadioIE):
- IE_NAME = 'orf:oberoesterreich'
- IE_DESC = 'Radio Oberösterreich'
- _VALID_URL = r'https?://(?P<station>ooe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'ooe'
- _LOOP_STATION = 'oe2o'
+ 'playlist_mincount': 10,
+ 'playlist_maxcount': 13,
+ }]
- _TEST = {
- 'url': 'https://ooe.orf.at/player/20200423/OGMO',
- 'only_matching': True,
- }
+ def _real_extract(self, url):
+ coll_id, item_id = self._match_valid_url(url).group('coll_id', 'item_id')
+ data = self._download_json(
+ 'https://collector.orf.at/api/frontend/collections/{0}?_o=sound.orf.at'.format(
+ coll_id), coll_id)
+ data = self._get_api_payload(data, coll_id, in_payload=True)
+
+ def yield_items():
+ for item in traverse_obj(data, (
+ 'content', 'items', lambda _, v: any(k in v['target']['params'] for k in self._ID_NAMES))):
+ if item_id is None or item_id == txt_or_none(item.get('id')):
+ target = item['target']
+ typed_item_id = self._get_item_id(target['params'])
+ station = target['params'].get('station')
+ item_type = target.get('type')
+ if typed_item_id and (station or item_type):
+ yield station, typed_item_id, item_type
+ if item_id is not None:
+ break
+ else:
+ if item_id is not None:
+ raise ExtractorError('Item not found in collection',
+ video_id=coll_id, expected=True)
+
+ def item_playlist(station, typed_item_id, item_type):
+ if item_type == 'upload':
+ item_data = self._download_json('https://audioapi.orf.at/radiothek/api/2.0/upload/{0}?_o=sound.orf.at'.format(
+ typed_item_id), typed_item_id)
+ elif item_type == 'podcast-episode':
+ item_data = self._download_json('https://audioapi.orf.at/radiothek/api/2.0/episode/{0}?_o=sound.orf.at'.format(
+ typed_item_id), typed_item_id)
+ else:
+ api_station, _, _ = self.STATION_INFO[station]
+ item_data = self._download_json(
+ 'https://audioapi.orf.at/{0}/api/json/5.0/{1}/{2}?_o=sound.orf.at'.format(
+ api_station, item_type or 'broadcastitem', typed_item_id), typed_item_id)
+ item_data = self._get_api_payload(item_data, typed_item_id, in_payload=True)
-class ORFSTMIE(ORFRadioIE):
- IE_NAME = 'orf:steiermark'
- IE_DESC = 'Radio Steiermark'
- _VALID_URL = r'https?://(?P<station>steiermark)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'stm'
- _LOOP_STATION = 'oe2st'
+ return merge_dicts(
+ {'_type': 'multi_video'},
+ self.playlist_result(
+ self._entries(item_data, station, item_type), typed_item_id,
+ txt_or_none(data.get('title')),
+ clean_html(data.get('subtitle'))))
- _TEST = {
- 'url': 'https://steiermark.orf.at/player/20200423/STGMS',
- 'only_matching': True,
- }
+ def yield_item_entries():
+ for station, typed_id, item_type in yield_items():
+ yield item_playlist(station, typed_id, item_type)
+ if item_id is not None:
+ # coll_id = '/'.join((coll_id, item_id))
+ return next(yield_item_entries())
-class ORFKTNIE(ORFRadioIE):
- IE_NAME = 'orf:kaernten'
- IE_DESC = 'Radio Kärnten'
- _VALID_URL = r'https?://(?P<station>kaernten)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'ktn'
- _LOOP_STATION = 'oe2k'
+ return self.playlist_result(yield_item_entries(), coll_id, data.get('title'))
- _TEST = {
- 'url': 'https://kaernten.orf.at/player/20200423/KGUMO',
- 'only_matching': True,
- }
+class ORFPodcastIE(ORFRadioBase):
+ IE_NAME = 'orf:podcast'
+ _STATION_RE = '|'.join(map(re.escape, (x[0] for x in ORFRadioBase.STATION_INFO.values()))) + '|tv'
+ _VALID_URL = r'https?://sound\.orf\.at/podcast/(?P<station>{0})/(?P<show>[\w-]+)/(?P<id>[\w-]+)'.format(_STATION_RE)
+ _TESTS = [{
+ 'url': 'https://sound.orf.at/podcast/stm/der-kraeutertipp-von-christine-lackner/rotklee',
+ 'md5': '1f2bab2ba90c2ce0c2754196ea78b35f',
+ 'info_dict': {
+ 'id': 'der-kraeutertipp-von-christine-lackner/rotklee',
+ 'ext': 'mp3',
+ 'title': 'Rotklee',
+ 'description': 'In der Natur weit verbreitet - in der Medizin längst anerkennt: Rotklee. Dieser Podcast begleitet die Sendung "Radio Steiermark am Vormittag", Radio Steiermark, 28. Mai 2024.',
+ 'timestamp': 1716891761,
+ 'upload_date': '20240528',
+ 'uploader_id': 'stm_kraeutertipp',
+ 'uploader': 'ORF Radio Steiermark',
+ 'duration': 101,
+ 'series': 'Der Kräutertipp von Christine Lackner',
+ },
+ 'skip': 'ORF podcasts are only available for a limited time'
+ }]
-class ORFSBGIE(ORFRadioIE):
- IE_NAME = 'orf:salzburg'
- IE_DESC = 'Radio Salzburg'
- _VALID_URL = r'https?://(?P<station>salzburg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'sbg'
- _LOOP_STATION = 'oe2s'
+ _ID_NAMES = ('slug', 'guid')
- _TEST = {
- 'url': 'https://salzburg.orf.at/player/20200423/SGUM',
- 'only_matching': True,
- }
+ def _real_extract(self, url):
+ station, show, show_id = self._match_valid_url(url).group('station', 'show', 'id')
+ data = self._download_json(
+ 'https://audioapi.orf.at/radiothek/api/2.0/podcast/{0}/{1}/{2}'.format(
+ station, show, show_id), show_id)
+ data = self._get_api_payload(data, show_id, in_payload=True)
+ return merge_dicts({
+ 'id': '/'.join((show, show_id)),
+ 'ext': 'mp3',
+ 'vcodec': 'none',
+ }, self._extract_podcast_upload(data), rev=True)
-class ORFTIRIE(ORFRadioIE):
- IE_NAME = 'orf:tirol'
- IE_DESC = 'Radio Tirol'
- _VALID_URL = r'https?://(?P<station>tirol)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'tir'
- _LOOP_STATION = 'oe2t'
- _TEST = {
- 'url': 'https://tirol.orf.at/player/20200423/TGUMO',
- 'only_matching': True,
- }
+class ORFIPTVBase(InfoExtractor):
+ _TITLE_STRIP_RE = ''
+ def _extract_video(self, video_id, webpage, fatal=False):
-class ORFVBGIE(ORFRadioIE):
- IE_NAME = 'orf:vorarlberg'
- IE_DESC = 'Radio Vorarlberg'
- _VALID_URL = r'https?://(?P<station>vorarlberg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'vbg'
- _LOOP_STATION = 'oe2v'
+ data = self._download_json(
+ 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id,
+ video_id)[0]
- _TEST = {
- 'url': 'https://vorarlberg.orf.at/player/20200423/VGUM',
- 'only_matching': True,
- }
+ video = traverse_obj(data, (
+ 'sources', ('default', 'q8c'),
+ T(lambda x: x if x['loadBalancerUrl'] else None),
+ any))
+ load_balancer_url = video['loadBalancerUrl']
-class ORFOE3IE(ORFRadioIE):
- IE_NAME = 'orf:oe3'
- IE_DESC = 'Radio Österreich 3'
- _VALID_URL = r'https?://(?P<station>oe3)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'oe3'
- _LOOP_STATION = 'oe3'
+ try:
+ rendition = self._download_json(
+ load_balancer_url, video_id, transform_source=strip_jsonp)
+ except ExtractorError:
+ rendition = None
+
+ if not rendition:
+ rendition = {
+ 'redirect': {
+ 'smil': re.sub(
+ r'(/)jsonp(/.+\.)mp4$', r'\1dash\2smil/manifest.mpd',
+ load_balancer_url),
+ },
+ }
- _TEST = {
- 'url': 'https://oe3.orf.at/player/20200424/3WEK',
- 'only_matching': True,
- }
+ f = traverse_obj(video, {
+ 'abr': ('audioBitrate', T(int_or_none)),
+ 'vbr': ('bitrate', T(int_or_none)),
+ 'fps': ('videoFps', T(int_or_none)),
+ 'width': ('videoWidth', T(int_or_none)),
+ 'height': ('videoHeight', T(int_or_none)),
+ })
+ formats = []
+ for format_id, format_url in traverse_obj(rendition, (
+ 'redirect', T(dict.items), Ellipsis)):
+ if format_id == 'rtmp':
+ ff = f.copy()
+ ff.update({
+ 'url': format_url,
+ 'format_id': format_id,
+ })
+ formats.append(ff)
+ elif determine_ext(format_url) == 'f4m':
+ formats.extend(self._extract_f4m_formats(
+ format_url, video_id, f4m_id=format_id))
+ elif determine_ext(format_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ format_url, video_id, 'mp4', m3u8_id=format_id,
+ entry_protocol='m3u8_native'))
+ elif determine_ext(format_url) == 'mpd':
+ formats.extend(self._extract_mpd_formats(
+ format_url, video_id, mpd_id=format_id))
-class ORFOE1IE(ORFRadioIE):
- IE_NAME = 'orf:oe1'
- IE_DESC = 'Radio Österreich 1'
- _VALID_URL = r'https?://(?P<station>oe1)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)'
- _API_STATION = 'oe1'
- _LOOP_STATION = 'oe1'
+ if formats or fatal:
+ self._sort_formats(formats)
+ else:
+ return
- _TEST = {
- 'url': 'http://oe1.orf.at/player/20170108/456544',
- 'md5': '34d8a6e67ea888293741c86a099b745b',
- 'info_dict': {
- 'id': '2017-01-08_0759_tl_51_7DaysSun6_256141',
- 'ext': 'mp3',
- 'title': 'Morgenjournal',
- 'duration': 609,
- 'timestamp': 1483858796,
- 'upload_date': '20170108',
- },
- 'skip': 'Shows from ORF radios are only available for 7 days.'
- }
+ return merge_dicts({
+ 'id': video_id,
+ 'title': re.sub(self._TITLE_STRIP_RE, '', self._og_search_title(webpage)),
+ 'description': self._og_search_description(webpage),
+ 'upload_date': unified_strdate(self._html_search_meta(
+ 'dc.date', webpage, 'upload date', fatal=False)),
+ 'formats': formats,
+ }, traverse_obj(data, {
+ 'duration': ('duration', T(k_float_or_none)),
+ 'thumbnail': ('sources', 'default', 'preview', T(url_or_none)),
+ }), rev=True)
-class ORFIPTVIE(InfoExtractor):
+class ORFIPTVIE(ORFIPTVBase):
IE_NAME = 'orf:iptv'
IE_DESC = 'iptv.ORF.at'
+ _WORKING = False # URLs redirect to orf.at/
_VALID_URL = r'https?://iptv\.orf\.at/(?:#/)?stories/(?P<id>\d+)'
+ _TITLE_STRIP_RE = r'\s+-\s+iptv\.ORF\.at\S*$'
_TEST = {
'url': 'http://iptv.orf.at/stories/2275236/',
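
For the ORF Sound entries above, each media URL is assembled from a
loopstream host, the station's loop channel and the stream id. A minimal
reconstruction of that URL building, using values from the first ORFRadioIE
test (STATION_INFO maps station 'ooe' to loop channel 'oe2o'):

    from youtube_dl.utils import update_url_query

    item_id = '2024-05-30_0559_tl_66_7DaysThu1_443862.mp3'
    url = update_url_query('https://loopstream01.apa.at/', {
        'channel': 'oe2o',
        'id': item_id,
    })
    # -> https://loopstream01.apa.at/?channel=oe2o&id=2024-05-30_..._443862.mp3
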
@@ -426,74 +413,32 @@ class ORFIPTVIE(InfoExtractor):
video_id = self._search_regex(
r'data-video(?:id)?="(\d+)"', webpage, 'video id')
- data = self._download_json(
- 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id,
- video_id)[0]
-
- duration = float_or_none(data['duration'], 1000)
+ return self._extract_video(video_id, webpage)
- video = data['sources']['default']
- load_balancer_url = video['loadBalancerUrl']
- abr = int_or_none(video.get('audioBitrate'))
- vbr = int_or_none(video.get('bitrate'))
- fps = int_or_none(video.get('videoFps'))
- width = int_or_none(video.get('videoWidth'))
- height = int_or_none(video.get('videoHeight'))
- thumbnail = video.get('preview')
-
- rendition = self._download_json(
- load_balancer_url, video_id, transform_source=strip_jsonp)
-
- f = {
- 'abr': abr,
- 'vbr': vbr,
- 'fps': fps,
- 'width': width,
- 'height': height,
- }
- formats = []
- for format_id, format_url in rendition['redirect'].items():
- if format_id == 'rtmp':
- ff = f.copy()
- ff.update({
- 'url': format_url,
- 'format_id': format_id,
- })
- formats.append(ff)
- elif determine_ext(format_url) == 'f4m':
- formats.extend(self._extract_f4m_formats(
- format_url, video_id, f4m_id=format_id))
- elif determine_ext(format_url) == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
- format_url, video_id, 'mp4', m3u8_id=format_id))
- else:
- continue
- self._sort_formats(formats)
-
- title = remove_end(self._og_search_title(webpage), ' - iptv.ORF.at')
- description = self._og_search_description(webpage)
- upload_date = unified_strdate(self._html_search_meta(
- 'dc.date', webpage, 'upload date'))
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'duration': duration,
- 'thumbnail': thumbnail,
- 'upload_date': upload_date,
- 'formats': formats,
- }
-
-
-class ORFFM4StoryIE(InfoExtractor):
+class ORFFM4StoryIE(ORFIPTVBase):
IE_NAME = 'orf:fm4:story'
IE_DESC = 'fm4.orf.at stories'
_VALID_URL = r'https?://fm4\.orf\.at/stories/(?P<id>\d+)'
+ _TITLE_STRIP_RE = r'\s+-\s+fm4\.ORF\.at\s*$'
- _TEST = {
+ _TESTS = [{
+ 'url': 'https://fm4.orf.at/stories/3041554/',
+ 'add_ie': ['Youtube'],
+ 'info_dict': {
+ 'id': '3041554',
+ 'title': 'Is The EU Green Deal In Mortal Danger?',
+ },
+ 'playlist_count': 4,
+ 'params': {
+ 'format': 'bestvideo',
+ },
+ }, {
'url': 'http://fm4.orf.at/stories/2865738/',
+ 'info_dict': {
+ 'id': '2865738',
+ 'title': 'Manu Delago und Inner Tongue live',
+ },
'playlist': [{
'md5': 'e1c2c706c45c7b34cf478bbf409907ca',
'info_dict': {
@@ -510,83 +455,311 @@ class ORFFM4StoryIE(InfoExtractor):
'info_dict': {
'id': '547798',
'ext': 'flv',
- 'title': 'Manu Delago und Inner Tongue live (2)',
+ 'title': 'Manu Delago und Inner Tongue live (1)',
'duration': 1504.08,
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20170913',
'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. Hier gibt es Fotos und die gesamte Session als Video.',
},
}],
- }
+ 'skip': 'Videos gone',
+ }]
def _real_extract(self, url):
story_id = self._match_id(url)
webpage = self._download_webpage(url, story_id)
entries = []
- all_ids = orderedSet(re.findall(r'data-video(?:id)?="(\d+)"', webpage))
- for idx, video_id in enumerate(all_ids):
- data = self._download_json(
- 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id,
- video_id)[0]
+ seen_ids = set()
+ for idx, video_id in enumerate(re.findall(r'data-video(?:id)?="(\d+)"', webpage)):
+ if video_id in seen_ids:
+ continue
+ seen_ids.add(video_id)
+ entry = self._extract_video(video_id, webpage, fatal=False)
+ if not entry:
+ continue
+
+ if idx >= 1:
+ # Titles are duplicates, make them unique
+ entry['title'] = '%s (%d)' % (entry['title'], idx)
- duration = float_or_none(data['duration'], 1000)
+ entries.append(entry)
- video = data['sources']['q8c']
- load_balancer_url = video['loadBalancerUrl']
- abr = int_or_none(video.get('audioBitrate'))
- vbr = int_or_none(video.get('bitrate'))
- fps = int_or_none(video.get('videoFps'))
- width = int_or_none(video.get('videoWidth'))
- height = int_or_none(video.get('videoHeight'))
- thumbnail = video.get('preview')
+ seen_ids = set()
+ for yt_id in re.findall(
+ r'data-id\s*=\s*["\']([\w-]+)[^>]+\bclass\s*=\s*["\']youtube\b',
+ webpage):
+ if yt_id in seen_ids:
+ continue
+ seen_ids.add(yt_id)
+ if YoutubeIE.suitable(yt_id):
+ entries.append(self.url_result(yt_id, ie='Youtube', video_id=yt_id))
+
+ return self.playlist_result(
+ entries, story_id,
+ re.sub(self._TITLE_STRIP_RE, '', self._og_search_title(webpage, default='')) or None)
+
+
+class ORFONBase(InfoExtractor):
+ _ENC_PFX = '3dSlfek03nsLKdj4Jsd'
+ _API_PATH = 'episode'
+
+ def _call_api(self, video_id, **kwargs):
+ encrypted_id = base64.b64encode('{0}{1}'.format(
+ self._ENC_PFX, video_id).encode('utf-8')).decode('ascii')
+ return self._download_json(
+ 'https://api-tvthek.orf.at/api/v4.3/public/{0}/encrypted/{1}'.format(
+ self._API_PATH, encrypted_id),
+ video_id, **kwargs)
+
+ @classmethod
+ def _parse_metadata(cls, api_json):
+ return traverse_obj(api_json, {
+ 'id': ('id', T(int), T(txt_or_none)),
+ 'age_limit': ('age_classification', T(parse_age_limit)),
+ 'duration': ((('exact_duration', T(k_float_or_none)),
+ ('duration_second', T(float_or_none))),),
+ 'title': (('title', 'headline'), T(txt_or_none)),
+ 'description': (('description', 'teaser_text'), T(txt_or_none)),
+ # 'media_type': ('video_type', T(txt_or_none)),
+ 'thumbnail': ('_embedded', 'image', 'public_urls', 'highlight_teaser', 'url', T(url_or_none)),
+ 'timestamp': (('date', 'episode_date'), T(parse_iso8601)),
+ 'release_timestamp': ('release_date', T(parse_iso8601)),
+ # 'modified_timestamp': ('updated_at', T(parse_iso8601)),
+ }, get_all=False)
+
+ def _extract_video(self, video_id, segment_id):
+ # Not a segmented episode: return single video
+ # Segmented episode without valid segment id: return entire playlist
+ # Segmented episode with valid segment id and yes-playlist: return entire playlist
+ # Segmented episode with valid segment id and no-playlist: return single video corresponding to segment id
+ # If a multi_video playlist would be returned, but an unsegmented source exists, that source is chosen instead.
+
+ api_json = self._call_api(video_id)
+
+ if traverse_obj(api_json, 'is_drm_protected'):
+ self.report_drm(video_id)
+
+ # updates formats, subtitles
+ def extract_sources(src_json, video_id):
+ for manifest_type in traverse_obj(src_json, ('sources', T(dict.keys), Ellipsis)):
+ for manifest_url in traverse_obj(src_json, ('sources', manifest_type, Ellipsis, 'src', T(url_or_none))):
+ if manifest_type == 'hls':
+ fmts, subs = self._extract_m3u8_formats(
+ manifest_url, video_id, fatal=False, m3u8_id='hls',
+ ext='mp4', entry_protocol='m3u8_native'), {}
+ for f in fmts:
+ if '_vo.' in f['url']:
+ f['acodec'] = 'none'
+ elif manifest_type == 'dash':
+ fmts, subs = self._extract_mpd_formats_and_subtitles(
+ manifest_url, video_id, fatal=False, mpd_id='dash')
+ else:
+ continue
+ formats.extend(fmts)
+ self._merge_subtitles(subs, target=subtitles)
+
+ formats, subtitles = [], {}
+ if segment_id is None:
+ extract_sources(api_json, video_id)
+ if not formats:
+ segments = traverse_obj(api_json, (
+ '_embedded', 'segments', lambda _, v: v['id']))
+ if len(segments) > 1 and segment_id is not None:
+ if not self._yes_playlist(video_id, segment_id, playlist_label='collection', video_label='segment'):
+ segments = [next(s for s in segments if txt_or_none(s['id']) == segment_id)]
+
+ entries = []
+ for seg in segments:
+ formats, subtitles = [], {}
+ extract_sources(seg, segment_id)
+ self._sort_formats(formats)
+ entries.append(merge_dicts({
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }, self._parse_metadata(seg), rev=True))
+ result = merge_dicts(
+ {'_type': 'multi_video' if len(entries) > 1 else 'playlist'},
+ self._parse_metadata(api_json),
+ self.playlist_result(entries, video_id))
+ # not yet processed in core for playlist/multi
+ self._downloader._fill_common_fields(result)
+ return result
+ else:
+ self._sort_formats(formats)
- rendition = self._download_json(
- load_balancer_url, video_id, transform_source=strip_jsonp)
+ for sub_url in traverse_obj(api_json, (
+ '_embedded', 'subtitle',
+ ('xml_url', 'sami_url', 'stl_url', 'ttml_url', 'srt_url', 'vtt_url'),
+ T(url_or_none))):
+ self._merge_subtitles({'de': [{'url': sub_url}]}, target=subtitles)
- f = {
- 'abr': abr,
- 'vbr': vbr,
- 'fps': fps,
- 'width': width,
- 'height': height,
- }
+ return merge_dicts({
+ 'id': video_id,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ # '_old_archive_ids': [self._downloader._make_archive_id({'ie_key': 'ORFTVthek', 'id': video_id})],
+ }, self._parse_metadata(api_json), rev=True)
- formats = []
- for format_id, format_url in rendition['redirect'].items():
- if format_id == 'rtmp':
- ff = f.copy()
- ff.update({
- 'url': format_url,
- 'format_id': format_id,
- })
- formats.append(ff)
- elif determine_ext(format_url) == 'f4m':
- formats.extend(self._extract_f4m_formats(
- format_url, video_id, f4m_id=format_id))
- elif determine_ext(format_url) == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
- format_url, video_id, 'mp4', m3u8_id=format_id))
- else:
- continue
- self._sort_formats(formats)
+ def _real_extract(self, url):
+ video_id, segment_id = self._match_valid_url(url).group('id', 'segment')
+ webpage = self._download_webpage(url, video_id)
- title = remove_end(self._og_search_title(webpage), ' - fm4.ORF.at')
- if idx >= 1:
- # Titles are duplicates, make them unique
- title += ' (' + str(idx + 1) + ')'
- description = self._og_search_description(webpage)
- upload_date = unified_strdate(self._html_search_meta(
- 'dc.date', webpage, 'upload date'))
-
- entries.append({
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'duration': duration,
- 'thumbnail': thumbnail,
- 'upload_date': upload_date,
- 'formats': formats,
- })
-
- return self.playlist_result(entries)
+ # ORF doesn't like 410 or 404
+ if self._search_regex(r'<div\b[^>]*>\s*(Nicht mehr verfügbar)\s*</div>', webpage, 'Availability', default=False):
+ raise ExtractorError('Content is no longer available', expected=True, video_id=video_id)
+
+ return merge_dicts({
+ 'id': video_id,
+ 'title': self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None),
+ 'description': self._html_search_meta(
+ ['description', 'og:description', 'twitter:description'], webpage, default=None),
+ }, self._search_json_ld(webpage, video_id, default={}),
+ self._extract_video(video_id, segment_id),
+ rev=True)
+
+
+class ORFONIE(ORFONBase):
+ IE_NAME = 'orf:on'
+ _VALID_URL = r'https?://on\.orf\.at/video/(?P<id>\d+)(?:/(?P<segment>\d+))?'
+ _TESTS = [{
+ 'url': 'https://on.orf.at/video/14210000/school-of-champions-48',
+ 'info_dict': {
+ 'id': '14210000',
+ 'ext': 'mp4',
+ 'duration': 2651.08,
+ 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0167/98/thumb_16697671_segments_highlight_teaser.jpeg',
+ 'title': 'School of Champions (4/8)',
+ 'description': r're:(?s)Luca hat sein ganzes Leben in den Bergen Südtirols verbracht und ist bei seiner Mutter aufgewachsen, .{1029} Leo$',
+ # 'media_type': 'episode',
+ 'timestamp': 1706558922,
+ 'upload_date': '20240129',
+ 'release_timestamp': 1706472362,
+ 'release_date': '20240128',
+ # 'modified_timestamp': 1712756663,
+ # 'modified_date': '20240410',
+ # '_old_archive_ids': ['orftvthek 14210000'],
+ },
+ 'params': {
+ 'format': 'bestvideo',
+ },
+ 'skip': 'Available until 2024-08-12',
+ }, {
+ 'url': 'https://on.orf.at/video/3220355',
+ 'md5': '925a93b2b9a37da5c9b979d7cf71aa2e',
+ 'info_dict': {
+ 'id': '3220355',
+ 'ext': 'mp4',
+ 'duration': 445.04,
+ 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0002/60/thumb_159573_segments_highlight_teaser.png',
+ 'title': '50 Jahre Burgenland: Der Festumzug',
+ 'description': r're:(?s)Aus allen Landesteilen zogen festlich geschmückte Wagen und Musikkapellen .{270} Jenakowitsch$',
+ # 'media_type': 'episode',
+ 'timestamp': 52916400,
+ 'upload_date': '19710905',
+ 'release_timestamp': 52916400,
+ 'release_date': '19710905',
+ # 'modified_timestamp': 1498536049,
+ # 'modified_date': '20170627',
+ # '_old_archive_ids': ['orftvthek 3220355'],
+ },
+ }, {
+ # Video with multiple segments selecting the second segment
+ 'url': 'https://on.orf.at/video/14226549/15639808/jugendbande-einbrueche-aus-langeweile',
+ 'md5': 'fc151bba8c05ea77ab5693617e4a33d3',
+ 'info_dict': {
+ 'id': '15639808',
+ 'ext': 'mp4',
+ 'duration': 97.707,
+ 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0175/43/thumb_17442704_segments_highlight_teaser.jpg',
+ 'title': 'Jugendbande: Einbrüche aus Langeweile',
+ 'description': r're:Jugendbande: Einbrüche aus Langeweile \| Neuer Kinder- und .{259} Wanda$',
+ # 'media_type': 'segment',
+ 'timestamp': 1715792400,
+ 'upload_date': '20240515',
+ # 'modified_timestamp': 1715794394,
+ # 'modified_date': '20240515',
+ # '_old_archive_ids': ['orftvthek 15639808'],
+ },
+ 'params': {
+ 'noplaylist': True,
+ 'format': 'bestvideo',
+ },
+ 'skip': 'Available until 2024-06-14',
+ }, {
+ # Video with multiple segments and no combined version
+ 'url': 'https://on.orf.at/video/14227864/formel-1-grosser-preis-von-monaco-2024',
+ 'info_dict': {
+ '_type': 'multi_video',
+ 'id': '14227864',
+ 'duration': 18410.52,
+ 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/04/thumb_17503881_segments_highlight_teaser.jpg',
+ 'title': 'Formel 1: Großer Preis von Monaco 2024',
+ 'description': 'md5:aeeb010710ccf70ce28ccb4482243d4f',
+ # 'media_type': 'episode',
+ 'timestamp': 1716721200,
+ 'upload_date': '20240526',
+ 'release_timestamp': 1716721802,
+ 'release_date': '20240526',
+ # 'modified_timestamp': 1716884702,
+ # 'modified_date': '20240528',
+ },
+ 'playlist_count': 42,
+ 'skip': 'Gone: Nicht mehr verfügbar',
+ }, {
+ # Video with multiple segments, but with combined version
+ 'url': 'https://on.orf.at/video/14228172',
+ 'info_dict': {
+ 'id': '14228172',
+ 'ext': 'mp4',
+ 'duration': 3294.878,
+ 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/29/thumb_17528242_segments_highlight_teaser.jpg',
+ 'title': 'Willkommen Österreich mit Stermann & Grissemann',
+ 'description': r're:Zum Saisonfinale freuen sich die urlaubsreifen Gastgeber Stermann und .{1863} Geschichten\.$',
+ # 'media_type': 'episode',
+ 'timestamp': 1716926584,
+ 'upload_date': '20240528',
+ 'release_timestamp': 1716919202,
+ 'release_date': '20240528',
+ # 'modified_timestamp': 1716968045,
+ # 'modified_date': '20240529',
+ # '_old_archive_ids': ['orftvthek 14228172'],
+ },
+ 'params': {
+ 'format': 'bestvideo',
+ },
+ 'skip': 'Gone: Nicht mehr verfügbar',
+ }]
+
+
+class ORFONLiveIE(ORFONBase):
+ _ENC_PFX = '8876324jshjd7293ktd'
+ _API_PATH = 'livestream'
+ _VALID_URL = r'https?://on\.orf\.at/livestream/(?P<id>\d+)(?:/(?P<segment>\d+))?'
+ _TESTS = [{
+ 'url': 'https://on.orf.at/livestream/14320204/pressekonferenz-neos-zu-aktuellen-entwicklungen',
+ 'info_dict': {
+ 'id': '14320204',
+ 'ext': 'mp4',
+ 'title': 'Pressekonferenz: Neos zu aktuellen Entwicklungen',
+ 'description': r're:(?s)Neos-Chefin Beate Meinl-Reisinger informi.{598}ng\."',
+ 'timestamp': 1716886335,
+ 'upload_date': '20240528',
+ # 'modified_timestamp': 1712756663,
+ # 'modified_date': '20240410',
+ # '_old_archive_ids': ['orftvthek 14210000'],
+ },
+ 'params': {
+ 'format': 'bestvideo',
+ },
+ }]
+
+ @classmethod
+ def _parse_metadata(cls, api_json):
+ return merge_dicts(
+ super(ORFONLiveIE, cls)._parse_metadata(api_json),
+ traverse_obj(api_json, {
+ 'timestamp': ('updated_at', T(parse_iso8601)),
+ 'release_timestamp': ('start', T(parse_iso8601)),
+ 'is_live': True,
+ }))
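
A note on the mapping style in _parse_metadata() above: with the dict form of
traverse_obj(), each key is filled from a traversal path and T() marks a
transform applied to the matched value; the bare True is assumed to be copied
through unchanged, which is what is_live relies on. A minimal sketch with
made-up field values:

    # Sketch only: api_json is illustrative, not real ORF API output.
    from youtube_dl.utils import T, parse_iso8601, traverse_obj

    api_json = {'updated_at': '2024-05-28T12:15:35+02:00',
                'start': '2024-05-28T10:00:00+02:00'}
    print(traverse_obj(api_json, {
        'timestamp': ('updated_at', T(parse_iso8601)),
        'release_timestamp': ('start', T(parse_iso8601)),
        'is_live': True,
    }))
    # {'timestamp': 1716891335, 'release_timestamp': 1716883200, 'is_live': True}
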
diff --git a/youtube_dl/extractor/palcomp3.py b/youtube_dl/extractor/palcomp3.py
index fb29d83f9..60f7a4d48 100644
--- a/youtube_dl/extractor/palcomp3.py
+++ b/youtube_dl/extractor/palcomp3.py
@@ -8,7 +8,7 @@ from ..compat import compat_str
from ..utils import (
int_or_none,
str_or_none,
- try_get,
+ traverse_obj,
)
@@ -109,7 +109,7 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE):
}
name'''
- @ classmethod
+ @classmethod
def suitable(cls, url):
return False if re.match(PalcoMP3IE._VALID_URL, url) else super(PalcoMP3ArtistIE, cls).suitable(url)
@@ -118,7 +118,8 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE):
artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)['artist']
def entries():
- for music in (try_get(artist, lambda x: x['musics']['nodes'], list) or []):
+ for music in traverse_obj(artist, (
+ 'musics', 'nodes', lambda _, m: m['musicID'])):
yield self._parse_music(music)
return self.playlist_result(
@@ -137,7 +138,7 @@ class PalcoMP3VideoIE(PalcoMP3BaseIE):
'title': 'Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande',
'description': 'md5:7043342c09a224598e93546e98e49282',
'upload_date': '20161107',
- 'uploader_id': 'maiaramaraisaoficial',
+ 'uploader_id': '@maiaramaraisaoficial',
'uploader': 'Maiara e Maraisa',
}
}]
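
The try_get -> traverse_obj switch above is not purely cosmetic: the trailing
lambda _, m: m['musicID'] acts as a branch filter, so nodes lacking a musicID
are dropped before they can reach _parse_music(). A toy comparison, assuming
the backported traverse_obj semantics where a filter that raises simply
excludes the item:

    from youtube_dl.utils import traverse_obj, try_get

    artist = {'musics': {'nodes': [{'musicID': 1}, {}, {'musicID': 2}]}}

    try_get(artist, lambda x: x['musics']['nodes'], list) or []
    # -> all three nodes, the empty one included

    traverse_obj(artist, ('musics', 'nodes', lambda _, m: m['musicID']))
    # -> [{'musicID': 1}, {'musicID': 2}]
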
diff --git a/youtube_dl/extractor/vidlii.py b/youtube_dl/extractor/vidlii.py
index f4774256b..47f328e87 100644
--- a/youtube_dl/extractor/vidlii.py
+++ b/youtube_dl/extractor/vidlii.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+
from ..utils import (
float_or_none,
get_element_by_id,
@@ -11,6 +12,7 @@ from ..utils import (
+    str_to_int,
     strip_or_none,
     unified_strdate,
     urljoin,
)
@@ -36,6 +38,26 @@ class VidLiiIE(InfoExtractor):
'tags': ['Vidlii', 'Jan', 'Videogames'],
}
}, {
+ # HD
+ 'url': 'https://www.vidlii.com/watch?v=2Ng8Abj2Fkl',
+ 'md5': '450e7da379c884788c3a4fa02a3ce1a4',
+ 'info_dict': {
+ 'id': '2Ng8Abj2Fkl',
+ 'ext': 'mp4',
+ 'title': 'test',
+ 'description': 'md5:cc55a86032a7b6b3cbfd0f6b155b52e9',
+ 'thumbnail': 'https://www.vidlii.com/usfi/thmp/2Ng8Abj2Fkl.jpg',
+ 'uploader': 'VidLii',
+ 'uploader_url': 'https://www.vidlii.com/user/VidLii',
+ 'upload_date': '20200927',
+ 'duration': 5,
+ 'view_count': int,
+ 'comment_count': int,
+ 'average_rating': float,
+ 'categories': ['Film & Animation'],
+ 'tags': list,
+ },
+ }, {
'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
'only_matching': True,
}]
@@ -46,11 +68,31 @@ class VidLiiIE(InfoExtractor):
webpage = self._download_webpage(
'https://www.vidlii.com/watch?v=%s' % video_id, video_id)
- video_url = self._search_regex(
- r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage,
- 'video url', group='url')
+ formats = []
+
+        def add_format(format_url, height=None):
+            height = int(self._search_regex(r'(\d+)\.mp4', format_url,
+                'height', default=height or 360))
+
+ formats.append({
+ 'url': format_url,
+ 'format_id': '%dp' % height if height else None,
+ 'height': height,
+ })
+
+ sources = re.findall(
+ r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1',
+ webpage)
+
+ if len(sources) > 1:
+ add_format(sources[1][1])
+ self._check_formats(formats, video_id)
+ if len(sources) > 0:
+ add_format(sources[0][1])
+
+ self._sort_formats(formats)
- title = self._search_regex(
+ title = self._html_search_regex(
(r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
'title')
@@ -82,9 +125,9 @@ class VidLiiIE(InfoExtractor):
default=None) or self._search_regex(
r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
- view_count = int_or_none(self._search_regex(
- (r'<strong>(\d+)</strong> views',
- r'Views\s*:\s*<strong>(\d+)</strong>'),
+ view_count = str_to_int(self._html_search_regex(
+ (r'<strong>([\d,.]+)</strong> views',
+ r'Views\s*:\s*<strong>([\d,.]+)</strong>'),
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._search_regex(
@@ -109,7 +152,7 @@ class VidLiiIE(InfoExtractor):
return {
'id': video_id,
- 'url': video_url,
+ 'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
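
Two utility behaviours carry the vidlii rewrite above: str_to_int() accepts
the thousands separators that int_or_none() rejects, and add_format() sniffs
the height out of the file name, defaulting to 360. Roughly (the .mp4 path is
illustrative, not a real vidlii URL):

    import re
    from youtube_dl.utils import int_or_none, str_to_int

    str_to_int('1,234')   # -> 1234 (separators stripped, then int())
    int_or_none('1,234')  # -> None

    # height sniffing as in add_format():
    re.search(r'(\d+)\.mp4', '/videos/2Ng8Abj2Fkl.720.mp4').group(1)  # '720'
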
diff --git a/youtube_dl/jsinterp.py b/youtube_dl/jsinterp.py
index 86d902248..02adf6678 100644
--- a/youtube_dl/jsinterp.py
+++ b/youtube_dl/jsinterp.py
@@ -14,6 +14,7 @@ from .utils import (
remove_quotes,
unified_timestamp,
variadic,
+ write_string,
)
from .compat import (
compat_basestring,
@@ -53,15 +54,16 @@ def wraps_op(op):
# NB In principle NaN cannot be checked by membership.
# Here all NaN values are actually this one, so _NaN is _NaN,
-# although _NaN != _NaN.
+# although _NaN != _NaN. Ditto Infinity.
_NaN = float('nan')
+_Infinity = float('inf')
def _js_bit_op(op):
def zeroise(x):
- return 0 if x in (None, JS_Undefined, _NaN) else x
+ return 0 if x in (None, JS_Undefined, _NaN, _Infinity) else x
@wraps_op(op)
def wrapped(a, b):
@@ -84,7 +86,7 @@ def _js_arith_op(op):
def _js_div(a, b):
if JS_Undefined in (a, b) or not (a or b):
return _NaN
- return operator.truediv(a or 0, b) if b else float('inf')
+ return operator.truediv(a or 0, b) if b else _Infinity
def _js_mod(a, b):
@@ -220,6 +222,42 @@ class LocalNameSpace(ChainMap):
return 'LocalNameSpace%s' % (self.maps, )
+class Debugger(object):
+ ENABLED = False
+
+ @staticmethod
+ def write(*args, **kwargs):
+ level = kwargs.get('level', 100)
+
+ def truncate_string(s, left, right=0):
+ if s is None or len(s) <= left + right:
+ return s
+ return '...'.join((s[:left - 3], s[-right:] if right else ''))
+
+ write_string('[debug] JS: {0}{1}\n'.format(
+ ' ' * (100 - level),
+ ' '.join(truncate_string(compat_str(x), 50, 50) for x in args)))
+
+ @classmethod
+ def wrap_interpreter(cls, f):
+ def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs):
+ if cls.ENABLED and stmt.strip():
+ cls.write(stmt, level=allow_recursion)
+ try:
+ ret, should_ret = f(self, stmt, local_vars, allow_recursion, *args, **kwargs)
+ except Exception as e:
+ if cls.ENABLED:
+ if isinstance(e, ExtractorError):
+ e = e.orig_msg
+ cls.write('=> Raises:', e, '<-|', stmt, level=allow_recursion)
+ raise
+ if cls.ENABLED and stmt.strip():
+                if should_ret or repr(ret) != stmt:
+ cls.write(['->', '=>'][should_ret], repr(ret), '<-|', stmt, level=allow_recursion)
+ return ret, should_ret
+ return interpret_statement
+
+
class JSInterpreter(object):
__named_object_counter = 0
@@ -307,8 +345,7 @@ class JSInterpreter(object):
def __op_chars(cls):
op_chars = set(';,[')
for op in cls._all_operators():
- for c in op[0]:
- op_chars.add(c)
+ op_chars.update(op[0])
return op_chars
def _named_object(self, namespace, obj):
@@ -326,9 +363,8 @@ class JSInterpreter(object):
# collections.Counter() is ~10% slower in both 2.7 and 3.9
counters = dict((k, 0) for k in _MATCHING_PARENS.values())
start, splits, pos, delim_len = 0, 0, 0, len(delim) - 1
- in_quote, escaping, skipping = None, False, 0
- after_op, in_regex_char_group = True, False
-
+ in_quote, escaping, after_op, in_regex_char_group = None, False, True, False
+ skipping = 0
for idx, char in enumerate(expr):
paren_delta = 0
if not in_quote:
@@ -382,10 +418,12 @@ class JSInterpreter(object):
return separated[0][1:].strip(), separated[1].strip()
@staticmethod
- def _all_operators():
- return itertools.chain(
- # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
- _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS)
+ def _all_operators(_cached=[]):
+ if not _cached:
+ _cached.extend(itertools.chain(
+ # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
+ _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS))
+ return _cached
def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion):
if op in ('||', '&&'):
@@ -416,7 +454,7 @@ class JSInterpreter(object):
except Exception as e:
if allow_undefined:
return JS_Undefined
- raise self.Exception('Cannot get index {idx:.100}'.format(**locals()), expr=repr(obj), cause=e)
+ raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
def _dump(self, obj, namespace):
try:
@@ -438,6 +476,7 @@ class JSInterpreter(object):
_FINALLY_RE = re.compile(r'finally\s*\{')
_SWITCH_RE = re.compile(r'switch\s*\(')
+ @Debugger.wrap_interpreter
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
if allow_recursion < 0:
raise self.Exception('Recursion limit reached')
@@ -511,7 +550,6 @@ class JSInterpreter(object):
expr = self._dump(inner, local_vars) + outer
if expr.startswith('('):
-
m = re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr)
if m:
# short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig`
@@ -693,7 +731,7 @@ class JSInterpreter(object):
(?P<op>{_OPERATOR_RE})?
=(?!=)(?P<expr>.*)$
)|(?P<return>
- (?!if|return|true|false|null|undefined)(?P<name>{_NAME_RE})$
+ (?!if|return|true|false|null|undefined|NaN|Infinity)(?P<name>{_NAME_RE})$
)|(?P<indexing>
(?P<in>{_NAME_RE})\[(?P<idx>.+)\]$
)|(?P<attribute>
@@ -727,11 +765,12 @@ class JSInterpreter(object):
raise JS_Break()
elif expr == 'continue':
raise JS_Continue()
-
elif expr == 'undefined':
return JS_Undefined, should_return
elif expr == 'NaN':
return _NaN, should_return
+ elif expr == 'Infinity':
+ return _Infinity, should_return
elif md.get('return'):
return local_vars[m.group('name')], should_return
@@ -760,18 +799,28 @@ class JSInterpreter(object):
right_expr = separated.pop()
# handle operators that are both unary and binary, minimal BODMAS
if op in ('+', '-'):
+ # simplify/adjust consecutive instances of these operators
undone = 0
while len(separated) > 1 and not separated[-1].strip():
undone += 1
separated.pop()
if op == '-' and undone % 2 != 0:
right_expr = op + right_expr
+ elif op == '+':
+ while len(separated) > 1 and separated[-1].strip() in self.OP_CHARS:
+ right_expr = separated.pop() + right_expr
+                    # a hanging op at the end of the left side is unary: '+' is stripped, '-' is pushed onto the right
left_val = separated[-1]
for dm_op in ('*', '%', '/', '**'):
bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim))
if len(bodmas) > 1 and not bodmas[-1].strip():
expr = op.join(separated) + op + right_expr
- right_expr = None
+ if len(separated) > 1:
+ separated.pop()
+ right_expr = op.join((left_val, right_expr))
+ else:
+ separated = [op.join((left_val, right_expr))]
+ right_expr = None
break
if right_expr is None:
continue
@@ -797,6 +846,8 @@ class JSInterpreter(object):
def eval_method():
if (variable, member) == ('console', 'debug'):
+ if Debugger.ENABLED:
+ Debugger.write(self.interpret_expression('[{}]'.format(arg_str), local_vars, allow_recursion))
return
types = {
'String': compat_str,
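
The Debugger added to jsinterp.py is a tracing decorator: wrap_interpreter()
intercepts interpret_statement() so that, while Debugger.ENABLED is set, each
statement, its result and any raised error are echoed through write_string(),
indented by call depth. A hypothetical session against the patched module:

    from youtube_dl.jsinterp import Debugger, JSInterpreter

    Debugger.ENABLED = True  # module-wide switch, off by default
    jsi = JSInterpreter('function f(x){ return x % "12345".length + Infinity; }')
    print(jsi.call_function('f', 7))  # -> inf, with each statement traced
    Debugger.ENABLED = False
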
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index cd4303566..113c913df 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -2406,7 +2406,7 @@ class ExtractorError(YoutubeDLError):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
-
+ self.orig_msg = msg
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
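
The one-line utils.py change exists for the Debugger's benefit: ExtractorError
normally decorates its message (video id prefix, bug-report note), so orig_msg
keeps the bare text that wrap_interpreter() prints when a statement raises.
Sketch:

    from youtube_dl.utils import ExtractorError

    try:
        raise ExtractorError('Recursion limit reached', expected=True)
    except ExtractorError as e:
        assert e.orig_msg == 'Recursion limit reached'
        assert e.orig_msg in str(e)  # str(e) may add video id / report hints
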