29 files changed, 3496 insertions, 1628 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d3b9ae016..8234e0ccb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -116,7 +116,7 @@ jobs:
    strategy:
      fail-fast: true
      matrix:
-        os: [ubuntu-20.04]
+        os: [ubuntu-22.04]
        python-version: ${{ fromJSON(needs.select.outputs.cpython-versions) }}
        python-impl: [cpython]
        ytdl-test-set: ${{ fromJSON(needs.select.outputs.test-set) }}
@@ -133,12 +133,12 @@ jobs:
          ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
          run-tests-ext: bat
        # jython
-        - os: ubuntu-20.04
+        - os: ubuntu-22.04
          python-version: 2.7
          python-impl: jython
          ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
          run-tests-ext: sh
-        - os: ubuntu-20.04
+        - os: ubuntu-22.04
          python-version: 2.7
          python-impl: jython
          ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
@@ -160,7 +160,7 @@ jobs:
      # NB may run apt-get install in Linux
      uses: ytdl-org/setup-python@v1
      env:
-        # Temporary workaround for Python 3.5 failures - May 2024
+        # Temporary (?) workaround for Python 3.5 failures - May 2024
        PIP_TRUSTED_HOST: "pypi.python.org pypi.org files.pythonhosted.org"
      with:
        python-version: ${{ matrix.python-version }}
@@ -240,7 +240,10 @@ jobs:
      # install 2.7
      shell: bash
      run: |
-        sudo apt-get install -y python2 python-is-python2
+        # Ubuntu 22.04 no longer has python-is-python2: fetch it
+        curl -L "http://launchpadlibrarian.net/474693132/python-is-python2_2.7.17-4_all.deb" -o python-is-python2.deb
+        sudo apt-get install -y python2
+        sudo dpkg --force-breaks -i python-is-python2.deb
        echo "PYTHONHOME=/usr" >> "$GITHUB_ENV"
    #-------- Python 2.6 --
    - name: Set up Python 2.6 environment
diff --git a/test/helper.py b/test/helper.py
index 5b7e3dfe2..6f2129eff 100644
--- a/test/helper.py
+++ b/test/helper.py
@@ -5,9 +5,9 @@ import hashlib
import json
import os.path
import re
-import types
import ssl
import sys
+import types
import unittest

import youtube_dl.extractor
@@ -181,18 +181,18 @@ def expect_value(self, got, expected, field):
            op, _, expected_num = expected.partition(':')
            expected_num = int(expected_num)
            if op == 'mincount':
-                assert_func = assertGreaterEqual
+                assert_func = self.assertGreaterEqual
                msg_tmpl = 'Expected %d items in field %s, but only got %d'
            elif op == 'maxcount':
-                assert_func = assertLessEqual
+                assert_func = self.assertLessEqual
                msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
            elif op == 'count':
-                assert_func = assertEqual
+                assert_func = self.assertEqual
                msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
            else:
                assert False
            assert_func(
-                self, len(got), expected_num,
+                len(got), expected_num,
                msg_tmpl % (expected_num, field, len(got)))
            return
        self.assertEqual(
@@ -262,27 +262,6 @@ def assertRegexpMatches(self, text, regexp, msg=None):
        self.assertTrue(m, msg)


-def assertGreaterEqual(self, got, expected, msg=None):
-    if not (got >= expected):
-        if msg is None:
-            msg = '%r not greater than or equal to %r' % (got, expected)
-        self.assertTrue(got >= expected, msg)
-
-
-def assertLessEqual(self, got, expected, msg=None):
-    if not (got <= expected):
-        if msg is None:
-            msg = '%r not less than or equal to %r' % (got, expected)
-        self.assertTrue(got <= expected, msg)
-
-
-def assertEqual(self, got, expected, msg=None):
-    if not (got == expected):
-        if msg is None:
-            msg = '%r not equal to %r' % (got, expected)
-        self.assertTrue(got == expected, msg)
-
-
def expect_warnings(ydl, warnings_re):
    real_warning = ydl.report_warning
diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py
index d55d6ad54..09100a1d6 100644
--- a/test/test_InfoExtractor.py
+++ b/test/test_InfoExtractor.py
@@ -153,6 +153,9 @@ class TestInfoExtractor(unittest.TestCase):
        '''
        search = self.ie._search_nextjs_data(html, 'testID')
        self.assertEqual(search['props']['pageProps']['video']['id'], 'testid')
+        search = self.ie._search_nextjs_data(
+            'no next.js data here, move along', 'testID', default={'status': 0})
+        self.assertEqual(search['status'], 0)

    def test_search_nuxt_data(self):
        html = '''
diff --git a/test/test_cache.py b/test/test_cache.py
index 931074aa1..0431f4f15 100644
--- a/test/test_cache.py
+++ b/test/test_cache.py
@@ -63,9 +63,21 @@ class TestCache(unittest.TestCase):
        obj = {'x': 1, 'y': ['ä', '\\a', True]}
        c.store('test_cache', 'k.', obj)
        self.assertEqual(c.load('test_cache', 'k.', min_ver='1970.01.01'), obj)
-        new_version = '.'.join(('%d' % ((v + 1) if i == 0 else v, )) for i, v in enumerate(version_tuple(__version__)))
+        new_version = '.'.join(('%0.2d' % ((v + 1) if i == 0 else v, )) for i, v in enumerate(version_tuple(__version__)))
        self.assertIs(c.load('test_cache', 'k.', min_ver=new_version), None)

+    def test_cache_clear(self):
+        ydl = FakeYDL({
+            'cachedir': self.test_dir,
+        })
+        c = Cache(ydl)
+        c.store('test_cache', 'k.', 'kay')
+        c.store('test_cache', 'l.', 'ell')
+        self.assertEqual(c.load('test_cache', 'k.'), 'kay')
+        c.clear('test_cache', 'k.')
+        self.assertEqual(c.load('test_cache', 'k.'), None)
+        self.assertEqual(c.load('test_cache', 'l.'), 'ell')
+

if __name__ == '__main__':
    unittest.main()
diff --git a/test/test_download.py b/test/test_download.py
index df8b370cf..f7d6a23bc 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -9,8 +9,6 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import (
-    assertGreaterEqual,
-    assertLessEqual,
    expect_warnings,
    get_params,
    gettestcases,
@@ -36,12 +34,20 @@ from youtube_dl.utils import (
    ExtractorError,
    error_to_compat_str,
    format_bytes,
+    IDENTITY,
+    preferredencoding,
    UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor

RETRIES = 3

+# Some unittest APIs require actual str
+if not isinstance('TEST', str):
+    _encode_str = lambda s: s.encode(preferredencoding())
+else:
+    _encode_str = IDENTITY
+

class YoutubeDL(youtube_dl.YoutubeDL):
    def __init__(self, *args, **kwargs):
@@ -102,7 +108,7 @@ def generator(test_case, tname):

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
-            self.skipTest(reason)
+            self.skipTest(_encode_str(reason))

        if not ie.working():
            print_skipping('IE marked as not _WORKING')
@@ -187,16 +193,14 @@ def generator(test_case, tname):
            expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

            if 'playlist_mincount' in test_case:
-                assertGreaterEqual(
-                    self,
+                self.assertGreaterEqual(
                    len(res_dict['entries']),
                    test_case['playlist_mincount'],
                    'Expected at least %d in playlist %s, but got only %d' % (
                        test_case['playlist_mincount'], test_case['url'],
                        len(res_dict['entries'])))
            if 'playlist_maxcount' in test_case:
-                assertLessEqual(
-                    self,
+                self.assertLessEqual(
                    len(res_dict['entries']),
                    test_case['playlist_maxcount'],
                    'Expected at most %d in playlist %s, but got %d' % (
@@ -243,8 +247,8 @@ def generator(test_case, tname):
                    if params.get('test'):
                        expected_minsize = max(expected_minsize, 10000)
                    got_fsize = os.path.getsize(tc_filename)
-                    assertGreaterEqual(
-                        self, got_fsize, expected_minsize,
+                    self.assertGreaterEqual(
+                        got_fsize, expected_minsize,
                        'Expected %s to be at least %s, but it\'s only %s ' %
                        (tc_filename, format_bytes(expected_minsize),
                         format_bytes(got_fsize)))
diff --git a/test/test_jsinterp.py b/test/test_jsinterp.py
index 91b12f544..479cb43a0 100644
--- a/test/test_jsinterp.py
+++ b/test/test_jsinterp.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# coding: utf-8

from __future__ import unicode_literals

@@ -6,199 +7,257 @@ from __future__ import unicode_literals
import os
import sys
import unittest
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import math
import re
+import time

+from youtube_dl.compat import compat_str as str
from youtube_dl.jsinterp import JS_Undefined, JSInterpreter

+NaN = object()
+

-class TestJSInterpreter(unittest.TestCase):
-    def test_basic(self):
-        jsi = JSInterpreter('function x(){;}')
-        self.assertEqual(jsi.call_function('x'), None)
-        self.assertEqual(repr(jsi.extract_function('x')), 'F<x>')
-
-        jsi = JSInterpreter('function x3(){return 42;}')
-        self.assertEqual(jsi.call_function('x3'), 42)
-
-        jsi = JSInterpreter('function x3(){42}')
-        self.assertEqual(jsi.call_function('x3'), None)
+class TestJSInterpreter(unittest.TestCase):
+    def _test(self, jsi_or_code, expected, func='f', args=()):
+        if isinstance(jsi_or_code, str):
+            jsi_or_code = JSInterpreter(jsi_or_code)
+        got = jsi_or_code.call_function(func, *args)
+        if expected is NaN:
+            self.assertTrue(math.isnan(got), '{0} is not NaN'.format(got))
+        else:
+            self.assertEqual(got, expected)

-        jsi = JSInterpreter('var x5 = function(){return 42;}')
-        self.assertEqual(jsi.call_function('x5'), 42)
+    def test_basic(self):
+        jsi = JSInterpreter('function f(){;}')
+        self.assertEqual(repr(jsi.extract_function('f')), 'F<f>')
+        self._test(jsi, None)

-    def test_calc(self):
-        jsi = JSInterpreter('function x4(a){return 2*a+1;}')
-        self.assertEqual(jsi.call_function('x4', 3), 7)
+        self._test('function f(){return 42;}', 42)
+        self._test('function f(){42}', None)
+        self._test('var f = function(){return 42;}', 42)

    def test_add(self):
-        jsi = JSInterpreter('function f(){return 42 + 7;}')
-        self.assertEqual(jsi.call_function('f'), 49)
-        jsi = JSInterpreter('function f(){return 42 + undefined;}')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
-        jsi = JSInterpreter('function f(){return 42 + null;}')
-        self.assertEqual(jsi.call_function('f'), 42)
+        self._test('function f(){return 42 + 7;}', 49)
+        self._test('function f(){return 42 + undefined;}', NaN)
+        self._test('function f(){return 42 + null;}', 42)
+        self._test('function f(){return 1 + "";}', '1')
+        self._test('function f(){return 42 + "7";}', '427')
+        self._test('function f(){return false + true;}', 1)
+        self._test('function f(){return "false" + true;}', 'falsetrue')
+        self._test('function f(){return '
+                   '1 + "2" + [3,4] + {k: 56} + null + undefined + Infinity;}',
+                   '123,4[object Object]nullundefinedInfinity')

    def test_sub(self):
-        jsi = JSInterpreter('function f(){return 42 - 7;}')
-        self.assertEqual(jsi.call_function('f'), 35)
-        jsi = JSInterpreter('function f(){return 42 - undefined;}')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
-        jsi = JSInterpreter('function f(){return 42 - null;}')
-        self.assertEqual(jsi.call_function('f'), 42)
+        self._test('function f(){return 42 - 7;}', 35)
+        self._test('function f(){return 42 - undefined;}', NaN)
+        self._test('function f(){return 42 - null;}', 42)
+        self._test('function f(){return 42 - "7";}', 35)
+        self._test('function f(){return 42 - "spam";}', NaN)

    def test_mul(self):
-        jsi = JSInterpreter('function f(){return 42 * 7;}')
-        self.assertEqual(jsi.call_function('f'), 294)
-        jsi = JSInterpreter('function f(){return 42 * undefined;}')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
-        jsi = JSInterpreter('function f(){return 42 * null;}')
-        self.assertEqual(jsi.call_function('f'), 0)
+        self._test('function f(){return 42 * 7;}', 294)
+        self._test('function f(){return 42 * undefined;}', NaN)
+        self._test('function f(){return 42 * null;}', 0)
+        self._test('function f(){return 42 * "7";}', 294)
+        self._test('function f(){return 42 * "eggs";}', NaN)

    def test_div(self):
        jsi = JSInterpreter('function f(a, b){return a / b;}')
-        self.assertTrue(math.isnan(jsi.call_function('f', 0, 0)))
-        self.assertTrue(math.isnan(jsi.call_function('f', JS_Undefined, 1)))
-        self.assertTrue(math.isinf(jsi.call_function('f', 2, 0)))
-        self.assertEqual(jsi.call_function('f', 0, 3), 0)
+        self._test(jsi, NaN, args=(0, 0))
+        self._test(jsi, NaN, args=(JS_Undefined, 1))
+        self._test(jsi, float('inf'), args=(2, 0))
+        self._test(jsi, 0, args=(0, 3))
+        self._test(jsi, 6, args=(42, 7))
+        self._test(jsi, 0, args=(42, float('inf')))
+        self._test(jsi, 6, args=("42", 7))
+        self._test(jsi, NaN, args=("spam", 7))

    def test_mod(self):
-        jsi = JSInterpreter('function f(){return 42 % 7;}')
-        self.assertEqual(jsi.call_function('f'), 0)
-        jsi = JSInterpreter('function f(){return 42 % 0;}')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
-        jsi = JSInterpreter('function f(){return 42 % undefined;}')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
+        self._test('function f(){return 42 % 7;}', 0)
+        self._test('function f(){return 42 % 0;}', NaN)
+        self._test('function f(){return 42 % undefined;}', NaN)
+        self._test('function f(){return 42 % "7";}', 0)
+        self._test('function f(){return 42 % "beans";}', NaN)

    def test_exp(self):
-        jsi = JSInterpreter('function f(){return 42 ** 2;}')
-        self.assertEqual(jsi.call_function('f'), 1764)
-        jsi = JSInterpreter('function f(){return 42 ** undefined;}')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
-        jsi = JSInterpreter('function f(){return 42 ** null;}')
-        self.assertEqual(jsi.call_function('f'), 1)
-        jsi = JSInterpreter('function f(){return undefined ** 42;}')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
+        self._test('function f(){return 42 ** 2;}', 1764)
+        self._test('function f(){return 42 ** undefined;}', NaN)
+        self._test('function f(){return 42 ** null;}', 1)
+        self._test('function f(){return undefined ** 0;}', 1)
+        self._test('function f(){return undefined ** 42;}', NaN)
+        self._test('function f(){return 42 ** "2";}', 1764)
+        self._test('function f(){return 42 ** "spam";}', NaN)
+
+    def test_calc(self):
+        self._test('function f(a){return 2*a+1;}', 7, args=[3])

    def test_empty_return(self):
-        jsi = JSInterpreter('function f(){return; y()}')
-        self.assertEqual(jsi.call_function('f'), None)
+        self._test('function f(){return; y()}', None)

    def test_morespace(self):
-        jsi = JSInterpreter('function x (a) { return 2 * a + 1 ; }')
-        self.assertEqual(jsi.call_function('x', 3), 7)
-
-        jsi = JSInterpreter('function f () { x = 2 ; return x; }')
-        self.assertEqual(jsi.call_function('f'), 2)
+        self._test('function f (a) { return 2 * a + 1 ; }', 7, args=[3])
+        self._test('function f () { x = 2 ; return x; }', 2)

    def test_strange_chars(self):
-        jsi = JSInterpreter('function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }')
-        self.assertEqual(jsi.call_function('$_xY1', 20), 21)
+        self._test('function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }',
+                   21, args=[20], func='$_xY1')

    def test_operators(self):
-        jsi = JSInterpreter('function f(){return 1 << 5;}')
-        self.assertEqual(jsi.call_function('f'), 32)
-
-        jsi = JSInterpreter('function f(){return 2 ** 5}')
-        self.assertEqual(jsi.call_function('f'), 32)
-
-        jsi = JSInterpreter('function f(){return 19 & 21;}')
-        self.assertEqual(jsi.call_function('f'), 17)
-
-        jsi = JSInterpreter('function f(){return 11 >> 2;}')
-        self.assertEqual(jsi.call_function('f'), 2)
-
-        jsi = JSInterpreter('function f(){return []? 2+3: 4;}')
-        self.assertEqual(jsi.call_function('f'), 5)
-
-        jsi = JSInterpreter('function f(){return 1 == 2}')
-        self.assertEqual(jsi.call_function('f'), False)
-
-        jsi = JSInterpreter('function f(){return 0 && 1 || 2;}')
-        self.assertEqual(jsi.call_function('f'), 2)
-
-        jsi = JSInterpreter('function f(){return 0 ?? 42;}')
-        self.assertEqual(jsi.call_function('f'), 0)
-
-        jsi = JSInterpreter('function f(){return "life, the universe and everything" < 42;}')
-        self.assertFalse(jsi.call_function('f'))
+        self._test('function f(){return 1 << 5;}', 32)
+        self._test('function f(){return 2 ** 5}', 32)
+        self._test('function f(){return 19 & 21;}', 17)
+        self._test('function f(){return 11 >> 2;}', 2)
+        self._test('function f(){return []? 2+3: 4;}', 5)
+        # equality
+        self._test('function f(){return 1 == 1}', True)
+        self._test('function f(){return 1 == 1.0}', True)
+        self._test('function f(){return 1 == "1"}', True)
+        self._test('function f(){return 1 == 2}', False)
+        self._test('function f(){return 1 != "1"}', False)
+        self._test('function f(){return 1 != 2}', True)
+        self._test('function f(){var x = {a: 1}; var y = x; return x == y}', True)
+        self._test('function f(){var x = {a: 1}; return x == {a: 1}}', False)
+        self._test('function f(){return NaN == NaN}', False)
+        self._test('function f(){return null == undefined}', True)
+        self._test('function f(){return "spam, eggs" == "spam, eggs"}', True)
+        # strict equality
+        self._test('function f(){return 1 === 1}', True)
+        self._test('function f(){return 1 === 1.0}', True)
+        self._test('function f(){return 1 === "1"}', False)
+        self._test('function f(){return 1 === 2}', False)
+        self._test('function f(){var x = {a: 1}; var y = x; return x === y}', True)
+        self._test('function f(){var x = {a: 1}; return x === {a: 1}}', False)
+        self._test('function f(){return NaN === NaN}', False)
+        self._test('function f(){return null === undefined}', False)
+        self._test('function f(){return null === null}', True)
+        self._test('function f(){return undefined === undefined}', True)
+        self._test('function f(){return "uninterned" === "uninterned"}', True)
+        self._test('function f(){return 1 === 1}', True)
+        self._test('function f(){return 1 === "1"}', False)
+        self._test('function f(){return 1 !== 1}', False)
+        self._test('function f(){return 1 !== "1"}', True)
+        # expressions
+        self._test('function f(){return 0 && 1 || 2;}', 2)
+        self._test('function f(){return 0 ?? 42;}', 0)
+        self._test('function f(){return "life, the universe and everything" < 42;}', False)
+        # https://github.com/ytdl-org/youtube-dl/issues/32815
+        self._test('function f(){return 0 - 7 * - 6;}', 42)
+
+    def test_bitwise_operators_typecast(self):
+        # madness
+        self._test('function f(){return null << 5}', 0)
+        self._test('function f(){return undefined >> 5}', 0)
+        self._test('function f(){return 42 << NaN}', 42)
+        self._test('function f(){return 42 << Infinity}', 42)
+        self._test('function f(){return 0.0 << null}', 0)
+        self._test('function f(){return NaN << 42}', 0)
+        self._test('function f(){return "21.9" << 1}', 42)
+        self._test('function f(){return true << "5";}', 32)
+        self._test('function f(){return true << true;}', 2)
+        self._test('function f(){return "19" & "21.9";}', 17)
+        self._test('function f(){return "19" & false;}', 0)
+        self._test('function f(){return "11.0" >> "2.1";}', 2)
+        self._test('function f(){return 5 ^ 9;}', 12)
+        self._test('function f(){return 0.0 << NaN}', 0)
+        self._test('function f(){return null << undefined}', 0)
+        self._test('function f(){return 21 << 4294967297}', 42)

    def test_array_access(self):
-        jsi = JSInterpreter('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2.0] = 7; return x;}')
-        self.assertEqual(jsi.call_function('f'), [5, 2, 7])
+        self._test('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2.0] = 7; return x;}', [5, 2, 7])

    def test_parens(self):
-        jsi = JSInterpreter('function f(){return (1) + (2) * ((( (( (((((3)))))) )) ));}')
-        self.assertEqual(jsi.call_function('f'), 7)
-
-        jsi = JSInterpreter('function f(){return (1 + 2) * 3;}')
-        self.assertEqual(jsi.call_function('f'), 9)
+        self._test('function f(){return (1) + (2) * ((( (( (((((3)))))) )) ));}', 7)
+        self._test('function f(){return (1 + 2) * 3;}', 9)

    def test_quotes(self):
-        jsi = JSInterpreter(r'function f(){return "a\"\\("}')
-        self.assertEqual(jsi.call_function('f'), r'a"\(')
+        self._test(r'function f(){return "a\"\\("}', r'a"\(')

    def test_assignments(self):
-        jsi = JSInterpreter('function f(){var x = 20; x = 30 + 1; return x;}')
-        self.assertEqual(jsi.call_function('f'), 31)
-
-        jsi = JSInterpreter('function f(){var x = 20; x += 30 + 1; return x;}')
-        self.assertEqual(jsi.call_function('f'), 51)
-
-        jsi = JSInterpreter('function f(){var x = 20; x -= 30 + 1; return x;}')
-        self.assertEqual(jsi.call_function('f'), -11)
+        self._test('function f(){var x = 20; x = 30 + 1; return x;}', 31)
+        self._test('function f(){var x = 20; x += 30 + 1; return x;}', 51)
+        self._test('function f(){var x = 20; x -= 30 + 1; return x;}', -11)
+        self._test('function f(){var x = 2; var y = ["a", "b"]; y[x%y["length"]]="z"; return y}', ['z', 'b'])

    def test_comments(self):
-        'Skipping: Not yet fully implemented'
-        return
-        jsi = JSInterpreter('''
-            function x() {
-                var x = /* 1 + */ 2;
-                var y = /* 30
-                * 40 */ 50;
-                return x + y;
-            }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 52)
-
-        jsi = JSInterpreter('''
-            function f() {
-                var x = "/*";
-                var y = 1 /* comment */ + 2;
-                return y;
-            }
-        ''')
-        self.assertEqual(jsi.call_function('f'), 3)
+        self._test('''
+            function f() {
+                var x = /* 1 + */ 2;
+                var y = /* 30
+                * 40 */ 50;
+                return x + y;
+            }
+        ''', 52)
+
+        self._test('''
+            function f() {
+                var x = "/*";
+                var y = 1 /* comment */ + 2;
+                return y;
+            }
+        ''', 3)
+
+        self._test('''
+            function f() {
+                var x = ( /* 1 + */ 2 +
+                          /* 30 * 40 */
+                          50);
+                return x;
+            }
+        ''', 52)

    def test_precedence(self):
-        jsi = JSInterpreter('''
-            function x() {
-                var a = [10, 20, 30, 40, 50];
-                var b = 6;
-                a[0]=a[b%a.length];
-                return a;
-            }''')
-        self.assertEqual(jsi.call_function('x'), [20, 20, 30, 40, 50])
+        self._test('''
+            function f() {
+                var a = [10, 20, 30, 40, 50];
+                var b = 6;
+                a[0]=a[b%a.length];
+                return a;
+            }
+        ''', [20, 20, 30, 40, 50])

    def test_builtins(self):
-        jsi = JSInterpreter('''
-            function x() { return NaN }
-        ''')
-        self.assertTrue(math.isnan(jsi.call_function('x')))
+        self._test('function f() { return NaN }', NaN)

    def test_Date(self):
-        jsi = JSInterpreter('''
-            function x(dt) { return new Date(dt) - 0; }
-        ''')
-        self.assertEqual(jsi.call_function('x', 'Wednesday 31 December 1969 18:01:26 MDT'), 86000)
+        self._test('function f() { return new Date("Wednesday 31 December 1969 18:01:26 MDT") - 0; }', 86000)

+        jsi = JSInterpreter('function f(dt) { return new Date(dt) - 0; }')
        # date format m/d/y
-        self.assertEqual(jsi.call_function('x', '12/31/1969 18:01:26 MDT'), 86000)
-
+        self._test(jsi, 86000, args=['12/31/1969 18:01:26 MDT'])
        # epoch 0
-        self.assertEqual(jsi.call_function('x', '1 January 1970 00:00:00 UTC'), 0)
+        self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
+        # undefined
+        self._test(jsi, NaN, args=[JS_Undefined])
+        # y,m,d, ... - may fail with older dates lacking DST data
+        jsi = JSInterpreter(
+            'function f() { return new Date(%s); }'
+            % ('2024, 5, 29, 2, 52, 12, 42',))
+        self._test(jsi, (
+            1719625932042  # UK value
+            + (
+                + 3600  # back to GMT
+                + (time.altzone if time.daylight  # host's DST
+                   else time.timezone)
+            ) * 1000))
+        # no arg
+        self.assertAlmostEqual(JSInterpreter(
+            'function f() { return new Date() - 0; }').call_function('f'),
+            time.time() * 1000, delta=100)
+        # Date.now()
+        self.assertAlmostEqual(JSInterpreter(
+            'function f() { return Date.now(); }').call_function('f'),
+            time.time() * 1000, delta=100)
+        # Date.parse()
+        jsi = JSInterpreter('function f(dt) { return Date.parse(dt); }')
+        self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
+        # Date.UTC()
+        jsi = JSInterpreter('function f() { return Date.UTC(%s); }'
+                            % ('1970, 0, 1, 0, 0, 0, 0',))
+        self._test(jsi, 0)

    def test_call(self):
        jsi = JSInterpreter('''
@@ -206,179 +265,135 @@ class TestJSInterpreter(unittest.TestCase):
            function y(a) { return x() + (a?a:0); }
            function z() { return y(3); }
        ''')
-        self.assertEqual(jsi.call_function('z'), 5)
-        self.assertEqual(jsi.call_function('y'), 2)
+        self._test(jsi, 5, func='z')
+        self._test(jsi, 2, func='y')

    def test_if(self):
-        jsi = JSInterpreter('''
-            function x() {
+        self._test('''
+            function f() {
                let a = 9;
                if (0==0) {a++}
                return a
-            }''')
-        self.assertEqual(jsi.call_function('x'), 10)
+            }
+        ''', 10)

-        jsi = JSInterpreter('''
-            function x() {
+        self._test('''
+            function f() {
                if (0==0) {return 10}
-            }''')
-        self.assertEqual(jsi.call_function('x'), 10)
+            }
+        ''', 10)

-        jsi = JSInterpreter('''
-            function x() {
+        self._test('''
+            function f() {
                if (0!=0) {return 1}
                else {return 10}
-            }''')
-        self.assertEqual(jsi.call_function('x'), 10)
-
-        """ # Unsupported
-        jsi = JSInterpreter('''
-            function x() {
-                if (0!=0) return 1;
-                else {return 10}
-            }''')
-        self.assertEqual(jsi.call_function('x'), 10)
-        """
+            }
+        ''', 10)

    def test_elseif(self):
-        jsi = JSInterpreter('''
-            function x() {
-                if (0!=0) {return 1}
-                else if (1==0) {return 2}
-                else {return 10}
-            }''')
-        self.assertEqual(jsi.call_function('x'), 10)
-
-        """ # Unsupported
-        jsi = JSInterpreter('''
-            function x() {
-                if (0!=0) return 1;
-                else if (1==0) {return 2}
-                else {return 10}
-            }''')
-        self.assertEqual(jsi.call_function('x'), 10)
-        # etc
-        """
+        self._test('''
+            function f() {
+                if (0!=0) {return 1}
+                else if (1==0) {return 2}
+                else {return 10}
+            }
+        ''', 10)

    def test_for_loop(self):
-        # function x() { a=0; for (i=0; i-10; i++) {a++} a }
-        jsi = JSInterpreter('''
-            function x() { a=0; for (i=0; i-10; i++) {a++} return a }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 10)
+        self._test('function f() { a=0; for (i=0; i-10; i++) {a++} return a }', 10)

    def test_while_loop(self):
-        # function x() { a=0; while (a<10) {a++} a }
-        jsi = JSInterpreter('''
-            function x() { a=0; while (a<10) {a++} return a }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 10)
+        self._test('function f() { a=0; while (a<10) {a++} return a }', 10)

    def test_switch(self):
        jsi = JSInterpreter('''
-            function x(f) { switch(f){
-                case 1:f+=1;
-                case 2:f+=2;
-                case 3:f+=3;break;
-                case 4:f+=4;
-                default:f=0;
-            } return f }
+            function f(x) { switch(x){
+                case 1:x+=1;
+                case 2:x+=2;
+                case 3:x+=3;break;
+                case 4:x+=4;
+                default:x=0;
+            } return x }
        ''')
-        self.assertEqual(jsi.call_function('x', 1), 7)
-        self.assertEqual(jsi.call_function('x', 3), 6)
-        self.assertEqual(jsi.call_function('x', 5), 0)
+        self._test(jsi, 7, args=[1])
+        self._test(jsi, 6, args=[3])
+        self._test(jsi, 0, args=[5])

    def test_switch_default(self):
        jsi = JSInterpreter('''
-            function x(f) { switch(f){
-                case 2: f+=2;
-                default: f-=1;
-                case 5:
-                case 6: f+=6;
-                case 0: break;
-                case 1: f+=1;
-            } return f }
+            function f(x) { switch(x){
+                case 2: x+=2;
+                default: x-=1;
+                case 5:
+                case 6: x+=6;
+                case 0: break;
+                case 1: x+=1;
+            } return x }
        ''')
-        self.assertEqual(jsi.call_function('x', 1), 2)
-        self.assertEqual(jsi.call_function('x', 5), 11)
-        self.assertEqual(jsi.call_function('x', 9), 14)
+        self._test(jsi, 2, args=[1])
+        self._test(jsi, 11, args=[5])
+        self._test(jsi, 14, args=[9])

    def test_try(self):
-        jsi = JSInterpreter('''
-            function x() { try{return 10} catch(e){return 5} }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 10)
+        self._test('function f() { try{return 10} catch(e){return 5} }', 10)

    def test_catch(self):
-        jsi = JSInterpreter('''
-            function x() { try{throw 10} catch(e){return 5} }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 5)
+        self._test('function f() { try{throw 10} catch(e){return 5} }', 5)

    def test_finally(self):
-        jsi = JSInterpreter('''
-            function x() { try{throw 10} finally {return 42} }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 42)
-        jsi = JSInterpreter('''
-            function x() { try{throw 10} catch(e){return 5} finally {return 42} }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 42)
+        self._test('function f() { try{throw 10} finally {return 42} }', 42)
+        self._test('function f() { try{throw 10} catch(e){return 5} finally {return 42} }', 42)

    def test_nested_try(self):
-        jsi = JSInterpreter('''
-            function x() {try {
-                try{throw 10} finally {throw 42}
+        self._test('''
+            function f() {try {
+                try{throw 10} finally {throw 42}
                } catch(e){return 5} }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 5)
+        ''', 5)

    def test_for_loop_continue(self):
-        jsi = JSInterpreter('''
-            function x() { a=0; for (i=0; i-10; i++) { continue; a++ } return a }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 0)
+        self._test('function f() { a=0; for (i=0; i-10; i++) { continue; a++ } return a }', 0)

    def test_for_loop_break(self):
-        jsi = JSInterpreter('''
-            function x() { a=0; for (i=0; i-10; i++) { break; a++ } return a }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 0)
+        self._test('function f() { a=0; for (i=0; i-10; i++) { break; a++ } return a }', 0)

    def test_for_loop_try(self):
-        jsi = JSInterpreter('''
-            function x() {
-                for (i=0; i-10; i++) { try { if (i == 5) throw i} catch {return 10} finally {break} };
-                return 42 }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 42)
+        self._test('''
+            function f() {
+                for (i=0; i-10; i++) { try { if (i == 5) throw i} catch {return 10} finally {break} };
+                return 42 }
+        ''', 42)

    def test_literal_list(self):
-        jsi = JSInterpreter('''
-            function x() { return [1, 2, "asdf", [5, 6, 7]][3] }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [5, 6, 7])
+        self._test('function f() { return [1, 2, "asdf", [5, 6, 7]][3] }', [5, 6, 7])

    def test_comma(self):
-        jsi = JSInterpreter('''
-            function x() { a=5; a -= 1, a+=3; return a }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 7)
-        jsi = JSInterpreter('''
-            function x() { a=5; return (a -= 1, a+=3, a); }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 7)
-
-        jsi = JSInterpreter('''
-            function x() { return (l=[0,1,2,3], function(a, b){return a+b})((l[1], l[2]), l[3]) }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 5)
+        self._test('function f() { a=5; a -= 1, a+=3; return a }', 7)
+        self._test('function f() { a=5; return (a -= 1, a+=3, a); }', 7)
+        self._test('function f() { return (l=[0,1,2,3], function(a, b){return a+b})((l[1], l[2]), l[3]) }', 5)
+
+    def test_not(self):
+        self._test('function f() { return ! undefined; }', True)
+        self._test('function f() { return !0; }', True)
+        self._test('function f() { return !!0; }', False)
+        self._test('function f() { return ![]; }', False)
+        self._test('function f() { return !0 !== false; }', True)

    def test_void(self):
-        jsi = JSInterpreter('''
-            function x() { return void 42; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), None)
+        self._test('function f() { return void 42; }', JS_Undefined)
+
+    def test_typeof(self):
+        self._test('function f() { return typeof undefined; }', 'undefined')
+        self._test('function f() { return typeof NaN; }', 'number')
+        self._test('function f() { return typeof Infinity; }', 'number')
+        self._test('function f() { return typeof true; }', 'boolean')
+        self._test('function f() { return typeof null; }', 'object')
+        self._test('function f() { return typeof "a string"; }', 'string')
+        self._test('function f() { return typeof 42; }', 'number')
+        self._test('function f() { return typeof 42.42; }', 'number')
+        self._test('function f() { var g = function(){}; return typeof g; }', 'function')
+        self._test('function f() { return typeof {key: "value"}; }', 'object')
+        # not yet implemented: Symbol, BigInt

    def test_return_function(self):
        jsi = JSInterpreter('''
@@ -387,110 +402,71 @@ class TestJSInterpreter(unittest.TestCase):
        self.assertEqual(jsi.call_function('x')([]), 1)

    def test_null(self):
-        jsi = JSInterpreter('''
-            function x() { return null; }
-        ''')
-        self.assertIs(jsi.call_function('x'), None)
-
-        jsi = JSInterpreter('''
-            function x() { return [null > 0, null < 0, null == 0, null === 0]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [False, False, False, False])
-
-        jsi = JSInterpreter('''
-            function x() { return [null >= 0, null <= 0]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [True, True])
+        self._test('function f() { return null; }', None)
+        self._test('function f() { return [null > 0, null < 0, null == 0, null === 0]; }',
+                   [False, False, False, False])
+        self._test('function f() { return [null >= 0, null <= 0]; }', [True, True])

    def test_undefined(self):
-        jsi = JSInterpreter('''
-            function x() { return undefined === undefined; }
-        ''')
-        self.assertTrue(jsi.call_function('x'))
-
-        jsi = JSInterpreter('''
-            function x() { return undefined; }
-        ''')
-        self.assertIs(jsi.call_function('x'), JS_Undefined)
-
-        jsi = JSInterpreter('''
-            function x() { let v; return v; }
-        ''')
-        self.assertIs(jsi.call_function('x'), JS_Undefined)
-
-        jsi = JSInterpreter('''
-            function x() { return [undefined === undefined, undefined == undefined, undefined < undefined, undefined > undefined]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [True, True, False, False])
-
-        jsi = JSInterpreter('''
-            function x() { return [undefined === 0, undefined == 0, undefined < 0, undefined > 0]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [False, False, False, False])
-
-        jsi = JSInterpreter('''
-            function x() { return [undefined >= 0, undefined <= 0]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [False, False])
-
-        jsi = JSInterpreter('''
-            function x() { return [undefined > null, undefined < null, undefined == null, undefined === null]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [False, False, True, False])
-
-        jsi = JSInterpreter('''
-            function x() { return [undefined === null, undefined == null, undefined < null, undefined > null]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [False, True, False, False])
-
-        jsi = JSInterpreter('''
-            function x() { let v; return [42+v, v+42, v**42, 42**v, 0**v]; }
+        self._test('function f() { return undefined === undefined; }', True)
+        self._test('function f() { return undefined; }', JS_Undefined)
+        self._test('function f() { return undefined ?? 42; }', 42)
+        self._test('function f() { let v; return v; }', JS_Undefined)
+        self._test('function f() { let v; return v**0; }', 1)
+        self._test('function f() { let v; return [v>42, v<=42, v&&42, 42&&v]; }',
+                   [False, False, JS_Undefined, JS_Undefined])
+
+        self._test('''
+            function f() { return [
+                undefined === undefined,
+                undefined == undefined,
+                undefined == null
+            ]; }
+        ''', [True] * 3)
+        self._test('''
+            function f() { return [
+                undefined < undefined,
+                undefined > undefined,
+                undefined === 0,
+                undefined == 0,
+                undefined < 0,
+                undefined > 0,
+                undefined >= 0,
+                undefined <= 0,
+                undefined > null,
+                undefined < null,
+                undefined === null
+            ]; }
+        ''', [False] * 11)
+
+        jsi = JSInterpreter('''
+            function x() { let v; return [42+v, v+42, v**42, 42**v, 0**v]; }
        ''')
        for y in jsi.call_function('x'):
            self.assertTrue(math.isnan(y))

-        jsi = JSInterpreter('''
-            function x() { let v; return v**0; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 1)
-
-        jsi = JSInterpreter('''
-            function x() { let v; return [v>42, v<=42, v&&42, 42&&v]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [False, False, JS_Undefined, JS_Undefined])
-
-        jsi = JSInterpreter('function x(){return undefined ?? 42; }')
-        self.assertEqual(jsi.call_function('x'), 42)
-
    def test_object(self):
-        jsi = JSInterpreter('''
-            function x() { return {}; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), {})
-
-        jsi = JSInterpreter('''
-            function x() { let a = {m1: 42, m2: 0 }; return [a["m1"], a.m2]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), [42, 0])
-
-        jsi = JSInterpreter('''
-            function x() { let a; return a?.qq; }
-        ''')
-        self.assertIs(jsi.call_function('x'), JS_Undefined)
-
-        jsi = JSInterpreter('''
-            function x() { let a = {m1: 42, m2: 0 }; return a?.qq; }
-        ''')
-        self.assertIs(jsi.call_function('x'), JS_Undefined)
+        self._test('function f() { return {}; }', {})
+        self._test('function f() { let a = {m1: 42, m2: 0 }; return [a["m1"], a.m2]; }', [42, 0])
+        self._test('function f() { let a; return a?.qq; }', JS_Undefined)
+        self._test('function f() { let a = {m1: 42, m2: 0 }; return a?.qq; }', JS_Undefined)
+
+    def test_indexing(self):
+        self._test('function f() { return [1, 2, 3, 4][3]}', 4)
+        self._test('function f() { return [1, [2, [3, [4]]]][1][1][1][0]}', 4)
+        self._test('function f() { var o = {1: 2, 3: 4}; return o[3]}', 4)
+        self._test('function f() { var o = {1: 2, 3: 4}; return o["3"]}', 4)
+        self._test('function f() { return [1, [2, {3: [4]}]][1][1]["3"][0]}', 4)
+        self._test('function f() { return [1, 2, 3, 4].length}', 4)
+        self._test('function f() { var o = {1: 2, 3: 4}; return o.length}', JS_Undefined)
+        self._test('function f() { var o = {1: 2, 3: 4}; o["length"] = 42; return o.length}', 42)

    def test_regex(self):
-        jsi = JSInterpreter('''
-            function x() { let a=/,,[/,913,/](,)}/; }
-        ''')
-        self.assertIs(jsi.call_function('x'), None)
+        self._test('function f() { let a=/,,[/,913,/](,)}/; }', None)
+        self._test('function f() { let a=/,,[/,913,/](,)}/; return a.source; }', ',,[/,913,/](,)}')

        jsi = JSInterpreter('''
-            function x() { let a=/,,[/,913,/](,)}/; "".replace(a, ""); return a; }
+                function x() { let a=/,,[/,913,/](,)}/; "".replace(a, ""); return a; }
        ''')
        attrs = set(('findall', 'finditer', 'match', 'scanner', 'search',
                     'split', 'sub', 'subn'))
@@ -500,92 +476,181 @@ class TestJSInterpreter(unittest.TestCase):
        self.assertSetEqual(set(dir(jsi.call_function('x'))) & attrs, attrs)

        jsi = JSInterpreter('''
-            function x() { let a=/,,[/,913,/](,)}/i; return a; }
+                function x() { let a=/,,[/,913,/](,)}/i; return a; }
        ''')
        self.assertEqual(jsi.call_function('x').flags & ~re.U, re.I)

-        jsi = JSInterpreter(r'''
-            function x() { let a="data-name".replace("data-", ""); return a }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 'name')
+        jsi = JSInterpreter(r'function f() { let a=/,][}",],()}(\[)/; return a; }')
+        self.assertEqual(jsi.call_function('f').pattern, r',][}",],()}(\[)')

-        jsi = JSInterpreter(r'''
-            function x() { let a="data-name".replace(new RegExp("^.+-"), ""); return a; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 'name')
+        jsi = JSInterpreter(r'function f() { let a=[/[)\\]/]; return a[0]; }')
+        self.assertEqual(jsi.call_function('f').pattern, r'[)\\]')

-        jsi = JSInterpreter(r'''
-            function x() { let a="data-name".replace(/^.+-/, ""); return a; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 'name')
-
-        jsi = JSInterpreter(r'''
-            function x() { let a="data-name".replace(/a/g, "o"); return a; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 'doto-nome')
-
-        jsi = JSInterpreter(r'''
-            function x() { let a="data-name".replaceAll("a", "o"); return a; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 'doto-nome')
-
-        jsi = JSInterpreter(r'''
-            function x() { let a=[/[)\\]/]; return a[0]; }
-        ''')
-        self.assertEqual(jsi.call_function('x').pattern, r'[)\\]')
-
-        """ # fails
-        jsi = JSInterpreter(r'''
-            function x() { let a=100; a/=/[0-9]+/.exec('divide by 20 today')[0]; }
-        ''')
-        self.assertEqual(jsi.call_function('x'), 5)
-        """
+    def test_replace(self):
+        self._test('function f() { let a="data-name".replace("data-", ""); return a }',
+                   'name')
+        self._test('function f() { let a="data-name".replace(new RegExp("^.+-"), ""); return a; }',
+                   'name')
+        self._test('function f() { let a="data-name".replace(/^.+-/, ""); return a; }',
+                   'name')
+        self._test('function f() { let a="data-name".replace(/a/g, "o"); return a; }',
+                   'doto-nome')
+        self._test('function f() { let a="data-name".replaceAll("a", "o"); return a; }',
+                   'doto-nome')

    def test_char_code_at(self):
-        jsi = JSInterpreter('function x(i){return "test".charCodeAt(i)}')
-        self.assertEqual(jsi.call_function('x', 0), 116)
-        self.assertEqual(jsi.call_function('x', 1), 101)
-        self.assertEqual(jsi.call_function('x', 2), 115)
-        self.assertEqual(jsi.call_function('x', 3), 116)
-        self.assertEqual(jsi.call_function('x', 4), None)
-        self.assertEqual(jsi.call_function('x', 'not_a_number'), 116)
+        jsi = JSInterpreter('function f(i){return "test".charCodeAt(i)}')
+        self._test(jsi, 116, args=[0])
+        self._test(jsi, 101, args=[1])
+        self._test(jsi, 115, args=[2])
+        self._test(jsi, 116, args=[3])
+        self._test(jsi, None, args=[4])
+        self._test(jsi, 116, args=['not_a_number'])

    def test_bitwise_operators_overflow(self):
-        jsi = JSInterpreter('function x(){return -524999584 << 5}')
-        self.assertEqual(jsi.call_function('x'), 379882496)
-
-        jsi = JSInterpreter('function x(){return 1236566549 << 5}')
-        self.assertEqual(jsi.call_function('x'), 915423904)
+        self._test('function f(){return -524999584 << 5}', 379882496)
+        self._test('function f(){return 1236566549 << 5}', 915423904)

-    def test_bitwise_operators_madness(self):
-        jsi = JSInterpreter('function x(){return null << 5}')
-        self.assertEqual(jsi.call_function('x'), 0)
-
-        jsi = JSInterpreter('function x(){return undefined >> 5}')
-        self.assertEqual(jsi.call_function('x'), 0)
-
-        jsi = JSInterpreter('function x(){return 42 << NaN}')
-        self.assertEqual(jsi.call_function('x'), 42)
-
-        jsi = JSInterpreter('function x(){return 42 << Infinity}')
-        self.assertEqual(jsi.call_function('x'), 42)
+    def test_negative(self):
+        self._test('function f(){return 2 * -2.0 ;}', -4)
+        self._test('function f(){return 2 - - -2 ;}', 0)
+        self._test('function f(){return 2 - - - -2 ;}', 4)
+        self._test('function f(){return 2 - + + - -2;}', 0)
+        self._test('function f(){return 2 + - + - -2;}', 0)

    def test_32066(self):
-        jsi = JSInterpreter("function x(){return Math.pow(3, 5) + new Date('1970-01-01T08:01:42.000+08:00') / 1000 * -239 - -24205;}")
-        self.assertEqual(jsi.call_function('x'), 70)
-
-    def test_unary_operators(self):
-        jsi = JSInterpreter('function f(){return 2 - - - 2;}')
-        self.assertEqual(jsi.call_function('f'), 0)
-        # fails
-        # jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
-        # self.assertEqual(jsi.call_function('f'), 0)
+        self._test(
+            "function f(){return Math.pow(3, 5) + new Date('1970-01-01T08:01:42.000+08:00') / 1000 * -239 - -24205;}",
+            70)

-    """ # fails so far
+    @unittest.skip('Not yet working')
    def test_packed(self):
-        jsi = JSInterpreter('''function x(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}''')
-        self.assertEqual(jsi.call_function('x', '''h 7=g("1j");7.7h({7g:[{33:"w://7f-7e-7d-7c.v.7b/7a/79/78/77/76.74?t=73&s=2s&e=72&f=2t&71=70.0.0.1&6z=6y&6x=6w"}],6v:"w://32.v.u/6u.31",16:"r%",15:"r%",6t:"6s",6r:"",6q:"l",6p:"l",6o:"6n",6m:\'6l\',6k:"6j",9:[{33:"/2u?b=6i&n=50&6h=w://32.v.u/6g.31",6f:"6e"}],1y:{6d:1,6c:\'#6b\',6a:\'#69\',68:"67",66:30,65:r,},"64":{63:"%62 2m%m%61%5z%5y%5x.u%5w%5v%5u.2y%22 2k%m%1o%22 5t%m%1o%22 5s%m%1o%22 2j%m%5r%22 16%m%5q%22 15%m%5p%22 5o%2z%5n%5m%2z",5l:"w://v.u/d/1k/5k.2y",5j:[]},\'5i\':{"5h":"5g"},5f:"5e",5d:"w://v.u",5c:{},5b:l,1x:[0.25,0.50,0.75,1,1.25,1.5,2]});h 1m,1n,5a;h 59=0,58=0;h 7=g("1j");h 2x=0,57=0,56=0;$.55({54:{\'53-52\':\'2i-51\'}});7.j(\'4z\',6(x){c(5>0&&x.1l>=5&&1n!=1){1n=1;$(\'q.4y\').4x(\'4w\')}});7.j(\'13\',6(x){2x=x.1l});7.j(\'2g\',6(x){2w(x)});7.j(\'4v\',6(){$(\'q.2v\').4u()});6 2w(x){$(\'q.2v\').4t();c(1m)19;1m=1;17=0;c(4s.4r===l){17=1}$.4q(\'/2u?b=4p&2l=1k&4o=2t-4n-4m-2s-4l&4k=&4j=&4i=&17=\'+17,6(2r){$(\'#4h\').4g(2r)});$(\'.3-8-4f-4e:4d("4c")\').2h(6(e){2q();g().4b(0);g().4a(l)});6 2q(){h $14=$("<q />").2p({1l:"49",16:"r%",15:"r%",48:0,2n:0,2o:47,46:"45(10%, 10%, 10%, 0.4)","44-43":"42"});$("<41 />").2p({16:"60%",15:"60%",2o:40,"3z-2n":"3y"}).3x({\'2m\':\'/?b=3w&2l=1k\',\'2k\':\'0\',\'2j\':\'2i\'}).2f($14);$14.2h(6(){$(3v).3u();g().2g()});$14.2f($(\'#1j\'))}g().13(0);}6 3t(){h 9=7.1b(2e);2d.2c(9);c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==2e){2d.2c(\'!!=\'+i);7.1p(i)}}}}7.j(\'3s\',6(){g().1h("/2a/3r.29","3q 10 28",6(){g().13(g().27()+10)},"2b");$("q[26=2b]").23().21(\'.3-20-1z\');g().1h("/2a/3p.29","3o 10 28",6(){h 12=g().27()-10;c(12<0)12=0;g().13(12)},"24");$("q[26=24]").23().21(\'.3-20-1z\');});6 1i(){}7.j(\'3n\',6(){1i()});7.j(\'3m\',6(){1i()});7.j("k",6(y){h 9=7.1b();c(9.n<2)19;$(\'.3-8-3l-3k\').3j(6(){$(\'#3-8-a-k\').1e(\'3-8-a-z\');$(\'.3-a-k\').p(\'o-1f\',\'11\')});7.1h("/3i/3h.3g","3f 3e",6(){$(\'.3-1w\').3d(\'3-8-1v\');$(\'.3-8-1y, .3-8-1x\').p(\'o-1g\',\'11\');c($(\'.3-1w\').3c(\'3-8-1v\')){$(\'.3-a-k\').p(\'o-1g\',\'l\');$(\'.3-a-k\').p(\'o-1f\',\'l\');$(\'.3-8-a\').1e(\'3-8-a-z\');$(\'.3-8-a:1u\').3b(\'3-8-a-z\')}3a{$(\'.3-a-k\').p(\'o-1g\',\'11\');$(\'.3-a-k\').p(\'o-1f\',\'11\');$(\'.3-8-a:1u\').1e(\'3-8-a-z\')}},"39");7.j("38",6(y){1d.37(\'1c\',y.9[y.36].1a)});c(1d.1t(\'1c\')){35("1s(1d.1t(\'1c\'));",34)}});h 18;6 1s(1q){h 9=7.1b();c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==1q){c(i==18){19}18=i;7.1p(i)}}}}',36,270,'|||jw|||function|player|settings|tracks|submenu||if||||jwplayer|var||on|audioTracks|true|3D|length|aria|attr|div|100|||sx|filemoon|https||event|active||false|tt|seek|dd|height|width|adb|current_audio|return|name|getAudioTracks|default_audio|localStorage|removeClass|expanded|checked|addButton|callMeMaybe|vplayer|0fxcyc2ajhp1|position|vvplay|vvad|220|setCurrentAudioTrack|audio_name|for|audio_set|getItem|last|open|controls|playbackRates|captions|rewind|icon|insertAfter||detach|ff00||button|getPosition|sec|png|player8|ff11|log|console|track_name|appendTo|play|click|no|scrolling|frameborder|file_code|src|top|zIndex|css|showCCform|data|1662367683|383371|dl|video_ad|doPlay|prevt|mp4|3E||jpg|thumbs|file|300|setTimeout|currentTrack|setItem|audioTrackChanged|dualSound|else|addClass|hasClass|toggleClass|Track|Audio|svg|dualy|images|mousedown|buttons|topbar|playAttemptFailed|beforePlay|Rewind|fr|Forward|ff|ready|set_audio_track|remove|this|upload_srt|prop|50px|margin|1000001|iframe|center|align|text|rgba|background|1000000|left|absolute|pause|setCurrentCaptions|Upload|contains|item|content|html|fviews|referer|prem|embed|3e57249ef633e0d03bf76ceb8d8a4b65|216|83|hash|view|get|TokenZir|window|hide|show|complete|slow|fadeIn|video_ad_fadein|time||cache|Cache|Content|headers|ajaxSetup|v2done|tott|vastdone2|vastdone1|vvbefore|playbackRateControls|cast|aboutlink|FileMoon|abouttext|UHD|1870|qualityLabels|sites|GNOME_POWER|link|2Fiframe|3C|allowfullscreen|22360|22640|22no|marginheight|marginwidth|2FGNOME_POWER|2F0fxcyc2ajhp1|2Fe|2Ffilemoon|2F|3A||22https|3Ciframe|code|sharing|fontOpacity|backgroundOpacity|Tahoma|fontFamily|303030|backgroundColor|FFFFFF|color|userFontScale|thumbnails|kind|0fxcyc2ajhp10000|url|get_slides|start|startparam|none|preload|html5|primary|hlshtml|androidhls|duration|uniform|stretching|0fxcyc2ajhp1_xt|image|2048|sp|6871|asn|127|srv|43200|_g3XlBcu2lmD9oDexD2NLWSmah2Nu3XcDrl93m9PwXY|m3u8||master|0fxcyc2ajhp1_x|00076|01|hls2|to|s01|delivery|storage|moon|sources|setup'''.split('|')))
-    """
+        self._test(
+            '''function f(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}''',
+            '''h 7=g("1j");7.7h({7g:[{33:"w://7f-7e-7d-7c.v.7b/7a/79/78/77/76.74?t=73&s=2s&e=72&f=2t&71=70.0.0.1&6z=6y&6x=6w"}],6v:"w://32.v.u/6u.31",16:"r%",15:"r%",6t:"6s",6r:"",6q:"l",6p:"l",6o:"6n",6m:\'6l\',6k:"6j",9:[{33:"/2u?b=6i&n=50&6h=w://32.v.u/6g.31",6f:"6e"}],1y:{6d:1,6c:\'#6b\',6a:\'#69\',68:"67",66:30,65:r,},"64":{63:"%62 2m%m%61%5z%5y%5x.u%5w%5v%5u.2y%22 2k%m%1o%22 5t%m%1o%22 5s%m%1o%22 2j%m%5r%22 16%m%5q%22 15%m%5p%22 5o%2z%5n%5m%2z",5l:"w://v.u/d/1k/5k.2y",5j:[]},\'5i\':{"5h":"5g"},5f:"5e",5d:"w://v.u",5c:{},5b:l,1x:[0.25,0.50,0.75,1,1.25,1.5,2]});h 1m,1n,5a;h 59=0,58=0;h 7=g("1j");h 2x=0,57=0,56=0;$.55({54:{\'53-52\':\'2i-51\'}});7.j(\'4z\',6(x){c(5>0&&x.1l>=5&&1n!=1){1n=1;$(\'q.4y\').4x(\'4w\')}});7.j(\'13\',6(x){2x=x.1l});7.j(\'2g\',6(x){2w(x)});7.j(\'4v\',6(){$(\'q.2v\').4u()});6 2w(x){$(\'q.2v\').4t();c(1m)19;1m=1;17=0;c(4s.4r===l){17=1}$.4q(\'/2u?b=4p&2l=1k&4o=2t-4n-4m-2s-4l&4k=&4j=&4i=&17=\'+17,6(2r){$(\'#4h\').4g(2r)});$(\'.3-8-4f-4e:4d("4c")\').2h(6(e){2q();g().4b(0);g().4a(l)});6 2q(){h $14=$("<q />").2p({1l:"49",16:"r%",15:"r%",48:0,2n:0,2o:47,46:"45(10%, 10%, 10%, 0.4)","44-43":"42"});$("<41 />").2p({16:"60%",15:"60%",2o:40,"3z-2n":"3y"}).3x({\'2m\':\'/?b=3w&2l=1k\',\'2k\':\'0\',\'2j\':\'2i\'}).2f($14);$14.2h(6(){$(3v).3u();g().2g()});$14.2f($(\'#1j\'))}g().13(0);}6 3t(){h 9=7.1b(2e);2d.2c(9);c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==2e){2d.2c(\'!!=\'+i);7.1p(i)}}}}7.j(\'3s\',6(){g().1h("/2a/3r.29","3q 10 28",6(){g().13(g().27()+10)},"2b");$("q[26=2b]").23().21(\'.3-20-1z\');g().1h("/2a/3p.29","3o 10 28",6(){h 12=g().27()-10;c(12<0)12=0;g().13(12)},"24");$("q[26=24]").23().21(\'.3-20-1z\');});6 1i(){}7.j(\'3n\',6(){1i()});7.j(\'3m\',6(){1i()});7.j("k",6(y){h 9=7.1b();c(9.n<2)19;$(\'.3-8-3l-3k\').3j(6(){$(\'#3-8-a-k\').1e(\'3-8-a-z\');$(\'.3-a-k\').p(\'o-1f\',\'11\')});7.1h("/3i/3h.3g","3f 3e",6(){$(\'.3-1w\').3d(\'3-8-1v\');$(\'.3-8-1y, .3-8-1x\').p(\'o-1g\',\'11\');c($(\'.3-1w\').3c(\'3-8-1v\')){$(\'.3-a-k\').p(\'o-1g\',\'l\');$(\'.3-a-k\').p(\'o-1f\',\'l\');$(\'.3-8-a\').1e(\'3-8-a-z\');$(\'.3-8-a:1u\').3b(\'3-8-a-z\')}3a{$(\'.3-a-k\').p(\'o-1g\',\'11\');$(\'.3-a-k\').p(\'o-1f\',\'11\');$(\'.3-8-a:1u\').1e(\'3-8-a-z\')}},"39");7.j("38",6(y){1d.37(\'1c\',y.9[y.36].1a)});c(1d.1t(\'1c\')){35("1s(1d.1t(\'1c\'));",34)}});h 18;6 1s(1q){h 9=7.1b();c(9.n>1){1r(i=0;i<9.n;i++){c(9[i].1a==1q){c(i==18){19}18=i;7.1p(i)}}}}',36,270,'|||jw|||function|player|settings|tracks|submenu||if||||jwplayer|var||on|audioTracks|true|3D|length|aria|attr|div|100|||sx|filemoon|https||event|active||false|tt|seek|dd|height|width|adb|current_audio|return|name|getAudioTracks|default_audio|localStorage|removeClass|expanded|checked|addButton|callMeMaybe|vplayer|0fxcyc2ajhp1|position|vvplay|vvad|220|setCurrentAudioTrack|audio_name|for|audio_set|getItem|last|open|controls|playbackRates|captions|rewind|icon|insertAfter||detach|ff00||button|getPosition|sec|png|player8|ff11|log|console|track_name|appendTo|play|click|no|scrolling|frameborder|file_code|src|top|zIndex|css|showCCform|data|1662367683|383371|dl|video_ad|doPlay|prevt|mp4|3E||jpg|thumbs|file|300|setTimeout|currentTrack|setItem|audioTrackChanged|dualSound|else|addClass|hasClass|toggleClass|Track|Audio|svg|dualy|images|mousedown|buttons|topbar|playAttemptFailed|beforePlay|Rewind|fr|Forward|ff|ready|set_audio_track|remove|this|upload_srt|prop|50px|margin|1000001|iframe|center|align|text|rgba|background|1000000|left|absolute|pause|setCurrentCaptions|Upload|contains|item|content|html|fviews|referer|prem|embed|3e57249ef633e0d03bf76ceb8d8a4b65|216|83|hash|view|get|TokenZir|window|hide|show|complete|slow|fadeIn|video_ad_fadein|time||cache|Cache|Content|headers|ajaxSetup|v2done|tott|vastdone2|vastdone1|vvbefore|playbackRateControls|cast|aboutlink|FileMoon|abouttext|UHD|1870|qualityLabels|sites|GNOME_POWER|link|2Fiframe|3C|allowfullscreen|22360|22640|22no|marginheight|marginwidth|2FGNOME_POWER|2F0fxcyc2ajhp1|2Fe|2Ffilemoon|2F|3A||22https|3Ciframe|code|sharing|fontOpacity|backgroundOpacity|Tahoma|fontFamily|303030|backgroundColor|FFFFFF|color|userFontScale|thumbnails|kind|0fxcyc2ajhp10000|url|get_slides|start|startparam|none|preload|html5|primary|hlshtml|androidhls|duration|uniform|stretching|0fxcyc2ajhp1_xt|image|2048|sp|6871|asn|127|srv|43200|_g3XlBcu2lmD9oDexD2NLWSmah2Nu3XcDrl93m9PwXY|m3u8||master|0fxcyc2ajhp1_x|00076|01|hls2|to|s01|delivery|storage|moon|sources|setup'''.split('|'))
+
+    def test_join(self):
+        test_input = list('test')
+        tests = [
+            'function f(a, b){return a.join(b)}',
+            'function f(a, b){return Array.prototype.join.call(a, b)}',
+            'function f(a, b){return Array.prototype.join.apply(a, [b])}',
+        ]
+        for test in tests:
+            jsi = JSInterpreter(test)
+            self._test(jsi, 'test', args=[test_input, ''])
+            self._test(jsi, 't-e-s-t', args=[test_input, '-'])
+            self._test(jsi, '', args=[[], '-'])
+
+        self._test('function f(){return '
+                   '[1, 1.0, "abc", {a: 1}, null, undefined, Infinity, NaN].join()}',
+                   '1,1,abc,[object Object],,,Infinity,NaN')
+        self._test('function f(){return '
+                   '[1, 1.0, "abc", {a: 1}, null, undefined, Infinity, NaN].join("~")}',
+                   '1~1~abc~[object Object]~~~Infinity~NaN')
+
+    def test_split(self):
+        test_result = list('test')
+        tests = [
+            'function f(a, b){return a.split(b)}',
+            'function f(a, b){return a["split"](b)}',
+            'function f(a, b){let x = ["split"]; return a[x[0]](b)}',
+            'function f(a, b){return String.prototype.split.call(a, b)}',
+            'function f(a, b){return String.prototype.split.apply(a, [b])}',
+        ]
+        for test in tests:
+            jsi = JSInterpreter(test)
+            self._test(jsi, test_result, args=['test', ''])
+            self._test(jsi, test_result, args=['t-e-s-t', '-'])
+            self._test(jsi, [''], args=['', '-'])
+            self._test(jsi, [], args=['', ''])
+        # RegExp split
+        self._test('function f(){return "test".split(/(?:)/)}',
+                   ['t', 'e', 's', 't'])
+        self._test('function f(){return "t-e-s-t".split(/[es-]+/)}',
+                   ['t', 't'])
+        # from MDN: surrogate pairs aren't handled: case 1 fails
+        # self._test('function f(){return "😄😄".split(/(?:)/)}',
+        #            ['\ud83d', '\ude04', '\ud83d', '\ude04'])
+        # case 2 beats Py3.2: it gets the case 1 result
+        if sys.version_info >= (2, 6) and not ((3, 0) <= sys.version_info < (3, 3)):
+            self._test('function f(){return "😄😄".split(/(?:)/u)}',
+                       ['😄', '😄'])
+
+    def test_slice(self):
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice()}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(5)}', [5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(99)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-2)}', [7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-99)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 0)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, 0)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 1)}', [0])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(3, 6)}', [3, 4, 5])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, -1)}', [1, 2, 3, 4, 5, 6, 7])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-1, 1)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-3, -1)}', [6, 7])
+        self._test('function f(){return "012345678".slice()}', '012345678')
+        self._test('function f(){return "012345678".slice(0)}', '012345678')
+        self._test('function f(){return "012345678".slice(5)}', '5678')
+        self._test('function f(){return "012345678".slice(99)}', '')
+        self._test('function f(){return "012345678".slice(-2)}', '78')
+        self._test('function f(){return "012345678".slice(-99)}', '012345678')
+        self._test('function f(){return "012345678".slice(0, 0)}', '')
+        self._test('function f(){return "012345678".slice(1, 0)}', '')
+        self._test('function f(){return "012345678".slice(0, 1)}', '0')
+        self._test('function f(){return "012345678".slice(3, 6)}', '345')
+        self._test('function f(){return "012345678".slice(1, -1)}', '1234567')
+        self._test('function f(){return "012345678".slice(-1, 1)}', '')
+        self._test('function f(){return "012345678".slice(-3, -1)}', '67')
+
+    def test_splice(self):
+        self._test('function f(){var T = ["0", "1", "2"]; T["splice"](2, 1, "0")[0]; return T }', ['0', '1', '0'])
+
+    def test_pop(self):
+        # pop
+        self._test('function f(){var a = [0, 1, 2, 3, 4, 5, 6, 7, 8]; return [a.pop(), a]}',
+                   [8, [0, 1, 2, 3, 4, 5, 6, 7]])
+        self._test('function f(){return [].pop()}', JS_Undefined)
+        # push
+        self._test('function f(){var a = [0, 1, 2]; return [a.push(3, 4), a]}',
+                   [5, [0, 1, 2, 3, 4]])
+        self._test('function f(){var a = [0, 1, 2]; return [a.push(), a]}',
+                   [3, [0, 1, 2]])
+
+    def test_shift(self):
+        # shift
+        self._test('function f(){var a = [0, 1, 2, 3, 4, 5, 6, 7, 8]; return [a.shift(), a]}',
+                   [0, [1, 2, 3, 4, 5, 6, 7, 8]])
+        self._test('function f(){return [].shift()}', JS_Undefined)
+        # unshift
+        self._test('function f(){var a = [0, 1, 2]; return [a.unshift(3, 4), a]}',
+                   [5, [3, 4, 0, 1, 2]])
+        self._test('function f(){var a = [0, 1, 2]; return [a.unshift(), a]}',
+                   [3, [0, 1, 2]])
+
+    def test_forEach(self):
+        self._test('function f(){var ret = []; var l = [4, 2]; '
+                   'var log = function(e,i,a){ret.push([e,i,a]);}; '
+                   'l.forEach(log); '
+                   'return [ret.length, ret[0][0], ret[1][1], ret[0][2]]}',
+                   [2, 4, 1, [4, 2]])
+        self._test('function f(){var ret = []; var l = [4, 2]; '
+                   'var log = function(e,i,a){this.push([e,i,a]);}; '
+                   'l.forEach(log, ret); '
+                   'return [ret.length, ret[0][0], ret[1][1], ret[0][2]]}',
+                   [2, 4, 1, [4, 2]])

    def test_extract_function(self):
        jsi = JSInterpreter('function a(b) { return b + 1; }')
        func = jsi.extract_function('a')
        self.assertEqual(func([2]), 3)

+    def test_extract_function_with_global_stack(self):
+        jsi = JSInterpreter('function c(d) { return d + e + f + g; }')
+        func = jsi.extract_function('c', {'e': 10}, {'f': 100, 'g': 1000})
+        self.assertEqual(func([1]), 1111)
+

if __name__ == '__main__':
diff --git a/test/test_utils.py b/test/test_utils.py
index de7fe80b8..2947cce7e 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -14,9 +14,11 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import itertools
import json
+import types
import xml.etree.ElementTree

from youtube_dl.utils import (
+    _UnsafeExtensionError,
    age_restricted,
    args_to_str,
    base_url,
@@ -270,6 +272,27 @@ class TestUtil(unittest.TestCase):
            expand_path('~/%s' % env('YOUTUBE_DL_EXPATH_PATH')),
            '%s/expanded' % compat_getenv('HOME'))

+    _uncommon_extensions = [
+        ('exe', 'abc.exe.ext'),
+        ('de', 'abc.de.ext'),
+        ('../.mp4', None),
+        ('..\\.mp4', None),
+    ]
+
+    def assertUnsafeExtension(self, ext=None):
+        assert_raises = self.assertRaises(_UnsafeExtensionError)
+        assert_raises.ext = ext
+        orig_exit = assert_raises.__exit__
+
+        def my_exit(self_, exc_type, exc_val, exc_tb):
+            did_raise = orig_exit(exc_type, exc_val, exc_tb)
+            if did_raise and assert_raises.ext is not None:
+                self.assertEqual(assert_raises.ext, assert_raises.exception.extension, 'Unsafe extension not as unexpected')
+            return did_raise
+
+        assert_raises.__exit__ = types.MethodType(my_exit, assert_raises)
+        return assert_raises
+
    def test_prepend_extension(self):
        self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
        self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
@@ -278,6 +301,19 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')

+        # Test uncommon extensions
+        self.assertEqual(prepend_extension('abc.ext', 'bin'), 'abc.bin.ext')
+        for ext, result in self._uncommon_extensions:
+            with self.assertUnsafeExtension(ext):
+                prepend_extension('abc', ext)
+            if result:
+                self.assertEqual(prepend_extension('abc.ext', ext, 'ext'), result)
+            else:
+                with self.assertUnsafeExtension(ext):
+                    prepend_extension('abc.ext', ext, 'ext')
+            with self.assertUnsafeExtension(ext):
+                prepend_extension('abc.unexpected_ext', ext, 'ext')
+
    def test_replace_extension(self):
        self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
        self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
@@ -286,6 +322,16 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
        self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')

+        # Test uncommon extensions
+        self.assertEqual(replace_extension('abc.ext', 'bin'), 'abc.unknown_video')
+        for ext, _ in self._uncommon_extensions:
+            with self.assertUnsafeExtension(ext):
+                replace_extension('abc', ext)
+            with self.assertUnsafeExtension(ext):
+                replace_extension('abc.ext', ext, 'ext')
+            with self.assertUnsafeExtension(ext):
+                replace_extension('abc.unexpected_ext', ext, 'ext')
+
    def test_subtitles_filename(self):
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
        self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
index f45dfec7c..98221b9c2 100644
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# coding: utf-8

from __future__ import unicode_literals

@@ -12,6 +13,7 @@ import re
import string

from youtube_dl.compat import (
+    compat_contextlib_suppress,
    compat_open as open,
    compat_str,
    compat_urlretrieve,
@@ -50,23 +52,93 @@ _SIG_TESTS = [
    (
        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
        84,
-        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
+        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>',
    ),
    (
        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
        83,
-        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
+        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F',
    ),
    (
        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
        '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
-        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
+        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B',
    ),
    (
        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
        '312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
        '112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
-    )
+    ),
+    (
+        'https://www.youtube.com/s/player/6ed0d907/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'AOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL2QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
+    ),
+    (
+        'https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'MyOSJXtKI3m-uME_jv7-pT12gOFC02RFkGoqWpzE0Cs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+    ),
+    (
+        'https://www.youtube.com/s/player/2f1832d2/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xxAj7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJ2OySqa0q',
+    ),
+    (
+        'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'AAOAOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7vgpDL0QwbdV06sCIEzpWqMGkFR20CFOS21Tp-7vj_EMu-m37KtXJoOy1',
+    ),
+    (
+        'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+    ),
+    (
+        'https://www.youtube.com/s/player/363db69b/player_ias_tce.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+    ),
+    (
+        'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
+    ),
+    (
+        'https://www.youtube.com/s/player/4fcd6e4a/player_ias_tce.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
+    ),
+    (
+        'https://www.youtube.com/s/player/20830619/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+    ),
+    (
+        'https://www.youtube.com/s/player/20830619/player_ias_tce.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+    ),
+    (
+        'https://www.youtube.com/s/player/20830619/player-plasma-ias-phone-en_US.vflset/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+    ),
+    (
+        'https://www.youtube.com/s/player/20830619/player-plasma-ias-tablet-en_US.vflset/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+    ),
+    (
+        'https://www.youtube.com/s/player/8a8ac953/player_ias_tce.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'IAOAOq0QJ8wRAAgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_E2u-m37KtXJoOySqa0',
+    ),
+    (
+        'https://www.youtube.com/s/player/8a8ac953/tv-player-es6.vflset/tv-player-es6.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA', + 'IAOAOq0QJ8wRAAgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_E2u-m37KtXJoOySqa0', + ), ] _NSIG_TESTS = [ @@ -136,13 +208,17 @@ _NSIG_TESTS = [ ), ( 'https://www.youtube.com/s/player/c57c113c/player_ias.vflset/en_US/base.js', - '-Txvy6bT5R6LqgnQNx', 'dcklJCnRUHbgSg', + 'M92UUMHa8PdvPd3wyM', '3hPqLJsiNZx7yA', ), ( 'https://www.youtube.com/s/player/5a3b6271/player_ias.vflset/en_US/base.js', 'B2j7f_UPT4rfje85Lu_e', 'm5DmNymaGQ5RdQ', ), ( + 'https://www.youtube.com/s/player/7a062b77/player_ias.vflset/en_US/base.js', + 'NRcE3y3mVtm_cV-W', 'VbsCYUATvqlt5w', + ), + ( 'https://www.youtube.com/s/player/dac945fd/player_ias.vflset/en_US/base.js', 'o8BkRxXhuYsBCWi6RplPdP', '3Lx32v_hmzTm6A', ), @@ -152,12 +228,128 @@ _NSIG_TESTS = [ ), ( 'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js', - 'qO0NiMtYQ7TeJnfFG2', 'k9cuJDHNS5O7kQ', + 'aCi3iElgd2kq0bxVbQ', 'QX1y8jGb2IbZ0w', + ), + ( + 'https://www.youtube.com/s/player/8c7583ff/player_ias.vflset/en_US/base.js', + '1wWCVpRR96eAmMI87L', 'KSkWAVv1ZQxC3A', ), ( 'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js', '_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ', ), + ( + 'https://www.youtube.com/s/player/590f65a6/player_ias.vflset/en_US/base.js', + '1tm7-g_A9zsI8_Lay_', 'xI4Vem4Put_rOg', + ), + ( + 'https://www.youtube.com/s/player/b22ef6e7/player_ias.vflset/en_US/base.js', + 'b6HcntHGkvBLk_FRf', 'kNPW6A7FyP2l8A', + ), + ( + 'https://www.youtube.com/s/player/3400486c/player_ias.vflset/en_US/base.js', + 'lL46g3XifCKUZn1Xfw', 'z767lhet6V2Skl', + ), + ( + 'https://www.youtube.com/s/player/5604538d/player_ias.vflset/en_US/base.js', + '7X-he4jjvMx7BCX', 'sViSydX8IHtdWA', + ), + ( + 'https://www.youtube.com/s/player/20dfca59/player_ias.vflset/en_US/base.js', + '-fLCxedkAk4LUTK2', 'O8kfRq1y1eyHGw', + ), + ( + 'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js', + 'keLa5R2U00sR9SQK', 'N1OGyujjEwMnLw', + ), + ( + 'https://www.youtube.com/s/player/3bb1f723/player_ias.vflset/en_US/base.js', + 'gK15nzVyaXE9RsMP3z', 'ZFFWFLPWx9DEgQ', + ), + ( + 'https://www.youtube.com/s/player/f8f53e1a/player_ias.vflset/en_US/base.js', + 'VTQOUOv0mCIeJ7i8kZB', 'kcfD8wy0sNLyNQ', + ), + ( + 'https://www.youtube.com/s/player/2f1832d2/player_ias.vflset/en_US/base.js', + 'YWt1qdbe8SAfkoPHW5d', 'RrRjWQOJmBiP', + ), + ( + 'https://www.youtube.com/s/player/9c6dfc4a/player_ias.vflset/en_US/base.js', + 'jbu7ylIosQHyJyJV', 'uwI0ESiynAmhNg', + ), + ( + 'https://www.youtube.com/s/player/f6e09c70/player_ias.vflset/en_US/base.js', + 'W9HJZKktxuYoDTqW', 'jHbbkcaxm54', + ), + ( + 'https://www.youtube.com/s/player/f6e09c70/player_ias_tce.vflset/en_US/base.js', + 'W9HJZKktxuYoDTqW', 'jHbbkcaxm54', + ), + ( + 'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js', + 'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ', + ), + ( + 'https://www.youtube.com/s/player/d50f54ef/player_ias_tce.vflset/en_US/base.js', + 'Ha7507LzRmH3Utygtj', 'XFTb2HoeOE5MHg', + ), + ( + 'https://www.youtube.com/s/player/074a8365/player_ias_tce.vflset/en_US/base.js', + 'Ha7507LzRmH3Utygtj', 'ufTsrE0IVYrkl8v', + ), + ( + 'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js', + 'N5uAlLqm0eg1GyHO', 'dCBQOejdq5s-ww', + ), + ( + 'https://www.youtube.com/s/player/69f581a5/tv-player-ias.vflset/tv-player-ias.js', + '-qIP447rVlTTwaZjY', 'KNcGOksBAvwqQg', + ), + ( + 
'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', + 'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA', + ), + ( + 'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js', + 'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA', + ), + ( + 'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js', + 'eWYu5d5YeY_4LyEDc', 'XJQqf-N7Xra3gg', + ), + ( + 'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js', + 'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A', + ), + ( + 'https://www.youtube.com/s/player/4fcd6e4a/tv-player-ias.vflset/tv-player-ias.js', + 'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A', + ), + ( + 'https://www.youtube.com/s/player/20830619/tv-player-ias.vflset/tv-player-ias.js', + 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4', + ), + ( + 'https://www.youtube.com/s/player/20830619/player-plasma-ias-phone-en_US.vflset/base.js', + 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4', + ), + ( + 'https://www.youtube.com/s/player/20830619/player-plasma-ias-tablet-en_US.vflset/base.js', + 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4', + ), + ( + 'https://www.youtube.com/s/player/8a8ac953/player_ias_tce.vflset/en_US/base.js', + 'MiBYeXx_vRREbiCCmh', 'RtZYMVvmkE0JE', + ), + ( + 'https://www.youtube.com/s/player/8a8ac953/tv-player-es6.vflset/tv-player-es6.js', + 'MiBYeXx_vRREbiCCmh', 'RtZYMVvmkE0JE', + ), + ( + 'https://www.youtube.com/s/player/aa3fc80b/player_ias.vflset/en_US/base.js', + '0qY9dal2uzOnOGwa-48hha', 'VSh1KDfQMk-eag', + ), ] @@ -170,6 +362,8 @@ class TestPlayerInfo(unittest.TestCase): ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'), ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'), ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'), + ('https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js', 'e7567ecf'), + ('https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', '643afba4'), # obsolete ('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'), ('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'), @@ -179,8 +373,9 @@ class TestPlayerInfo(unittest.TestCase): ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'), ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'), ) + ie = YoutubeIE(FakeYDL({'cachedir': False})) for player_url, expected_player_id in PLAYER_URLS: - player_id = YoutubeIE._extract_player_info(player_url) + player_id = ie._extract_player_info(player_url) self.assertEqual(player_id, expected_player_id) @@ -192,21 +387,19 @@ class TestSignature(unittest.TestCase): os.mkdir(self.TESTDATA_DIR) def tearDown(self): - try: + with compat_contextlib_suppress(OSError): for f in os.listdir(self.TESTDATA_DIR): os.remove(f) - except OSError: - pass def t_factory(name, sig_func, url_pattern): def make_tfunc(url, sig_input, expected_sig): m = url_pattern.match(url) - assert m, '%r should follow URL format' % url - test_id = m.group('id') + assert m, '{0!r} should follow URL format'.format(url) + test_id = re.sub(r'[/.-]', '_', m.group('id') or m.group('compat_id')) def test_func(self): - basename = 'player-{0}-{1}.js'.format(name, test_id) + basename = 'player-{0}.js'.format(test_id) fn = os.path.join(self.TESTDATA_DIR, basename) if not os.path.exists(fn): @@ -221,7 +414,7 @@ def t_factory(name, sig_func, url_pattern): def signature(jscode, 
sig_input): - func = YoutubeIE(FakeYDL())._parse_sig_js(jscode) + func = YoutubeIE(FakeYDL({'cachedir': False}))._parse_sig_js(jscode) src_sig = ( compat_str(string.printable[:sig_input]) if isinstance(sig_input, int) else sig_input) @@ -229,17 +422,23 @@ def signature(jscode, sig_input): def n_sig(jscode, sig_input): - funcname = YoutubeIE(FakeYDL())._extract_n_function_name(jscode) - return JSInterpreter(jscode).call_function(funcname, sig_input) + ie = YoutubeIE(FakeYDL({'cachedir': False})) + jsi = JSInterpreter(jscode) + jsi, _, func_code = ie._extract_n_function_code_jsi(sig_input, jsi) + return ie._extract_n_function_from_code(jsi, func_code)(sig_input) make_sig_test = t_factory( - 'signature', signature, re.compile(r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$')) + 'signature', signature, + re.compile(r'''(?x) + .+/(?P<h5>html5)?player(?(h5)(?:-en_US)?-|/)(?P<id>[a-zA-Z0-9/._-]+) + (?(h5)/(?:watch_as3|html5player))?\.js$ + ''')) for test_spec in _SIG_TESTS: make_sig_test(*test_spec) make_nsig_test = t_factory( - 'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_-]+)/.+.js$')) + 'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$')) for test_spec in _NSIG_TESTS: make_nsig_test(*test_spec) diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 6f2aba5ac..8367b6e53 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -7,6 +7,7 @@ import collections import copy import datetime import errno +import functools import io import itertools import json @@ -53,6 +54,7 @@ from .compat import ( compat_urllib_request_DataHandler, ) from .utils import ( + _UnsafeExtensionError, age_restricted, args_to_str, bug_reports_message, @@ -129,6 +131,20 @@ if compat_os_name == 'nt': import ctypes +def _catch_unsafe_file_extension(func): + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + try: + return func(self, *args, **kwargs) + except _UnsafeExtensionError as error: + self.report_error( + '{0} found; to avoid damaging your system, this value is disallowed.' + ' If you believe this is an error{1}'.format( + error_to_compat_str(error), bug_reports_message(','))) + + return wrapper + + class YoutubeDL(object): """YoutubeDL class. 
@@ -524,10 +540,14 @@ class YoutubeDL(object): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) - def _write_string(self, s, out=None): + def _write_string(self, s, out=None, only_once=False, _cache=set()): + if only_once and s in _cache: + return write_string(s, out=out, encoding=self.params.get('encoding')) + if only_once: + _cache.add(s) - def to_stdout(self, message, skip_eol=False, check_quiet=False): + def to_stdout(self, message, skip_eol=False, check_quiet=False, only_once=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) @@ -536,9 +556,9 @@ class YoutubeDL(object): terminator = ['\n', ''][skip_eol] output = message + terminator - self._write_string(output, self._screen_file) + self._write_string(output, self._screen_file, only_once=only_once) - def to_stderr(self, message): + def to_stderr(self, message, only_once=False): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): @@ -546,7 +566,7 @@ class YoutubeDL(object): else: message = self._bidi_workaround(message) output = message + '\n' - self._write_string(output, self._err_file) + self._write_string(output, self._err_file, only_once=only_once) def to_console_title(self, message): if not self.params.get('consoletitle', False): @@ -625,18 +645,11 @@ class YoutubeDL(object): raise DownloadError(message, exc_info) self._download_retcode = 1 - def report_warning(self, message, only_once=False, _cache={}): + def report_warning(self, message, only_once=False): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' - if only_once: - m_hash = hash((self, message)) - m_cnt = _cache.setdefault(m_hash, 0) - _cache[m_hash] = m_cnt + 1 - if m_cnt > 0: - return - if self.params.get('logger') is not None: self.params['logger'].warning(message) else: @@ -647,7 +660,7 @@ class YoutubeDL(object): else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) - self.to_stderr(warning_message) + self.to_stderr(warning_message, only_once=only_once) def report_error(self, message, *args, **kwargs): ''' @@ -661,6 +674,16 @@ class YoutubeDL(object): kwargs['message'] = '%s %s' % (_msg_header, message) self.trouble(*args, **kwargs) + def write_debug(self, message, only_once=False): + '''Log debug message or Print message to stderr''' + if not self.params.get('verbose', False): + return + message = '[debug] {0}'.format(message) + if self.params.get('logger'): + self.params['logger'].debug(message) + else: + self.to_stderr(message, only_once) + def report_unscoped_cookies(self, *args, **kwargs): # message=None, tb=False, is_error=False if len(args) <= 2: @@ -1039,8 +1062,8 @@ class YoutubeDL(object): elif result_type in ('playlist', 'multi_video'): # Protect from infinite recursion due to recursively nested playlists # (see https://github.com/ytdl-org/youtube-dl/issues/27833) - webpage_url = ie_result['webpage_url'] - if webpage_url in self._playlist_urls: + webpage_url = ie_result.get('webpage_url') # not all pl/mv have this + if webpage_url and webpage_url in self._playlist_urls: self.to_screen( '[download] Skipping already downloaded playlist: %s' % ie_result.get('title') or ie_result.get('id')) @@ -1048,6 +1071,10 @@ class YoutubeDL(object): self._playlist_level += 1 self._playlist_urls.add(webpage_url) + new_result = dict((k, v) for k, v in extra_info.items() if k not in 
ie_result) + if new_result: + new_result.update(ie_result) + ie_result = new_result try: return self.__process_playlist(ie_result, download) finally: @@ -1593,6 +1620,28 @@ class YoutubeDL(object): self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') + def _fill_common_fields(self, info_dict, final=True): + + for ts_key, date_key in ( + ('timestamp', 'upload_date'), + ('release_timestamp', 'release_date'), + ): + if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None: + # Working around out-of-range timestamp values (e.g. negative ones on Windows, + # see http://bugs.python.org/issue1646728) + try: + upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key]) + info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d')) + except (ValueError, OverflowError, OSError): + pass + + # Auto generate title fields corresponding to the *_number fields when missing + # in order to always have clean titles. This is very common for TV series. + if final: + for field in ('chapter', 'season', 'episode'): + if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): + info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) + def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' @@ -1660,24 +1709,7 @@ class YoutubeDL(object): if 'display_id' not in info_dict and 'id' in info_dict: info_dict['display_id'] = info_dict['id'] - for ts_key, date_key in ( - ('timestamp', 'upload_date'), - ('release_timestamp', 'release_date'), - ): - if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None: - # Working around out-of-range timestamp values (e.g. negative ones on Windows, - # see http://bugs.python.org/issue1646728) - try: - upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key]) - info_dict[date_key] = compat_str(upload_date.strftime('%Y%m%d')) - except (ValueError, OverflowError, OSError): - pass - - # Auto generate title fields corresponding to the *_number fields when missing - # in order to always have clean titles. This is very common for TV series. 
- for field in ('chapter', 'season', 'episode'): - if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): - info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) + self._fill_common_fields(info_dict) for cc_kind in ('subtitles', 'automatic_captions'): cc = info_dict.get(cc_kind) @@ -1916,6 +1948,7 @@ class YoutubeDL(object): if self.params.get('forcejson', False): self.to_stdout(json.dumps(self.sanitize_info(info_dict))) + @_catch_unsafe_file_extension def process_info(self, info_dict): """Process a single resolved IE result.""" @@ -2088,18 +2121,26 @@ class YoutubeDL(object): # TODO: Check acodec/vcodec return False - filename_real_ext = os.path.splitext(filename)[1][1:] - filename_wo_ext = ( - os.path.splitext(filename)[0] - if filename_real_ext == info_dict['ext'] - else filename) + exts = [info_dict['ext']] requested_formats = info_dict['requested_formats'] if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats): info_dict['ext'] = 'mkv' self.report_warning( 'Requested formats are incompatible for merge and will be merged into mkv.') + exts.append(info_dict['ext']) + # Ensure filename always has a correct extension for successful merge - filename = '%s.%s' % (filename_wo_ext, info_dict['ext']) + def correct_ext(filename, ext=exts[1]): + if filename == '-': + return filename + f_name, f_real_ext = os.path.splitext(filename) + f_real_ext = f_real_ext[1:] + filename_wo_ext = f_name if f_real_ext in exts else filename + if ext is None: + ext = f_real_ext or None + return join_nonempty(filename_wo_ext, ext, delim='.') + + filename = correct_ext(filename) if os.path.exists(encodeFilename(filename)): self.to_screen( '[download] %s has already been downloaded and ' @@ -2109,8 +2150,9 @@ class YoutubeDL(object): new_info = dict(info_dict) new_info.update(f) fname = prepend_extension( - self.prepare_filename(new_info), - 'f%s' % f['format_id'], new_info['ext']) + correct_ext( + self.prepare_filename(new_info), new_info['ext']), + 'f%s' % (f['format_id'],), new_info['ext']) if not ensure_dir_exists(fname): return downloaded.append(fname) @@ -2479,7 +2521,7 @@ class YoutubeDL(object): self.get_encoding())) write_string(encoding_str, encoding=None) - writeln_debug = lambda *s: self._write_string('[debug] %s\n' % (''.join(s), )) + writeln_debug = lambda *s: self.write_debug(''.join(s)) writeln_debug('youtube-dl version ', __version__) if _LAZY_LOADER: writeln_debug('Lazy loading extractors enabled') diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index cc8285eba..3c1272e7b 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -18,9 +18,10 @@ from .compat import ( compat_getpass, compat_register_utf8, compat_shlex_split, - workaround_optparse_bug9161, + _workaround_optparse_bug9161, ) from .utils import ( + _UnsafeExtensionError, DateRange, decodeOption, DEFAULT_OUTTMPL, @@ -49,7 +50,7 @@ def _real_main(argv=None): # Compatibility fix for Windows compat_register_utf8() - workaround_optparse_bug9161() + _workaround_optparse_bug9161() setproctitle('youtube-dl') @@ -173,6 +174,9 @@ def _real_main(argv=None): if opts.ap_mso and opts.ap_mso not in MSO_INFO: parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers') + if opts.no_check_extensions: + _UnsafeExtensionError.lenient = True + def parse_retries(retries): if retries in ('inf', 'infinite'): parsed_retries = float('inf') diff --git a/youtube_dl/cache.py b/youtube_dl/cache.py index 
54123da0e..eb0a729c2 100644 --- a/youtube_dl/cache.py +++ b/youtube_dl/cache.py @@ -1,3 +1,4 @@ +# coding: utf-8 from __future__ import unicode_literals import errno @@ -10,12 +11,14 @@ import traceback from .compat import ( compat_getenv, compat_open as open, + compat_os_makedirs, ) from .utils import ( error_to_compat_str, + escape_rfc3986, expand_path, is_outdated_version, - try_get, + traverse_obj, write_json_file, ) from .version import __version__ @@ -30,23 +33,35 @@ class Cache(object): def __init__(self, ydl): self._ydl = ydl + def _write_debug(self, *args, **kwargs): + self._ydl.write_debug(*args, **kwargs) + + def _report_warning(self, *args, **kwargs): + self._ydl.report_warning(*args, **kwargs) + + def _to_screen(self, *args, **kwargs): + self._ydl.to_screen(*args, **kwargs) + + def _get_param(self, k, default=None): + return self._ydl.params.get(k, default) + def _get_root_dir(self): - res = self._ydl.params.get('cachedir') + res = self._get_param('cachedir') if res is None: cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache') res = os.path.join(cache_root, self._YTDL_DIR) return expand_path(res) def _get_cache_fn(self, section, key, dtype): - assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \ + assert re.match(r'^[\w.-]+$', section), \ 'invalid section %r' % section - assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key + key = escape_rfc3986(key, safe='').replace('%', ',') # encode non-ascii characters return os.path.join( self._get_root_dir(), section, '%s.%s' % (key, dtype)) @property def enabled(self): - return self._ydl.params.get('cachedir') is not False + return self._get_param('cachedir') is not False def store(self, section, key, data, dtype='json'): assert dtype in ('json',) @@ -56,61 +71,75 @@ class Cache(object): fn = self._get_cache_fn(section, key, dtype) try: - try: - os.makedirs(os.path.dirname(fn)) - except OSError as ose: - if ose.errno != errno.EEXIST: - raise + compat_os_makedirs(os.path.dirname(fn), exist_ok=True) + self._write_debug('Saving {section}.{key} to cache'.format(section=section, key=key)) write_json_file({self._VERSION_KEY: __version__, 'data': data}, fn) except Exception: tb = traceback.format_exc() - self._ydl.report_warning( - 'Writing cache to %r failed: %s' % (fn, tb)) + self._report_warning('Writing cache to {fn!r} failed: {tb}'.format(fn=fn, tb=tb)) + + def clear(self, section, key, dtype='json'): + + if not self.enabled: + return + + fn = self._get_cache_fn(section, key, dtype) + self._write_debug('Clearing {section}.{key} from cache'.format(section=section, key=key)) + try: + os.remove(fn) + except Exception as e: + if getattr(e, 'errno', None) == errno.ENOENT: + # file not found + return + tb = traceback.format_exc() + self._report_warning('Clearing cache from {fn!r} failed: {tb}'.format(fn=fn, tb=tb)) def _validate(self, data, min_ver): - version = try_get(data, lambda x: x[self._VERSION_KEY]) + version = traverse_obj(data, self._VERSION_KEY) if not version: # Backward compatibility data, version = {'data': data}, self._DEFAULT_VERSION if not is_outdated_version(version, min_ver or '0', assume_new=False): return data['data'] - self._ydl.to_screen( - 'Discarding old cache from version {version} (needs {min_ver})'.format(**locals())) + self._write_debug('Discarding old cache from version {version} (needs {min_ver})'.format(version=version, min_ver=min_ver)) - def load(self, section, key, dtype='json', default=None, min_ver=None): + def load(self, section, key, dtype='json', default=None, **kw_min_ver): assert dtype in
('json',) + min_ver = kw_min_ver.get('min_ver') if not self.enabled: return default cache_fn = self._get_cache_fn(section, key, dtype) try: + with open(cache_fn, encoding='utf-8') as cachef: + self._write_debug('Loading {section}.{key} from cache'.format(section=section, key=key), only_once=True) + return self._validate(json.load(cachef), min_ver) + except (ValueError, KeyError): try: - with open(cache_fn, 'r', encoding='utf-8') as cachef: - return self._validate(json.load(cachef), min_ver) - except ValueError: - try: - file_size = os.path.getsize(cache_fn) - except (OSError, IOError) as oe: - file_size = error_to_compat_str(oe) - self._ydl.report_warning( - 'Cache retrieval from %s failed (%s)' % (cache_fn, file_size)) - except IOError: - pass # No cache available + file_size = 'size: %d' % os.path.getsize(cache_fn) + except (OSError, IOError) as oe: + file_size = error_to_compat_str(oe) + self._report_warning('Cache retrieval from %s failed (%s)' % (cache_fn, file_size)) + except Exception as e: + if getattr(e, 'errno', None) == errno.ENOENT: + # no cache available + return default + self._report_warning('Cache retrieval from %s failed' % (cache_fn,)) return default def remove(self): if not self.enabled: - self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)') + self._to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)') return cachedir = self._get_root_dir() if not any((term in cachedir) for term in ('cache', 'tmp')): - raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir) + raise Exception('Not removing directory %s - this does not look like a cache dir' % (cachedir,)) - self._ydl.to_screen( - 'Removing cache dir %s .' % cachedir, skip_eol=True) + self._to_screen( + 'Removing cache dir %s .' % (cachedir,), skip_eol=True) if os.path.exists(cachedir): - self._ydl.to_screen('.', skip_eol=True) + self._to_screen('.', skip_eol=True) shutil.rmtree(cachedir) - self._ydl.to_screen('.') + self._to_screen('.') diff --git a/youtube_dl/casefold.py b/youtube_dl/casefold.py index ad9c66f8e..712b2e7fa 100644 --- a/youtube_dl/casefold.py +++ b/youtube_dl/casefold.py @@ -10,9 +10,10 @@ from .compat import ( # https://github.com/unicode-org/icu/blob/main/icu4c/source/data/unidata/CaseFolding.txt # In case newly foldable Unicode characters are defined, paste the new version # of the text inside the ''' marks. -# The text is expected to have only blank lines andlines with 1st character #, +# The text is expected to have only blank lines and lines with 1st character #, # all ignored, and fold definitions like this: -# `from_hex_code; space_separated_to_hex_code_list; comment` +# `from_hex_code; status; space_separated_to_hex_code_list; comment` +# Only `status` C/F are used.
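+# For illustration, two genuine CaseFolding.txt entries of that form (status C
+# marks a common single-character fold, status F a full multi-character fold):
+#   `0041; C; 0061; # LATIN CAPITAL LETTER A` ('A' folds to 'a')
+#   `00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S` ('ß' folds to 'ss')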
_map_str = ''' # CaseFolding-15.0.0.txt @@ -1657,11 +1658,6 @@ _map = dict( del _map_str -def casefold(s): +def _casefold(s): assert isinstance(s, compat_str) return ''.join((_map.get(c, c) for c in s)) - - -__all__ = [ - 'casefold', -] diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py index ed1a33cf2..ebe22bdf9 100644 --- a/youtube_dl/compat.py +++ b/youtube_dl/compat.py @@ -16,7 +16,6 @@ import os import platform import re import shlex -import shutil import socket import struct import subprocess @@ -24,11 +23,15 @@ import sys import types import xml.etree.ElementTree +_IDENTITY = lambda x: x + # naming convention # 'compat_' + Python3_name.replace('.', '_') # other aliases exist for convenience and/or legacy +# wrap disposable test values in type() to reclaim storage -# deal with critical unicode/str things first +# deal with critical unicode/str things first: +# compat_str, compat_basestring, compat_chr try: # Python 2 compat_str, compat_basestring, compat_chr = ( @@ -39,18 +42,23 @@ except NameError: str, (str, bytes), chr ) -# casefold + +# compat_casefold try: compat_str.casefold compat_casefold = lambda s: s.casefold() except AttributeError: - from .casefold import casefold as compat_casefold + from .casefold import _casefold as compat_casefold + +# compat_collections_abc try: import collections.abc as compat_collections_abc except ImportError: import collections as compat_collections_abc + +# compat_urllib_request try: import urllib.request as compat_urllib_request except ImportError: # Python 2 @@ -79,11 +87,15 @@ except TypeError: _add_init_method_arg(compat_urllib_request.Request) del _add_init_method_arg + +# compat_urllib_error try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error + +# compat_urllib_parse try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 @@ -98,17 +110,23 @@ except ImportError: # Python 2 compat_urlparse = compat_urllib_parse compat_urllib_parse_urlparse = compat_urllib_parse.urlparse + +# compat_urllib_response try: import urllib.response as compat_urllib_response except ImportError: # Python 2 import urllib as compat_urllib_response + +# compat_urllib_response.addinfourl try: compat_urllib_response.addinfourl.status except AttributeError: # .getcode() is deprecated in Py 3. 
compat_urllib_response.addinfourl.status = property(lambda self: self.getcode()) + +# compat_http_cookiejar try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 @@ -127,12 +145,16 @@ else: compat_cookiejar_Cookie = compat_cookiejar.Cookie compat_http_cookiejar_Cookie = compat_cookiejar_Cookie + +# compat_http_cookies try: import http.cookies as compat_cookies except ImportError: # Python 2 import Cookie as compat_cookies compat_http_cookies = compat_cookies + +# compat_http_cookies_SimpleCookie if sys.version_info[0] == 2 or sys.version_info < (3, 3): class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie): def load(self, rawdata): @@ -155,11 +177,15 @@ else: compat_cookies_SimpleCookie = compat_cookies.SimpleCookie compat_http_cookies_SimpleCookie = compat_cookies_SimpleCookie + +# compat_html_entities, probably useless now try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities + +# compat_html_entities_html5 try: # Python >= 3.3 compat_html_entities_html5 = compat_html_entities.html5 except AttributeError: @@ -2408,18 +2434,24 @@ except AttributeError: # Py < 3.1 compat_http_client.HTTPResponse.getcode = lambda self: self.status + +# compat_urllib_HTTPError try: from urllib.error import HTTPError as compat_HTTPError except ImportError: # Python 2 from urllib2 import HTTPError as compat_HTTPError compat_urllib_HTTPError = compat_HTTPError + +# compat_urllib_request_urlretrieve try: from urllib.request import urlretrieve as compat_urlretrieve except ImportError: # Python 2 from urllib import urlretrieve as compat_urlretrieve compat_urllib_request_urlretrieve = compat_urlretrieve + +# compat_html_parser_HTMLParser, compat_html_parser_HTMLParseError try: from HTMLParser import ( HTMLParser as compat_HTMLParser, @@ -2432,22 +2464,33 @@ except ImportError: # Python 3 # HTMLParseError was deprecated in Python 3.3 and removed in # Python 3.5. Introducing dummy exception for Python >3.5 for compatible # and uniform cross-version exception handling + class compat_HTMLParseError(Exception): pass + compat_html_parser_HTMLParser = compat_HTMLParser compat_html_parser_HTMLParseError = compat_HTMLParseError + +# compat_subprocess_get_DEVNULL try: _DEVNULL = subprocess.DEVNULL compat_subprocess_get_DEVNULL = lambda: _DEVNULL except AttributeError: compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w') + +# compat_http_server try: import http.server as compat_http_server except ImportError: import BaseHTTPServer as compat_http_server + +# compat_urllib_parse_unquote_to_bytes, +# compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, +# compat_urllib_parse_urlencode, +# compat_urllib_parse_parse_qs try: from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes from urllib.parse import unquote as compat_urllib_parse_unquote @@ -2455,8 +2498,7 @@ try: from urllib.parse import urlencode as compat_urllib_parse_urlencode from urllib.parse import parse_qs as compat_parse_qs except ImportError: # Python 2 - _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire') - else re.compile(r'([\x00-\x7f]+)')) + _asciire = getattr(compat_urllib_parse, '_asciire', None) or re.compile(r'([\x00-\x7f]+)') # HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus # implementations from cpython 3.4.3's stdlib. 
Python 2's version @@ -2524,24 +2566,21 @@ except ImportError: # Python 2 # Possible solutions are to either port it from python 3 with all # the friends or manually ensure input query contains only byte strings. # We will stick with latter thus recursively encoding the whole query. - def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'): + def compat_urllib_parse_urlencode(query, doseq=0, safe='', encoding='utf-8', errors='strict'): + def encode_elem(e): if isinstance(e, dict): e = encode_dict(e) elif isinstance(e, (list, tuple,)): - list_e = encode_list(e) - e = tuple(list_e) if isinstance(e, tuple) else list_e + e = type(e)(encode_elem(el) for el in e) elif isinstance(e, compat_str): - e = e.encode(encoding) + e = e.encode(encoding, errors) return e def encode_dict(d): - return dict((encode_elem(k), encode_elem(v)) for k, v in d.items()) + return tuple((encode_elem(k), encode_elem(v)) for k, v in d.items()) - def encode_list(l): - return [encode_elem(e) for e in l] - - return compat_urllib_parse._urlencode(encode_elem(query), doseq=doseq) + return compat_urllib_parse._urlencode(encode_elem(query), doseq=doseq).decode('ascii') # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib. # Python 2's version is apparently totally broken @@ -2596,8 +2635,61 @@ except ImportError: # Python 2 ('parse_qs', compat_parse_qs)): setattr(compat_urllib_parse, name, fix) + try: + all(chr(i) in b'' for i in range(256)) + except TypeError: + # not all chr(i) are str: patch Python2 quote + + _safemaps = getattr(compat_urllib_parse, '_safemaps', {}) + _always_safe = frozenset(compat_urllib_parse.always_safe) + + def _quote(s, safe='/'): + """quote('abc def') -> 'abc%20def'""" + + if not s and s is not None: # fast path + return s + safe = frozenset(safe) + cachekey = (safe, _always_safe) + try: + safe_map = _safemaps[cachekey] + except KeyError: + safe = _always_safe | safe + safe_map = {} + for i in range(256): + c = chr(i) + safe_map[c] = ( + c if (i < 128 and c in safe) + else b'%{0:02X}'.format(i)) + _safemaps[cachekey] = safe_map + + if safe.issuperset(s): + return s + return ''.join(safe_map[c] for c in s) + + # linked code + def _quote_plus(s, safe=''): + return ( + _quote(s, safe + b' ').replace(b' ', b'+') if b' ' in s + else _quote(s, safe)) + + # linked code + def _urlcleanup(): + if compat_urllib_parse._urlopener: + compat_urllib_parse._urlopener.cleanup() + _safemaps.clear() + compat_urllib_parse.ftpcache.clear() + + for name, fix in ( + ('quote', _quote), + ('quote_plus', _quote_plus), + ('urlcleanup', _urlcleanup)): + setattr(compat_urllib_parse, '_' + name, getattr(compat_urllib_parse, name)) + setattr(compat_urllib_parse, name, fix) + compat_urllib_parse_parse_qs = compat_parse_qs + +# compat_urllib_request_DataHandler try: from urllib.request import DataHandler as compat_urllib_request_DataHandler except ImportError: # Python < 3.4 @@ -2632,16 +2724,20 @@ except ImportError: # Python < 3.4 return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url) + +# compat_xml_etree_ElementTree_ParseError try: from xml.etree.ElementTree import ParseError as compat_xml_parse_error except ImportError: # Python 2.6 from xml.parsers.expat import ExpatError as compat_xml_parse_error compat_xml_etree_ElementTree_ParseError = compat_xml_parse_error -etree = xml.etree.ElementTree +# compat_xml_etree_ElementTree_Element +_etree = xml.etree.ElementTree -class _TreeBuilder(etree.TreeBuilder): + +class _TreeBuilder(_etree.TreeBuilder): def doctype(self, name, 
pubid, system): pass @@ -2650,7 +2746,7 @@ try: # xml.etree.ElementTree.Element is a method in Python <=2.6 and # the following will crash with: # TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types - isinstance(None, etree.Element) + isinstance(None, _etree.Element) from xml.etree.ElementTree import Element as compat_etree_Element except TypeError: # Python <=2.6 from xml.etree.ElementTree import _ElementInterface as compat_etree_Element @@ -2658,12 +2754,12 @@ compat_xml_etree_ElementTree_Element = compat_etree_Element if sys.version_info[0] >= 3: def compat_etree_fromstring(text): - return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder())) + return _etree.XML(text, parser=_etree.XMLParser(target=_TreeBuilder())) else: # python 2.x tries to encode unicode strings with ascii (see the # XMLParser._fixtext method) try: - _etree_iter = etree.Element.iter + _etree_iter = _etree.Element.iter except AttributeError: # Python <=2.6 def _etree_iter(root): for el in root.findall('*'): @@ -2675,27 +2771,29 @@ else: # 2.7 source def _XML(text, parser=None): if not parser: - parser = etree.XMLParser(target=_TreeBuilder()) + parser = _etree.XMLParser(target=_TreeBuilder()) parser.feed(text) return parser.close() def _element_factory(*args, **kwargs): - el = etree.Element(*args, **kwargs) + el = _etree.Element(*args, **kwargs) for k, v in el.items(): if isinstance(v, bytes): el.set(k, v.decode('utf-8')) return el def compat_etree_fromstring(text): - doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory))) + doc = _XML(text, parser=_etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory))) for el in _etree_iter(doc): if el.text is not None and isinstance(el.text, bytes): el.text = el.text.decode('utf-8') return doc -if hasattr(etree, 'register_namespace'): - compat_etree_register_namespace = etree.register_namespace -else: + +# compat_xml_etree_register_namespace +try: + compat_etree_register_namespace = _etree.register_namespace +except AttributeError: def compat_etree_register_namespace(prefix, uri): """Register a namespace prefix. The registry is global, and any existing mapping for either the @@ -2704,14 +2802,16 @@ else: attributes in this namespace will be serialized with prefix if possible. ValueError is raised if prefix is reserved or is invalid. """ - if re.match(r"ns\d+$", prefix): - raise ValueError("Prefix format reserved for internal use") - for k, v in list(etree._namespace_map.items()): + if re.match(r'ns\d+$', prefix): + raise ValueError('Prefix format reserved for internal use') + for k, v in list(_etree._namespace_map.items()): if k == uri or v == prefix: - del etree._namespace_map[k] - etree._namespace_map[uri] = prefix + del _etree._namespace_map[k] + _etree._namespace_map[uri] = prefix compat_xml_etree_register_namespace = compat_etree_register_namespace + +# compat_xpath, compat_etree_iterfind if sys.version_info < (2, 7): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! @@ -2898,7 +2998,6 @@ if sys.version_info < (2, 7): def __init__(self, root): self.root = root - ## # Generate all matching objects. 
def compat_etree_iterfind(elem, path, namespaces=None): @@ -2933,13 +3032,15 @@ if sys.version_info < (2, 7): else: - compat_xpath = lambda xpath: xpath compat_etree_iterfind = lambda element, match: element.iterfind(match) + compat_xpath = _IDENTITY +# compat_os_name compat_os_name = os._name if os.name == 'java' else os.name +# compat_shlex_quote if compat_os_name == 'nt': def compat_shlex_quote(s): return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"') @@ -2954,6 +3055,7 @@ else: return "'" + s.replace("'", "'\"'\"'") + "'" +# compat_shlex.split try: args = shlex.split('中文') assert (isinstance(args, list) @@ -2969,6 +3071,7 @@ except (AssertionError, UnicodeEncodeError): return list(map(lambda s: s.decode('utf-8'), shlex.split(s, comments, posix))) +# compat_ord def compat_ord(c): if isinstance(c, int): return c @@ -2976,6 +3079,7 @@ def compat_ord(c): return ord(c) +# compat_getenv, compat_os_path_expanduser, compat_setenv if sys.version_info >= (3, 0): compat_getenv = os.getenv compat_expanduser = os.path.expanduser @@ -3063,6 +3167,22 @@ else: compat_os_path_expanduser = compat_expanduser +# compat_os_makedirs +try: + os.makedirs('.', exist_ok=True) + compat_os_makedirs = os.makedirs +except TypeError: # < Py3.2 + from errno import EEXIST as _errno_EEXIST + + def compat_os_makedirs(name, mode=0o777, exist_ok=False): + try: + return os.makedirs(name, mode=mode) + except OSError as ose: + if not (exist_ok and ose.errno == _errno_EEXIST): + raise + + +# compat_os_path_realpath if compat_os_name == 'nt' and sys.version_info < (3, 8): # os.path.realpath on Windows does not follow symbolic links # prior to Python 3.8 (see https://bugs.python.org/issue9949) @@ -3076,6 +3196,7 @@ else: compat_os_path_realpath = compat_realpath +# compat_print if sys.version_info < (3, 0): def compat_print(s): from .utils import preferredencoding @@ -3086,6 +3207,7 @@ else: print(s) +# compat_getpass_getpass if sys.version_info < (3, 0) and sys.platform == 'win32': def compat_getpass(prompt, *args, **kwargs): if isinstance(prompt, compat_str): @@ -3098,36 +3220,42 @@ else: compat_getpass_getpass = compat_getpass +# compat_input try: compat_input = raw_input except NameError: # Python 3 compat_input = input +# compat_kwargs # Python < 2.6.5 require kwargs to be bytes try: - def _testfunc(x): - pass - _testfunc(**{'x': 0}) + (lambda x: x)(**{'x': 0}) except TypeError: def compat_kwargs(kwargs): return dict((bytes(k), v) for k, v in kwargs.items()) else: - compat_kwargs = lambda kwargs: kwargs + compat_kwargs = _IDENTITY +# compat_numeric_types try: compat_numeric_types = (int, float, long, complex) except NameError: # Python 3 compat_numeric_types = (int, float, complex) +# compat_integer_types try: compat_integer_types = (int, long) except NameError: # Python 3 compat_integer_types = (int, ) +# compat_int +compat_int = compat_integer_types[-1] + +# compat_socket_create_connection if sys.version_info < (2, 7): def compat_socket_create_connection(address, timeout, source_address=None): host, port = address @@ -3154,6 +3282,7 @@ else: compat_socket_create_connection = socket.create_connection +# compat_contextlib_suppress try: from contextlib import suppress as compat_contextlib_suppress except ImportError: @@ -3196,12 +3325,12 @@ except AttributeError: # repeated .close() is OK, but just in case with compat_contextlib_suppress(EnvironmentError): f.close() - popen.wait() + popen.wait() # Fix https://github.com/ytdl-org/youtube-dl/issues/4223 # See http://bugs.python.org/issue9161 for what is 
broken -def workaround_optparse_bug9161(): +def _workaround_optparse_bug9161(): op = optparse.OptionParser() og = optparse.OptionGroup(op, 'foo') try: @@ -3220,9 +3349,10 @@ def workaround_optparse_bug9161(): optparse.OptionGroup.add_option = _compat_add_option -if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3 - compat_get_terminal_size = shutil.get_terminal_size -else: +# compat_shutil_get_terminal_size +try: + from shutil import get_terminal_size as compat_get_terminal_size # Python >= 3.3 +except ImportError: _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines']) def compat_get_terminal_size(fallback=(80, 24)): @@ -3252,27 +3382,33 @@ else: columns = _columns if lines is None or lines <= 0: lines = _lines + return _terminal_size(columns, lines) +compat_shutil_get_terminal_size = compat_get_terminal_size + +# compat_itertools_count try: - itertools.count(start=0, step=1) + type(itertools.count(start=0, step=1)) compat_itertools_count = itertools.count -except TypeError: # Python 2.6 +except TypeError: # Python 2.6 lacks step def compat_itertools_count(start=0, step=1): while True: yield start start += step +# compat_tokenize_tokenize if sys.version_info >= (3, 0): from tokenize import tokenize as compat_tokenize_tokenize else: from tokenize import generate_tokens as compat_tokenize_tokenize +# compat_struct_pack, compat_struct_unpack, compat_Struct try: - struct.pack('!I', 0) + type(struct.pack('!I', 0)) except TypeError: # In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument # See https://bugs.python.org/issue19099 @@ -3304,8 +3440,10 @@ else: compat_Struct = struct.Struct -# compat_map/filter() returning an iterator, supposedly the -# same versioning as for zip below +# builtins returning an iterator + +# compat_map, compat_filter +# supposedly the same versioning as for zip below try: from future_builtins import map as compat_map except ImportError: @@ -3322,6 +3460,7 @@ except ImportError: except ImportError: compat_filter = filter +# compat_zip try: from future_builtins import zip as compat_zip except ImportError: # not 2.6+ or is 3.x @@ -3331,6 +3470,7 @@ except ImportError: # not 2.6+ or is 3.x compat_zip = zip +# compat_itertools_zip_longest # method renamed between Py2/3 try: from itertools import zip_longest as compat_itertools_zip_longest @@ -3338,7 +3478,8 @@ except ImportError: from itertools import izip_longest as compat_itertools_zip_longest -# new class in collections +# compat_collections_chain_map +# collections.ChainMap: new class try: from collections import ChainMap as compat_collections_chain_map # Py3.3's ChainMap is deficient @@ -3394,19 +3535,22 @@ except ImportError: def new_child(self, m=None, **kwargs): m = m or {} m.update(kwargs) - return compat_collections_chain_map(m, *self.maps) + # support inheritance ! + return type(self)(m, *self.maps) @property def parents(self): - return compat_collections_chain_map(*(self.maps[1:])) + return type(self)(*(self.maps[1:])) +# compat_re_Pattern, compat_re_Match # Pythons disagree on the type of a pattern (RegexObject, _sre.SRE_Pattern, Pattern, ...?) 
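+# (hence the derivation below: compile/match a trivial expression and take
+# type() of the result; a usage sketch, with 'pat' a hypothetical argument
+# that may be either a compiled pattern or a plain str:
+#   pat.pattern if isinstance(pat, compat_re_Pattern) else pat)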
compat_re_Pattern = type(re.compile('')) # and on the type of a match compat_re_Match = type(re.match('a', 'a')) +# compat_base64_b64decode if sys.version_info < (3, 3): def compat_b64decode(s, *args, **kwargs): if isinstance(s, compat_str): @@ -3418,6 +3562,7 @@ else: compat_base64_b64decode = compat_b64decode +# compat_ctypes_WINFUNCTYPE if platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (5, 4, 0): # PyPy2 prior to version 5.4.0 expects byte strings as Windows function # names, see the original PyPy issue [1] and the youtube-dl one [2]. @@ -3436,6 +3581,7 @@ else: return ctypes.WINFUNCTYPE(*args, **kwargs) +# compat_open if sys.version_info < (3, 0): # open(file, mode='r', buffering=- 1, encoding=None, errors=None, newline=None, closefd=True) not: opener=None def compat_open(file_, *args, **kwargs): @@ -3463,18 +3609,28 @@ except AttributeError: def compat_datetime_timedelta_total_seconds(td): return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 + # optional decompression packages +# compat_brotli # PyPi brotli package implements 'br' Content-Encoding try: import brotli as compat_brotli except ImportError: compat_brotli = None +# compat_ncompress # PyPi ncompress package implements 'compress' Content-Encoding try: import ncompress as compat_ncompress except ImportError: compat_ncompress = None +# compat_zstandard +# PyPi zstandard package implements 'zstd' Content-Encoding (RFC 8878 7.2) +try: + import zstandard as compat_zstandard +except ImportError: + compat_zstandard = None + legacy = [ 'compat_HTMLParseError', @@ -3491,6 +3647,7 @@ legacy = [ 'compat_getpass', 'compat_parse_qs', 'compat_realpath', + 'compat_shlex_split', 'compat_urllib_parse_parse_qs', 'compat_urllib_parse_unquote', 'compat_urllib_parse_unquote_plus', @@ -3504,8 +3661,6 @@ legacy = [ __all__ = [ - 'compat_html_parser_HTMLParseError', - 'compat_html_parser_HTMLParser', 'compat_Struct', 'compat_base64_b64decode', 'compat_basestring', @@ -3514,13 +3669,9 @@ __all__ = [ 'compat_chr', 'compat_collections_abc', 'compat_collections_chain_map', - 'compat_datetime_timedelta_total_seconds', - 'compat_http_cookiejar', - 'compat_http_cookiejar_Cookie', - 'compat_http_cookies', - 'compat_http_cookies_SimpleCookie', 'compat_contextlib_suppress', 'compat_ctypes_WINFUNCTYPE', + 'compat_datetime_timedelta_total_seconds', 'compat_etree_fromstring', 'compat_etree_iterfind', 'compat_filter', @@ -3529,9 +3680,16 @@ __all__ = [ 'compat_getpass_getpass', 'compat_html_entities', 'compat_html_entities_html5', + 'compat_html_parser_HTMLParseError', + 'compat_html_parser_HTMLParser', + 'compat_http_cookiejar', + 'compat_http_cookiejar_Cookie', + 'compat_http_cookies', + 'compat_http_cookies_SimpleCookie', 'compat_http_client', 'compat_http_server', 'compat_input', + 'compat_int', 'compat_integer_types', 'compat_itertools_count', 'compat_itertools_zip_longest', @@ -3541,6 +3699,7 @@ __all__ = [ 'compat_numeric_types', 'compat_open', 'compat_ord', + 'compat_os_makedirs', 'compat_os_name', 'compat_os_path_expanduser', 'compat_os_path_realpath', @@ -3550,7 +3709,7 @@ __all__ = [ 'compat_register_utf8', 'compat_setenv', 'compat_shlex_quote', - 'compat_shlex_split', + 'compat_shutil_get_terminal_size', 'compat_socket_create_connection', 'compat_str', 'compat_struct_pack', @@ -3570,5 +3729,5 @@ __all__ = [ 'compat_xml_etree_register_namespace', 'compat_xpath', 'compat_zip', - 'workaround_optparse_bug9161', + 'compat_zstandard', ] diff --git a/youtube_dl/extractor/bokecc.py 
b/youtube_dl/extractor/bokecc.py index 6017e8344..4b8bef391 100644 --- a/youtube_dl/extractor/bokecc.py +++ b/youtube_dl/extractor/bokecc.py @@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor): class BokeCCIE(BokeCCBaseIE): - _IE_DESC = 'CC视频' + IE_DESC = 'CC视频' _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)' _TESTS = [{ diff --git a/youtube_dl/extractor/cloudy.py b/youtube_dl/extractor/cloudy.py index 85ca20ecc..d39a9a5c2 100644 --- a/youtube_dl/extractor/cloudy.py +++ b/youtube_dl/extractor/cloudy.py @@ -9,7 +9,7 @@ from ..utils import ( class CloudyIE(InfoExtractor): - _IE_DESC = 'cloudy.ec' + IE_DESC = 'cloudy.ec' _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)' _TESTS = [{ 'url': 'https://www.cloudy.ec/v/af511e2527aac', diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 7fae9e57b..a64fcfccc 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -422,6 +422,8 @@ class InfoExtractor(object): _GEO_COUNTRIES = None _GEO_IP_BLOCKS = None _WORKING = True + # supply this in public subclasses: used in supported sites list, etc + # IE_DESC = 'short description of IE' def __init__(self, downloader=None): """Constructor. Receives an optional downloader.""" @@ -503,7 +505,7 @@ class InfoExtractor(object): if not self._x_forwarded_for_ip: # Geo bypass mechanism is explicitly disabled by user - if not self._downloader.params.get('geo_bypass', True): + if not self.get_param('geo_bypass', True): return if not geo_bypass_context: @@ -525,7 +527,7 @@ class InfoExtractor(object): # Explicit IP block specified by user, use it right away # regardless of whether extractor is geo bypassable or not - ip_block = self._downloader.params.get('geo_bypass_ip_block', None) + ip_block = self.get_param('geo_bypass_ip_block', None) # Otherwise use random IP block from geo bypass context but only # if extractor is known as geo bypassable @@ -536,8 +538,8 @@ class InfoExtractor(object): if ip_block: self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block) - if self._downloader.params.get('verbose', False): - self._downloader.to_screen( + if self.get_param('verbose', False): + self.to_screen( '[debug] Using fake IP %s as X-Forwarded-For.' % self._x_forwarded_for_ip) return @@ -546,7 +548,7 @@ class InfoExtractor(object): # Explicit country code specified by user, use it right away # regardless of whether extractor is geo bypassable or not - country = self._downloader.params.get('geo_bypass_country', None) + country = self.get_param('geo_bypass_country', None) # Otherwise use random country code from geo bypass context but # only if extractor is known as geo bypassable @@ -557,8 +559,8 @@ class InfoExtractor(object): if country: self._x_forwarded_for_ip = GeoUtils.random_ipv4(country) - if self._downloader.params.get('verbose', False): - self._downloader.to_screen( + if self.get_param('verbose', False): + self.to_screen( '[debug] Using fake IP %s (%s) as X-Forwarded-For.' 
% (self._x_forwarded_for_ip, country.upper())) @@ -584,9 +586,9 @@ class InfoExtractor(object): raise ExtractorError('An extractor error has occurred.', cause=e) def __maybe_fake_ip_and_retry(self, countries): - if (not self._downloader.params.get('geo_bypass_country', None) + if (not self.get_param('geo_bypass_country', None) and self._GEO_BYPASS - and self._downloader.params.get('geo_bypass', True) + and self.get_param('geo_bypass', True) and not self._x_forwarded_for_ip and countries): country_code = random.choice(countries) @@ -696,7 +698,7 @@ class InfoExtractor(object): if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: - self._downloader.report_warning(errmsg) + self.report_warning(errmsg) return False def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None): @@ -768,11 +770,11 @@ class InfoExtractor(object): webpage_bytes = prefix + webpage_bytes if not encoding: encoding = self._guess_encoding_from_content(content_type, webpage_bytes) - if self._downloader.params.get('dump_intermediate_pages', False): + if self.get_param('dump_intermediate_pages', False): self.to_screen('Dumping request to ' + urlh.geturl()) dump = base64.b64encode(webpage_bytes).decode('ascii') - self._downloader.to_screen(dump) - if self._downloader.params.get('write_pages', False): + self.to_screen(dump) + if self.get_param('write_pages', False): basen = '%s_%s' % (video_id, urlh.geturl()) if len(basen) > 240: h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest() @@ -974,19 +976,9 @@ class InfoExtractor(object): """Print msg to screen, prefixing it with '[ie_name]'""" self._downloader.to_screen(self.__ie_msg(msg)) - def write_debug(self, msg, only_once=False, _cache=[]): + def write_debug(self, msg, only_once=False): '''Log debug message or Print message to stderr''' - if not self.get_param('verbose', False): - return - message = '[debug] ' + self.__ie_msg(msg) - logger = self.get_param('logger') - if logger: - logger.debug(message) - else: - if only_once and hash(message) in _cache: - return - self._downloader.to_stderr(message) - _cache.append(hash(message)) + self._downloader.write_debug(self.__ie_msg(msg), only_once=only_once) # name, default=None, *args, **kwargs def get_param(self, name, *args, **kwargs): @@ -1082,7 +1074,7 @@ class InfoExtractor(object): if mobj: break - if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty(): + if not self.get_param('no_color') and compat_os_name != 'nt' and sys.stderr.isatty(): _name = '\033[0;34m%s\033[0m' % name else: _name = name @@ -1100,7 +1092,7 @@ class InfoExtractor(object): elif fatal: raise RegexNotFoundError('Unable to extract %s' % _name) else: - self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message()) + self.report_warning('unable to extract %s' % _name + bug_reports_message()) return None def _search_json(self, start_pattern, string, name, video_id, **kwargs): @@ -1169,10 +1161,10 @@ class InfoExtractor(object): def _get_netrc_login_info(self, netrc_machine=None): username = None password = None - netrc_machine = netrc_machine or self._NETRC_MACHINE - if self._downloader.params.get('usenetrc', False): + if self.get_param('usenetrc', False): try: + netrc_machine = netrc_machine or self._NETRC_MACHINE info = netrc.netrc().authenticators(netrc_machine) if info is not None: username = info[0] @@ -1180,8 +1172,8 @@ class InfoExtractor(object): else: 
raise netrc.NetrcParseError( 'No authenticators for %s' % netrc_machine) - except (IOError, netrc.NetrcParseError) as err: - self._downloader.report_warning( + except (AttributeError, IOError, netrc.NetrcParseError) as err: + self.report_warning( 'parsing .netrc: %s' % error_to_compat_str(err)) return username, password @@ -1218,10 +1210,10 @@ class InfoExtractor(object): """ if self._downloader is None: return None - downloader_params = self._downloader.params - if downloader_params.get('twofactor') is not None: - return downloader_params['twofactor'] + twofactor = self.get_param('twofactor') + if twofactor is not None: + return twofactor return compat_getpass('Type %s and press [Return]: ' % note) @@ -1356,7 +1348,7 @@ class InfoExtractor(object): elif fatal: raise RegexNotFoundError('Unable to extract JSON-LD') else: - self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message()) + self.report_warning('unable to extract JSON-LD %s' % bug_reports_message()) return {} def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None): @@ -1490,14 +1482,18 @@ class InfoExtractor(object): return dict((k, v) for k, v in info.items() if v is not None) def _search_nextjs_data(self, webpage, video_id, **kw): - nkw = dict((k, v) for k, v in kw.items() if k in ('transform_source', 'fatal')) - kw.pop('transform_source', None) - next_data = self._search_regex( - r'''<script[^>]+\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>(?P<nd>[^<]+)</script>''', - webpage, 'next.js data', group='nd', **kw) - if not next_data: - return {} - return self._parse_json(next_data, video_id, **nkw) + # ..., *, transform_source=None, fatal=True, default=NO_DEFAULT + + # TODO: remove this backward compat + default = kw.get('default', NO_DEFAULT) + if default == '{}': + kw['default'] = {} + kw = compat_kwargs(kw) + + return self._search_json( + r'''<script\s[^>]*?\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>''', + webpage, 'next.js data', video_id, end_pattern='</script>', + **kw) def _search_nuxt_data(self, webpage, video_id, *args, **kwargs): """Parses Nuxt.js metadata. 
This works as long as the function __NUXT__ invokes is a pure function""" @@ -1583,7 +1579,7 @@ class InfoExtractor(object): if f.get('vcodec') == 'none': # audio only preference -= 50 - if self._downloader.params.get('prefer_free_formats'): + if self.get_param('prefer_free_formats'): ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus'] else: ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a'] @@ -1595,7 +1591,7 @@ class InfoExtractor(object): else: if f.get('acodec') == 'none': # video only preference -= 40 - if self._downloader.params.get('prefer_free_formats'): + if self.get_param('prefer_free_formats'): ORDER = ['flv', 'mp4', 'webm'] else: ORDER = ['webm', 'flv', 'mp4'] @@ -1661,7 +1657,7 @@ class InfoExtractor(object): """ Either "http:" or "https:", depending on the user's preferences """ return ( 'http:' - if self._downloader.params.get('prefer_insecure', False) + if self.get_param('prefer_insecure', False) else 'https:') def _proto_relative_url(self, url, scheme=None): @@ -3029,7 +3025,6 @@ class InfoExtractor(object): transform_source=transform_source, default=None) def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs): - # allow passing `transform_source` through to _find_jwplayer_data() transform_source = kwargs.pop('transform_source', None) kwfind = compat_kwargs({'transform_source': transform_source}) if transform_source else {} @@ -3167,7 +3162,7 @@ class InfoExtractor(object): # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as # of jwplayer.flash.swf rtmp_url_parts = re.split( - r'((?:mp4|mp3|flv):)', source_url, 1) + r'((?:mp4|mp3|flv):)', source_url, maxsplit=1) if len(rtmp_url_parts) == 3: rtmp_url, prefix, play_path = rtmp_url_parts a_format.update({ @@ -3194,7 +3189,7 @@ class InfoExtractor(object): if fatal: raise ExtractorError(msg) else: - self._downloader.report_warning(msg) + self.report_warning(msg) return res def _float(self, v, name, fatal=False, **kwargs): @@ -3204,7 +3199,7 @@ class InfoExtractor(object): if fatal: raise ExtractorError(msg) else: - self._downloader.report_warning(msg) + self.report_warning(msg) return res def _set_cookie(self, domain, name, value, expire_time=None, port=None, @@ -3213,12 +3208,12 @@ class InfoExtractor(object): 0, name, value, port, port is not None, domain, True, domain.startswith('.'), path, True, secure, expire_time, discard, None, None, rest) - self._downloader.cookiejar.set_cookie(cookie) + self.cookiejar.set_cookie(cookie) def _get_cookies(self, url): """ Return a compat_cookies_SimpleCookie with the cookies for the url """ req = sanitized_Request(url) - self._downloader.cookiejar.add_cookie_header(req) + self.cookiejar.add_cookie_header(req) return compat_cookies_SimpleCookie(req.get_header('Cookie')) def _apply_first_set_cookie_header(self, url_handle, cookie): @@ -3278,8 +3273,8 @@ class InfoExtractor(object): return not any_restricted def extract_subtitles(self, *args, **kwargs): - if (self._downloader.params.get('writesubtitles', False) - or self._downloader.params.get('listsubtitles')): + if (self.get_param('writesubtitles', False) + or self.get_param('listsubtitles')): return self._get_subtitles(*args, **kwargs) return {} @@ -3296,16 +3291,24 @@ class InfoExtractor(object): return ret @classmethod - def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2): - """ Merge two subtitle dictionaries, language by language. 
""" - ret = dict(subtitle_dict1) - for lang in subtitle_dict2: - ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang]) - return ret + def _merge_subtitles(cls, subtitle_dict1, *subtitle_dicts, **kwargs): + """ Merge subtitle dictionaries, language by language. """ + + # ..., * , target=None + target = kwargs.get('target') + if target is None: + target = dict(subtitle_dict1) + else: + subtitle_dicts = (subtitle_dict1,) + subtitle_dicts + + for subtitle_dict in subtitle_dicts: + for lang in subtitle_dict: + target[lang] = cls._merge_subtitle_items(target.get(lang, []), subtitle_dict[lang]) + return target def extract_automatic_captions(self, *args, **kwargs): - if (self._downloader.params.get('writeautomaticsub', False) - or self._downloader.params.get('listsubtitles')): + if (self.get_param('writeautomaticsub', False) + or self.get_param('listsubtitles')): return self._get_automatic_captions(*args, **kwargs) return {} @@ -3313,9 +3316,9 @@ class InfoExtractor(object): raise NotImplementedError('This method must be implemented by subclasses') def mark_watched(self, *args, **kwargs): - if (self._downloader.params.get('mark_watched', False) + if (self.get_param('mark_watched', False) and (self._get_login_info()[0] is not None - or self._downloader.params.get('cookiefile') is not None)): + or self.get_param('cookiefile') is not None)): self._mark_watched(*args, **kwargs) def _mark_watched(self, *args, **kwargs): @@ -3323,7 +3326,7 @@ class InfoExtractor(object): def geo_verification_headers(self): headers = {} - geo_verification_proxy = self._downloader.params.get('geo_verification_proxy') + geo_verification_proxy = self.get_param('geo_verification_proxy') if geo_verification_proxy: headers['Ytdl-request-proxy'] = geo_verification_proxy return headers @@ -3334,6 +3337,29 @@ class InfoExtractor(object): def _generic_title(self, url): return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]) + def _yes_playlist(self, playlist_id, video_id, *args, **kwargs): + # smuggled_data=None, *, playlist_label='playlist', video_label='video' + smuggled_data = args[0] if len(args) == 1 else kwargs.get('smuggled_data') + playlist_label = kwargs.get('playlist_label', 'playlist') + video_label = kwargs.get('video_label', 'video') + + if not playlist_id or not video_id: + return not video_id + + no_playlist = (smuggled_data or {}).get('force_noplaylist') + if no_playlist is not None: + return not no_playlist + + video_id = '' if video_id is True else ' ' + video_id + noplaylist = self.get_param('noplaylist') + self.to_screen( + 'Downloading just the {0}{1} because of --no-playlist'.format(video_label, video_id) + if noplaylist else + 'Downloading {0}{1} - add --no-playlist to download just the {2}{3}'.format( + playlist_label, '' if playlist_id is True else ' ' + playlist_id, + video_label, video_id)) + return not noplaylist + class SearchInfoExtractor(InfoExtractor): """ diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 03d035a27..3da5f8020 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -898,21 +898,13 @@ from .ooyala import ( ) from .ora import OraTVIE from .orf import ( - ORFTVthekIE, - ORFFM4IE, + ORFONIE, + ORFONLiveIE, ORFFM4StoryIE, - ORFOE1IE, - ORFOE3IE, - ORFNOEIE, - ORFWIEIE, - ORFBGLIE, - ORFOOEIE, - ORFSTMIE, - ORFKTNIE, - ORFSBGIE, - ORFTIRIE, - ORFVBGIE, ORFIPTVIE, + ORFPodcastIE, + ORFRadioIE, + ORFRadioCollectionIE, ) from .outsidetv import OutsideTVIE from 
.packtpub import ( diff --git a/youtube_dl/extractor/itv.py b/youtube_dl/extractor/itv.py index c64af3be6..2510ad887 100644 --- a/youtube_dl/extractor/itv.py +++ b/youtube_dl/extractor/itv.py @@ -35,15 +35,6 @@ from ..utils import ( class ITVBaseIE(InfoExtractor): - def _search_nextjs_data(self, webpage, video_id, **kw): - transform_source = kw.pop('transform_source', None) - fatal = kw.pop('fatal', True) - return self._parse_json( - self._search_regex( - r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''', - webpage, 'next.js data', group='js', fatal=fatal, **kw), - video_id, transform_source=transform_source, fatal=fatal) - def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True): if errnote is False: return False @@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor): class ITVIE(ITVBaseIE): _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)' - _IE_DESC = 'ITVX' + IE_DESC = 'ITVX' + _WORKING = False + _TESTS = [{ 'note': 'Hub URLs redirect to ITVX', 'url': 'https://www.itv.com/hub/liar/2a4547a0012', @@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE): 'ext': determine_ext(href, 'vtt'), }) - next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}') + next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={}) video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {}) title = traverse_obj(video_data, 'headerTitle', 'episodeTitle') info = self._og_extract(webpage, require_title=not title) @@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE): class ITVBTCCIE(ITVBaseIE): _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)' - _IE_DESC = 'ITV articles: News, British Touring Car Championship' + IE_DESC = 'ITV articles: News, British Touring Car Championship' _TESTS = [{ 'note': 'British Touring Car Championship', 'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch', diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py index 69319857d..2b5e2c15c 100644 --- a/youtube_dl/extractor/mixcloud.py +++ b/youtube_dl/extractor/mixcloud.py @@ -1,3 +1,4 @@ +# coding: utf-8 from __future__ import unicode_literals import itertools @@ -10,7 +11,7 @@ from ..compat import ( compat_ord, compat_str, compat_urllib_parse_unquote, - compat_zip + compat_zip as zip, ) from ..utils import ( int_or_none, @@ -24,7 +25,7 @@ class MixcloudBaseIE(InfoExtractor): def _call_api(self, object_type, object_fields, display_id, username, slug=None): lookup_key = object_type + 'Lookup' return self._download_json( - 'https://www.mixcloud.com/graphql', display_id, query={ + 'https://app.mixcloud.com/graphql', display_id, query={ 'query': '''{ %s(lookup: {username: "%s"%s}) { %s @@ -44,7 +45,7 @@ class MixcloudIE(MixcloudBaseIE): 'ext': 'm4a', 'title': 'Cryptkeeper', 'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.', - 'uploader': 'Daniel Holbach', + 'uploader': 'dholbach', # was: 'Daniel Holbach', 'uploader_id': 'dholbach', 'thumbnail': r're:https?://.*\.jpg', 'view_count': int, @@ -57,7 +58,7 @@ class MixcloudIE(MixcloudBaseIE): 'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat', 'ext': 'mp3', 'title': 'Caribou 7 inch Vinyl Mix & Chat', - 'description': 'md5:2b8aec6adce69f9d41724647c65875e8', + 'description': r're:Last week Dan Snaith aka Caribou swung by the 
Brownswood.{136}', 'uploader': 'Gilles Peterson Worldwide', 'uploader_id': 'gillespeterson', 'thumbnail': 're:https?://.*', @@ -65,6 +66,23 @@ class MixcloudIE(MixcloudBaseIE): 'timestamp': 1422987057, 'upload_date': '20150203', }, + 'params': { + 'skip_download': '404 not found', + }, + }, { + 'url': 'https://www.mixcloud.com/gillespeterson/carnival-m%C3%BAsica-popular-brasileira-mix/', + 'info_dict': { + 'id': 'gillespeterson_carnival-música-popular-brasileira-mix', + 'ext': 'm4a', + 'title': 'Carnival Música Popular Brasileira Mix', + 'description': r're:Gilles was recently in Brazil to play at Boiler Room.{208}', + 'timestamp': 1454347174, + 'upload_date': '20160201', + 'uploader': 'Gilles Peterson Worldwide', + 'uploader_id': 'gillespeterson', + 'thumbnail': 're:https?://.*', + 'view_count': int, + }, }, { 'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/', 'only_matching': True, @@ -76,10 +94,10 @@ class MixcloudIE(MixcloudBaseIE): """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR.""" return ''.join([ compat_chr(compat_ord(ch) ^ compat_ord(k)) - for ch, k in compat_zip(ciphertext, itertools.cycle(key))]) + for ch, k in zip(ciphertext, itertools.cycle(key))]) def _real_extract(self, url): - username, slug = re.match(self._VALID_URL, url).groups() + username, slug = self._match_valid_url(url).groups() username, slug = compat_urllib_parse_unquote(username), compat_urllib_parse_unquote(slug) track_id = '%s_%s' % (username, slug) diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py index 8d537d7ae..1ee78edbc 100644 --- a/youtube_dl/extractor/orf.py +++ b/youtube_dl/extractor/orf.py @@ -1,407 +1,394 @@ # coding: utf-8 from __future__ import unicode_literals +import base64 +import functools import re from .common import InfoExtractor -from ..compat import compat_str +from .youtube import YoutubeIE from ..utils import ( clean_html, determine_ext, + ExtractorError, float_or_none, - HEADRequest, int_or_none, - orderedSet, - remove_end, - str_or_none, + merge_dicts, + mimetype2ext, + parse_age_limit, + parse_iso8601, strip_jsonp, - unescapeHTML, + txt_or_none, unified_strdate, + update_url_query, url_or_none, ) - - -class ORFTVthekIE(InfoExtractor): - IE_NAME = 'orf:tvthek' - IE_DESC = 'ORF TVthek' - _VALID_URL = r'https?://tvthek\.orf\.at/(?:[^/]+/)+(?P<id>\d+)' +from ..traversal import T, traverse_obj + +k_float_or_none = functools.partial(float_or_none, scale=1000) + + +class ORFRadioBase(InfoExtractor): + STATION_INFO = { + 'fm4': ('fm4', 'fm4', 'orffm4'), + 'noe': ('noe', 'oe2n', 'orfnoe'), + 'wien': ('wie', 'oe2w', 'orfwie'), + 'burgenland': ('bgl', 'oe2b', 'orfbgl'), + 'ooe': ('ooe', 'oe2o', 'orfooe'), + 'steiermark': ('stm', 'oe2st', 'orfstm'), + 'kaernten': ('ktn', 'oe2k', 'orfktn'), + 'salzburg': ('sbg', 'oe2s', 'orfsbg'), + 'tirol': ('tir', 'oe2t', 'orftir'), + 'vorarlberg': ('vbg', 'oe2v', 'orfvbg'), + 'oe3': ('oe3', 'oe3', 'orfoe3'), + 'oe1': ('oe1', 'oe1', 'orfoe1'), + } + _ID_NAMES = ('id', 'guid', 'program') + + @classmethod + def _get_item_id(cls, data): + return traverse_obj(data, *cls._ID_NAMES, expected_type=txt_or_none) + + @classmethod + def _get_api_payload(cls, data, expected_id, in_payload=False): + if expected_id not in traverse_obj(data, ('payload',)[:1 if in_payload else 0] + (cls._ID_NAMES, T(txt_or_none))): + raise ExtractorError('Unexpected API data result', video_id=expected_id) + return data['payload'] + + @staticmethod + def _extract_podcast_upload(data): + return traverse_obj(data, { 
+ 'url': ('enclosures', 0, 'url'), + 'ext': ('enclosures', 0, 'type', T(mimetype2ext)), + 'filesize': ('enclosures', 0, 'length', T(int_or_none)), + 'title': ('title', T(txt_or_none)), + 'description': ('description', T(clean_html)), + 'timestamp': (('published', 'postDate'), T(parse_iso8601)), + 'duration': ('duration', T(k_float_or_none)), + 'series': ('podcast', 'title'), + 'uploader': ((('podcast', 'author'), 'station'), T(txt_or_none)), + 'uploader_id': ('podcast', 'channel', T(txt_or_none)), + }, get_all=False) + + @classmethod + def _entries(cls, data, station, item_type=None): + if item_type in ('upload', 'podcast-episode'): + yield merge_dicts({ + 'id': cls._get_item_id(data), + 'ext': 'mp3', + 'vcodec': 'none', + }, cls._extract_podcast_upload(data), rev=True) + return + + loop_station = cls.STATION_INFO[station][1] + for info in traverse_obj(data, ((('streams', Ellipsis), 'stream'), T(lambda v: v if v['loopStreamId'] else None))): + item_id = info['loopStreamId'] + host = info.get('host') or 'loopstream01.apa.at' + yield merge_dicts({ + 'id': item_id.replace('.mp3', ''), + 'ext': 'mp3', + 'url': update_url_query('https://{0}/'.format(host), { + 'channel': loop_station, + 'id': item_id, + }), + 'vcodec': 'none', + # '_old_archive_ids': [make_archive_id(old_ie, video_id)], + }, traverse_obj(data, { + 'title': ('title', T(txt_or_none)), + 'description': ('subtitle', T(clean_html)), + 'uploader': 'station', + 'series': ('programTitle', T(txt_or_none)), + }), traverse_obj(info, { + 'duration': (('duration', + (None, T(lambda x: x['end'] - x['start']))), + T(k_float_or_none), any), + 'timestamp': (('start', 'startISO'), T(parse_iso8601), any), + })) + + +class ORFRadioIE(ORFRadioBase): + IE_NAME = 'orf:sound' + _STATION_RE = '|'.join(map(re.escape, ORFRadioBase.STATION_INFO.keys())) + + _VALID_URL = ( + r'https?://sound\.orf\.at/radio/(?P<station>{0})/sendung/(?P<id>\d+)(?:/(?P<show>\w+))?'.format(_STATION_RE), + r'https?://(?P<station>{0})\.orf\.at/player/(?P<date>\d{{8}})/(?P<id>\d+)'.format(_STATION_RE), + ) _TESTS = [{ - 'url': 'http://tvthek.orf.at/program/Aufgetischt/2745173/Aufgetischt-Mit-der-Steirischen-Tafelrunde/8891389', + 'url': 'https://sound.orf.at/radio/ooe/sendung/37802/guten-morgen-oberoesterreich-am-feiertag', + 'info_dict': { + 'id': '37802', + 'title': 'Guten Morgen Oberösterreich am Feiertag', + 'description': 'Oberösterreichs meistgehörte regionale Frühsendung.\nRegionale Nachrichten zu jeder halben Stunde.\nModeration: Wolfgang Lehner\nNachrichten: Stephan Schnabl', + }, 'playlist': [{ - 'md5': '2942210346ed779588f428a92db88712', + 'md5': 'f9ff8517dd681b642a2c900e2c9e6085', 'info_dict': { - 'id': '8896777', - 'ext': 'mp4', - 'title': 'Aufgetischt: Mit der Steirischen Tafelrunde', - 'description': 'md5:c1272f0245537812d4e36419c207b67d', - 'duration': 2668, - 'upload_date': '20141208', - }, + 'id': '2024-05-30_0559_tl_66_7DaysThu1_443862', + 'ext': 'mp3', + 'title': 'Guten Morgen Oberösterreich am Feiertag', + 'description': 'Oberösterreichs meistgehörte regionale Frühsendung.\nRegionale Nachrichten zu jeder halben Stunde.\nModeration: Wolfgang Lehner\nNachrichten: Stephan Schnabl', + 'timestamp': 1717041587, + 'upload_date': '20240530', + 'uploader': 'ooe', + 'duration': 14413.0, + } }], - 'skip': 'Blocked outside of Austria / Germany', + 'skip': 'Shows from ORF Sound are only available for 30 days.' 
}, { - 'url': 'http://tvthek.orf.at/topic/Im-Wandel-der-Zeit/8002126/Best-of-Ingrid-Thurnher/7982256', + 'url': 'https://oe1.orf.at/player/20240531/758136', + 'md5': '2397717aaf3ae9c22a4f090ee3b8d374', 'info_dict': { - 'id': '7982259', - 'ext': 'mp4', - 'title': 'Best of Ingrid Thurnher', - 'upload_date': '20140527', - 'description': 'Viele Jahre war Ingrid Thurnher das "Gesicht" der ZIB 2. Vor ihrem Wechsel zur ZIB 2 im Jahr 1995 moderierte sie unter anderem "Land und Leute", "Österreich-Bild" und "Niederösterreich heute".', - }, - 'params': { - 'skip_download': True, # rtsp downloads + 'id': '2024-05-31_1905_tl_51_7DaysFri35_2413387', + 'ext': 'mp3', + 'title': '"Who Cares?"', + 'description': 'Europas größte Netzkonferenz re:publica 2024', + 'timestamp': 1717175100, + 'upload_date': '20240531', + 'uploader': 'oe1', + 'duration': 1500, }, - 'skip': 'Blocked outside of Austria / Germany', - }, { - 'url': 'http://tvthek.orf.at/topic/Fluechtlingskrise/10463081/Heimat-Fremde-Heimat/13879132/Senioren-betreuen-Migrantenkinder/13879141', - 'only_matching': True, - }, { - 'url': 'http://tvthek.orf.at/profile/Universum/35429', - 'only_matching': True, + 'skip': 'Shows from ORF Sound are only available for 30 days.' }] def _real_extract(self, url): - playlist_id = self._match_id(url) - webpage = self._download_webpage(url, playlist_id) + m = self._match_valid_url(url) + station, show_id = m.group('station', 'id') + api_station, _, _ = self.STATION_INFO[station] + if 'date' in m.groupdict(): + data = self._download_json( + 'https://audioapi.orf.at/{0}/json/4.0/broadcast/{1}/{2}?_o={3}.orf.at'.format( + api_station, show_id, m.group('date'), station), show_id) + show_id = data['id'] + else: + data = self._download_json( + 'https://audioapi.orf.at/{0}/api/json/5.0/broadcast/{1}?_o=sound.orf.at'.format( + api_station, show_id), show_id) - data_jsb = self._parse_json( - self._search_regex( - r'<div[^>]+class=(["\']).*?VideoPlaylist.*?\1[^>]+data-jsb=(["\'])(?P<json>.+?)\2', - webpage, 'playlist', group='json'), - playlist_id, transform_source=unescapeHTML)['playlist']['videos'] + data = self._get_api_payload(data, show_id, in_payload=True) - entries = [] - for sd in data_jsb: - video_id, title = sd.get('id'), sd.get('title') - if not video_id or not title: - continue - video_id = compat_str(video_id) - formats = [] - for fd in sd['sources']: - src = url_or_none(fd.get('src')) - if not src: - continue - format_id_list = [] - for key in ('delivery', 'quality', 'quality_string'): - value = fd.get(key) - if value: - format_id_list.append(value) - format_id = '-'.join(format_id_list) - ext = determine_ext(src) - if ext == 'm3u8': - m3u8_formats = self._extract_m3u8_formats( - src, video_id, 'mp4', m3u8_id=format_id, fatal=False) - if any('/geoprotection' in f['url'] for f in m3u8_formats): - self.raise_geo_restricted() - formats.extend(m3u8_formats) - elif ext == 'f4m': - formats.extend(self._extract_f4m_formats( - src, video_id, f4m_id=format_id, fatal=False)) - elif ext == 'mpd': - formats.extend(self._extract_mpd_formats( - src, video_id, mpd_id=format_id, fatal=False)) - else: - formats.append({ - 'format_id': format_id, - 'url': src, - 'protocol': fd.get('protocol'), - }) - - # Check for geoblocking. 
- # There is a property is_geoprotection, but that's always false - geo_str = sd.get('geoprotection_string') - if geo_str: - try: - http_url = next( - f['url'] - for f in formats - if re.match(r'^https?://.*\.mp4$', f['url'])) - except StopIteration: - pass - else: - req = HEADRequest(http_url) - self._request_webpage( - req, video_id, - note='Testing for geoblocking', - errnote=(( - 'This video seems to be blocked outside of %s. ' - 'You may want to try the streaming-* formats.') - % geo_str), - fatal=False) - - self._check_formats(formats, video_id) - self._sort_formats(formats) + # site sends ISO8601 GMT date-times with separate TZ offset, ignored + # TODO: should `..._date` be calculated relative to TZ? - subtitles = {} - for sub in sd.get('subtitles', []): - sub_src = sub.get('src') - if not sub_src: - continue - subtitles.setdefault(sub.get('lang', 'de-AT'), []).append({ - 'url': sub_src, - }) - - upload_date = unified_strdate(sd.get('created_date')) + return merge_dicts( + {'_type': 'multi_video'}, + self.playlist_result( + self._entries(data, station), show_id, + txt_or_none(data.get('title')), + clean_html(data.get('subtitle')))) - thumbnails = [] - preview = sd.get('preview_image_url') - if preview: - thumbnails.append({ - 'id': 'preview', - 'url': preview, - 'preference': 0, - }) - image = sd.get('image_full_url') - if not image and len(data_jsb) == 1: - image = self._og_search_thumbnail(webpage) - if image: - thumbnails.append({ - 'id': 'full', - 'url': image, - 'preference': 1, - }) - entries.append({ - '_type': 'video', - 'id': video_id, - 'title': title, - 'formats': formats, - 'subtitles': subtitles, - 'description': sd.get('description'), - 'duration': int_or_none(sd.get('duration_in_seconds')), - 'upload_date': upload_date, - 'thumbnails': thumbnails, - }) - - return { - '_type': 'playlist', - 'entries': entries, - 'id': playlist_id, - } - - -class ORFRadioIE(InfoExtractor): - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - show_date = mobj.group('date') - show_id = mobj.group('show') +class ORFRadioCollectionIE(ORFRadioBase): + IE_NAME = 'orf:collection' + _VALID_URL = r'https?://sound\.orf\.at/collection/(?P<coll_id>\d+)(?:/(?P<item_id>\d+))?' 
- data = self._download_json( - 'http://audioapi.orf.at/%s/api/json/current/broadcast/%s/%s' - % (self._API_STATION, show_id, show_date), show_id) - - entries = [] - for info in data['streams']: - loop_stream_id = str_or_none(info.get('loopStreamId')) - if not loop_stream_id: - continue - title = str_or_none(data.get('title')) - if not title: - continue - start = int_or_none(info.get('start'), scale=1000) - end = int_or_none(info.get('end'), scale=1000) - duration = end - start if end and start else None - entries.append({ - 'id': loop_stream_id.replace('.mp3', ''), - 'url': 'https://loopstream01.apa.at/?channel=%s&id=%s' % (self._LOOP_STATION, loop_stream_id), - 'title': title, - 'description': clean_html(data.get('subtitle')), - 'duration': duration, - 'timestamp': start, + _TESTS = [{ + 'url': 'https://sound.orf.at/collection/4/61908/was-das-uberschreiten-des-15-limits-bedeutet', + 'info_dict': { + 'id': '2577582', + }, + 'playlist': [{ + 'md5': '5789cec7d75575ff58d19c0428c80eb3', + 'info_dict': { + 'id': '2024-06-06_1659_tl_54_7DaysThu6_153926', 'ext': 'mp3', - 'series': data.get('programTitle'), - }) - - return { - '_type': 'playlist', - 'id': show_id, - 'title': data.get('title'), - 'description': clean_html(data.get('subtitle')), - 'entries': entries, - } - - -class ORFFM4IE(ORFRadioIE): - IE_NAME = 'orf:fm4' - IE_DESC = 'radio FM4' - _VALID_URL = r'https?://(?P<station>fm4)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>4\w+)' - _API_STATION = 'fm4' - _LOOP_STATION = 'fm4' - - _TEST = { - 'url': 'http://fm4.orf.at/player/20170107/4CC', - 'md5': '2b0be47375432a7ef104453432a19212', + 'title': 'Klimakrise: Was das Überschreiten des 1,5°-Limits bedeutet', + 'timestamp': 1717686674, + 'upload_date': '20240606', + 'uploader': 'fm4', + }, + }], + 'skip': 'Shows from ORF Sound are only available for 30 days.' 
+ }, { + # persistent playlist (FM4 Highlights) + 'url': 'https://sound.orf.at/collection/4/', 'info_dict': { - 'id': '2017-01-07_2100_tl_54_7DaysSat18_31295', - 'ext': 'mp3', - 'title': 'Solid Steel Radioshow', - 'description': 'Die Mixshow von Coldcut und Ninja Tune.', - 'duration': 3599, - 'timestamp': 1483819257, - 'upload_date': '20170107', + 'id': '4', }, - 'skip': 'Shows from ORF radios are only available for 7 days.', - 'only_matching': True, - } - - -class ORFNOEIE(ORFRadioIE): - IE_NAME = 'orf:noe' - IE_DESC = 'Radio Niederösterreich' - _VALID_URL = r'https?://(?P<station>noe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'noe' - _LOOP_STATION = 'oe2n' - - _TEST = { - 'url': 'https://noe.orf.at/player/20200423/NGM', - 'only_matching': True, - } - - -class ORFWIEIE(ORFRadioIE): - IE_NAME = 'orf:wien' - IE_DESC = 'Radio Wien' - _VALID_URL = r'https?://(?P<station>wien)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'wie' - _LOOP_STATION = 'oe2w' - - _TEST = { - 'url': 'https://wien.orf.at/player/20200423/WGUM', - 'only_matching': True, - } - - -class ORFBGLIE(ORFRadioIE): - IE_NAME = 'orf:burgenland' - IE_DESC = 'Radio Burgenland' - _VALID_URL = r'https?://(?P<station>burgenland)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'bgl' - _LOOP_STATION = 'oe2b' - - _TEST = { - 'url': 'https://burgenland.orf.at/player/20200423/BGM', - 'only_matching': True, - } - - -class ORFOOEIE(ORFRadioIE): - IE_NAME = 'orf:oberoesterreich' - IE_DESC = 'Radio Oberösterreich' - _VALID_URL = r'https?://(?P<station>ooe)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'ooe' - _LOOP_STATION = 'oe2o' + 'playlist_mincount': 10, + 'playlist_maxcount': 13, + }] - _TEST = { - 'url': 'https://ooe.orf.at/player/20200423/OGMO', - 'only_matching': True, - } + def _real_extract(self, url): + coll_id, item_id = self._match_valid_url(url).group('coll_id', 'item_id') + data = self._download_json( + 'https://collector.orf.at/api/frontend/collections/{0}?_o=sound.orf.at'.format( + coll_id), coll_id) + data = self._get_api_payload(data, coll_id, in_payload=True) + + def yield_items(): + for item in traverse_obj(data, ( + 'content', 'items', lambda _, v: any(k in v['target']['params'] for k in self._ID_NAMES))): + if item_id is None or item_id == txt_or_none(item.get('id')): + target = item['target'] + typed_item_id = self._get_item_id(target['params']) + station = target['params'].get('station') + item_type = target.get('type') + if typed_item_id and (station or item_type): + yield station, typed_item_id, item_type + if item_id is not None: + break + else: + if item_id is not None: + raise ExtractorError('Item not found in collection', + video_id=coll_id, expected=True) + + def item_playlist(station, typed_item_id, item_type): + if item_type == 'upload': + item_data = self._download_json('https://audioapi.orf.at/radiothek/api/2.0/upload/{0}?_o=sound.orf.at'.format( + typed_item_id), typed_item_id) + elif item_type == 'podcast-episode': + item_data = self._download_json('https://audioapi.orf.at/radiothek/api/2.0/episode/{0}?_o=sound.orf.at'.format( + typed_item_id), typed_item_id) + else: + api_station, _, _ = self.STATION_INFO[station] + item_data = self._download_json( + 'https://audioapi.orf.at/{0}/api/json/5.0/{1}/{2}?_o=sound.orf.at'.format( + api_station, item_type or 'broadcastitem', typed_item_id), typed_item_id) + item_data = self._get_api_payload(item_data, typed_item_id, in_payload=True) -class ORFSTMIE(ORFRadioIE): - IE_NAME = 
'orf:steiermark' - IE_DESC = 'Radio Steiermark' - _VALID_URL = r'https?://(?P<station>steiermark)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'stm' - _LOOP_STATION = 'oe2st' + return merge_dicts( + {'_type': 'multi_video'}, + self.playlist_result( + self._entries(item_data, station, item_type), typed_item_id, + txt_or_none(data.get('title')), + clean_html(data.get('subtitle')))) - _TEST = { - 'url': 'https://steiermark.orf.at/player/20200423/STGMS', - 'only_matching': True, - } + def yield_item_entries(): + for station, typed_id, item_type in yield_items(): + yield item_playlist(station, typed_id, item_type) + if item_id is not None: + # coll_id = '/'.join((coll_id, item_id)) + return next(yield_item_entries()) -class ORFKTNIE(ORFRadioIE): - IE_NAME = 'orf:kaernten' - IE_DESC = 'Radio Kärnten' - _VALID_URL = r'https?://(?P<station>kaernten)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'ktn' - _LOOP_STATION = 'oe2k' + return self.playlist_result(yield_item_entries(), coll_id, data.get('title')) - _TEST = { - 'url': 'https://kaernten.orf.at/player/20200423/KGUMO', - 'only_matching': True, - } +class ORFPodcastIE(ORFRadioBase): + IE_NAME = 'orf:podcast' + _STATION_RE = '|'.join(map(re.escape, (x[0] for x in ORFRadioBase.STATION_INFO.values()))) + '|tv' + _VALID_URL = r'https?://sound\.orf\.at/podcast/(?P<station>{0})/(?P<show>[\w-]+)/(?P<id>[\w-]+)'.format(_STATION_RE) + _TESTS = [{ + 'url': 'https://sound.orf.at/podcast/stm/der-kraeutertipp-von-christine-lackner/rotklee', + 'md5': '1f2bab2ba90c2ce0c2754196ea78b35f', + 'info_dict': { + 'id': 'der-kraeutertipp-von-christine-lackner/rotklee', + 'ext': 'mp3', + 'title': 'Rotklee', + 'description': 'In der Natur weit verbreitet - in der Medizin längst anerkennt: Rotklee. Dieser Podcast begleitet die Sendung "Radio Steiermark am Vormittag", Radio Steiermark, 28. 
Mai 2024.', + 'timestamp': 1716891761, + 'upload_date': '20240528', + 'uploader_id': 'stm_kraeutertipp', + 'uploader': 'ORF Radio Steiermark', + 'duration': 101, + 'series': 'Der Kräutertipp von Christine Lackner', + }, + 'skip': 'ORF podcasts are only available for a limited time' + }] -class ORFSBGIE(ORFRadioIE): - IE_NAME = 'orf:salzburg' - IE_DESC = 'Radio Salzburg' - _VALID_URL = r'https?://(?P<station>salzburg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'sbg' - _LOOP_STATION = 'oe2s' + _ID_NAMES = ('slug', 'guid') - _TEST = { - 'url': 'https://salzburg.orf.at/player/20200423/SGUM', - 'only_matching': True, - } + def _real_extract(self, url): + station, show, show_id = self._match_valid_url(url).group('station', 'show', 'id') + data = self._download_json( + 'https://audioapi.orf.at/radiothek/api/2.0/podcast/{0}/{1}/{2}'.format( + station, show, show_id), show_id) + data = self._get_api_payload(data, show_id, in_payload=True) + return merge_dicts({ + 'id': '/'.join((show, show_id)), + 'ext': 'mp3', + 'vcodec': 'none', + }, self._extract_podcast_upload(data), rev=True) -class ORFTIRIE(ORFRadioIE): - IE_NAME = 'orf:tirol' - IE_DESC = 'Radio Tirol' - _VALID_URL = r'https?://(?P<station>tirol)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'tir' - _LOOP_STATION = 'oe2t' - _TEST = { - 'url': 'https://tirol.orf.at/player/20200423/TGUMO', - 'only_matching': True, - } +class ORFIPTVBase(InfoExtractor): + _TITLE_STRIP_RE = '' + def _extract_video(self, video_id, webpage, fatal=False): -class ORFVBGIE(ORFRadioIE): - IE_NAME = 'orf:vorarlberg' - IE_DESC = 'Radio Vorarlberg' - _VALID_URL = r'https?://(?P<station>vorarlberg)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'vbg' - _LOOP_STATION = 'oe2v' + data = self._download_json( + 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id, + video_id)[0] - _TEST = { - 'url': 'https://vorarlberg.orf.at/player/20200423/VGUM', - 'only_matching': True, - } + video = traverse_obj(data, ( + 'sources', ('default', 'q8c'), + T(lambda x: x if x['loadBalancerUrl'] else None), + any)) + load_balancer_url = video['loadBalancerUrl'] -class ORFOE3IE(ORFRadioIE): - IE_NAME = 'orf:oe3' - IE_DESC = 'Radio Österreich 3' - _VALID_URL = r'https?://(?P<station>oe3)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'oe3' - _LOOP_STATION = 'oe3' + try: + rendition = self._download_json( + load_balancer_url, video_id, transform_source=strip_jsonp) + except ExtractorError: + rendition = None + + if not rendition: + rendition = { + 'redirect': { + 'smil': re.sub( + r'(/)jsonp(/.+\.)mp4$', r'\1dash\2smil/manifest.mpd', + load_balancer_url), + }, + } - _TEST = { - 'url': 'https://oe3.orf.at/player/20200424/3WEK', - 'only_matching': True, - } + f = traverse_obj(video, { + 'abr': ('audioBitrate', T(int_or_none)), + 'vbr': ('bitrate', T(int_or_none)), + 'fps': ('videoFps', T(int_or_none)), + 'width': ('videoWidth', T(int_or_none)), + 'height': ('videoHeight', T(int_or_none)), + }) + formats = [] + for format_id, format_url in traverse_obj(rendition, ( + 'redirect', T(dict.items), Ellipsis)): + if format_id == 'rtmp': + ff = f.copy() + ff.update({ + 'url': format_url, + 'format_id': format_id, + }) + formats.append(ff) + elif determine_ext(format_url) == 'f4m': + formats.extend(self._extract_f4m_formats( + format_url, video_id, f4m_id=format_id)) + elif determine_ext(format_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + format_url, video_id, 'mp4', 
m3u8_id=format_id, + entry_protocol='m3u8_native')) + elif determine_ext(format_url) == 'mpd': + formats.extend(self._extract_mpd_formats( + format_url, video_id, mpd_id=format_id)) -class ORFOE1IE(ORFRadioIE): - IE_NAME = 'orf:oe1' - IE_DESC = 'Radio Österreich 1' - _VALID_URL = r'https?://(?P<station>oe1)\.orf\.at/player/(?P<date>[0-9]+)/(?P<show>\w+)' - _API_STATION = 'oe1' - _LOOP_STATION = 'oe1' + if formats or fatal: + self._sort_formats(formats) + else: + return - _TEST = { - 'url': 'http://oe1.orf.at/player/20170108/456544', - 'md5': '34d8a6e67ea888293741c86a099b745b', - 'info_dict': { - 'id': '2017-01-08_0759_tl_51_7DaysSun6_256141', - 'ext': 'mp3', - 'title': 'Morgenjournal', - 'duration': 609, - 'timestamp': 1483858796, - 'upload_date': '20170108', - }, - 'skip': 'Shows from ORF radios are only available for 7 days.' - } + return merge_dicts({ + 'id': video_id, + 'title': re.sub(self._TITLE_STRIP_RE, '', self._og_search_title(webpage)), + 'description': self._og_search_description(webpage), + 'upload_date': unified_strdate(self._html_search_meta( + 'dc.date', webpage, 'upload date', fatal=False)), + 'formats': formats, + }, traverse_obj(data, { + 'duration': ('duration', T(k_float_or_none)), + 'thumbnail': ('sources', 'default', 'preview', T(url_or_none)), + }), rev=True) -class ORFIPTVIE(InfoExtractor): +class ORFIPTVIE(ORFIPTVBase): IE_NAME = 'orf:iptv' IE_DESC = 'iptv.ORF.at' + _WORKING = False # URLs redirect to orf.at/ _VALID_URL = r'https?://iptv\.orf\.at/(?:#/)?stories/(?P<id>\d+)' + _TITLE_STRIP_RE = r'\s+-\s+iptv\.ORF\.at\S*$' _TEST = { 'url': 'http://iptv.orf.at/stories/2275236/', @@ -426,74 +413,32 @@ class ORFIPTVIE(InfoExtractor): video_id = self._search_regex( r'data-video(?:id)?="(\d+)"', webpage, 'video id') - data = self._download_json( - 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id, - video_id)[0] - - duration = float_or_none(data['duration'], 1000) + return self._extract_video(video_id, webpage) - video = data['sources']['default'] - load_balancer_url = video['loadBalancerUrl'] - abr = int_or_none(video.get('audioBitrate')) - vbr = int_or_none(video.get('bitrate')) - fps = int_or_none(video.get('videoFps')) - width = int_or_none(video.get('videoWidth')) - height = int_or_none(video.get('videoHeight')) - thumbnail = video.get('preview') - - rendition = self._download_json( - load_balancer_url, video_id, transform_source=strip_jsonp) - - f = { - 'abr': abr, - 'vbr': vbr, - 'fps': fps, - 'width': width, - 'height': height, - } - formats = [] - for format_id, format_url in rendition['redirect'].items(): - if format_id == 'rtmp': - ff = f.copy() - ff.update({ - 'url': format_url, - 'format_id': format_id, - }) - formats.append(ff) - elif determine_ext(format_url) == 'f4m': - formats.extend(self._extract_f4m_formats( - format_url, video_id, f4m_id=format_id)) - elif determine_ext(format_url) == 'm3u8': - formats.extend(self._extract_m3u8_formats( - format_url, video_id, 'mp4', m3u8_id=format_id)) - else: - continue - self._sort_formats(formats) - - title = remove_end(self._og_search_title(webpage), ' - iptv.ORF.at') - description = self._og_search_description(webpage) - upload_date = unified_strdate(self._html_search_meta( - 'dc.date', webpage, 'upload date')) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'duration': duration, - 'thumbnail': thumbnail, - 'upload_date': upload_date, - 'formats': formats, - } - - -class ORFFM4StoryIE(InfoExtractor): +class ORFFM4StoryIE(ORFIPTVBase): IE_NAME = 
'orf:fm4:story' IE_DESC = 'fm4.orf.at stories' _VALID_URL = r'https?://fm4\.orf\.at/stories/(?P<id>\d+)' + _TITLE_STRIP_RE = r'\s+-\s+fm4\.ORF\.at\s*$' - _TEST = { + _TESTS = [{ + 'url': 'https://fm4.orf.at/stories/3041554/', + 'add_ie': ['Youtube'], + 'info_dict': { + 'id': '3041554', + 'title': 'Is The EU Green Deal In Mortal Danger?', + }, + 'playlist_count': 4, + 'params': { + 'format': 'bestvideo', + }, + }, { 'url': 'http://fm4.orf.at/stories/2865738/', + 'info_dict': { + 'id': '2865738', + 'title': 'Manu Delago und Inner Tongue live', + }, 'playlist': [{ 'md5': 'e1c2c706c45c7b34cf478bbf409907ca', 'info_dict': { @@ -510,83 +455,311 @@ class ORFFM4StoryIE(InfoExtractor): 'info_dict': { 'id': '547798', 'ext': 'flv', - 'title': 'Manu Delago und Inner Tongue live (2)', 'duration': 1504.08, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170913', 'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. Hier gibt es Fotos und die gesamte Session als Video.', }, }], - } + 'skip': 'Videos gone', + }] def _real_extract(self, url): story_id = self._match_id(url) webpage = self._download_webpage(url, story_id) entries = [] - all_ids = orderedSet(re.findall(r'data-video(?:id)?="(\d+)"', webpage)) - for idx, video_id in enumerate(all_ids): - data = self._download_json( - 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id, - video_id)[0] + seen_ids = set() + for idx, video_id in enumerate(re.findall(r'data-video(?:id)?="(\d+)"', webpage)): + if video_id in seen_ids: + continue + seen_ids.add(video_id) + entry = self._extract_video(video_id, webpage, fatal=False) + if not entry: + continue + + if idx >= 1: + # Titles are duplicates, make them unique + entry['title'] = '%s (%d)' % (entry['title'], idx) - duration = float_or_none(data['duration'], 1000) + entries.append(entry) - video = data['sources']['q8c'] - load_balancer_url = video['loadBalancerUrl'] - abr = int_or_none(video.get('audioBitrate')) - vbr = int_or_none(video.get('bitrate')) - fps = int_or_none(video.get('videoFps')) - width = int_or_none(video.get('videoWidth')) - height = int_or_none(video.get('videoHeight')) - thumbnail = video.get('preview') + seen_ids = set() + for yt_id in re.findall( + r'data-id\s*=\s*["\']([\w-]+)[^>]+\bclass\s*=\s*["\']youtube\b', + webpage): + if yt_id in seen_ids: + continue + seen_ids.add(yt_id) + if YoutubeIE.suitable(yt_id): + entries.append(self.url_result(yt_id, ie='Youtube', video_id=yt_id)) + + return self.playlist_result( + entries, story_id, + re.sub(self._TITLE_STRIP_RE, '', self._og_search_title(webpage, default='')) or None) + + +class ORFONBase(InfoExtractor): + _ENC_PFX = '3dSlfek03nsLKdj4Jsd' + _API_PATH = 'episode' + + def _call_api(self, video_id, **kwargs): + encrypted_id = base64.b64encode('{0}{1}'.format( + self._ENC_PFX, video_id).encode('utf-8')).decode('ascii') + return self._download_json( + 'https://api-tvthek.orf.at/api/v4.3/public/{0}/encrypted/{1}'.format( + self._API_PATH, encrypted_id), + video_id, **kwargs) + + @classmethod + def _parse_metadata(cls, api_json): + return traverse_obj(api_json, { + 'id': ('id', T(int), T(txt_or_none)), + 'age_limit': ('age_classification', T(parse_age_limit)), + 'duration': ((('exact_duration', T(k_float_or_none)), + ('duration_second', T(float_or_none))),),
'title': (('title', 'headline'), T(txt_or_none)), + 'description': (('description', 'teaser_text'), T(txt_or_none)), + # 'media_type': ('video_type', T(txt_or_none)), + 'thumbnail': ('_embedded', 'image', 'public_urls', 'highlight_teaser', 'url', T(url_or_none)), + 'timestamp': (('date', 'episode_date'), T(parse_iso8601)), + 'release_timestamp': ('release_date', T(parse_iso8601)), + # 'modified_timestamp': ('updated_at', T(parse_iso8601)), + }, get_all=False) + + def _extract_video(self, video_id, segment_id): + # Not a segmented episode: return single video + # Segmented episode without valid segment id: return entire playlist + # Segmented episode with valid segment id and yes-playlist: return entire playlist + # Segmented episode with valid segment id and no-playlist: return single video corresponding to segment id + # If a multi_video playlist would be returned, but an unsegmented source exists, that source is chosen instead. + + api_json = self._call_api(video_id) + + if traverse_obj(api_json, 'is_drm_protected'): + self.report_drm(video_id) + + # updates formats, subtitles + def extract_sources(src_json, video_id): + for manifest_type in traverse_obj(src_json, ('sources', T(dict.keys), Ellipsis)): + for manifest_url in traverse_obj(src_json, ('sources', manifest_type, Ellipsis, 'src', T(url_or_none))): + if manifest_type == 'hls': + fmts, subs = self._extract_m3u8_formats( + manifest_url, video_id, fatal=False, m3u8_id='hls', + ext='mp4', entry_protocol='m3u8_native'), {} + for f in fmts: + if '_vo.' in f['url']: + f['acodec'] = 'none' + elif manifest_type == 'dash': + fmts, subs = self._extract_mpd_formats_and_subtitles( + manifest_url, video_id, fatal=False, mpd_id='dash') + else: + continue + formats.extend(fmts) + self._merge_subtitles(subs, target=subtitles) + + formats, subtitles = [], {} + if segment_id is None: + extract_sources(api_json, video_id) + if not formats: + segments = traverse_obj(api_json, ( + '_embedded', 'segments', lambda _, v: v['id'])) + if len(segments) > 1 and segment_id is not None: + if not self._yes_playlist(video_id, segment_id, playlist_label='collection', video_label='segment'): + segments = [next(s for s in segments if txt_or_none(s['id']) == segment_id)] + + entries = [] + for seg in segments: + formats, subtitles = [], {} + extract_sources(seg, segment_id) + self._sort_formats(formats) + entries.append(merge_dicts({ + 'formats': formats, + 'subtitles': subtitles, + }, self._parse_metadata(seg), rev=True)) + result = merge_dicts( + {'_type': 'multi_video' if len(entries) > 1 else 'playlist'}, + self._parse_metadata(api_json), + self.playlist_result(entries, video_id)) + # not yet processed in core for playlist/multi + self._downloader._fill_common_fields(result) + return result + else: + self._sort_formats(formats) - rendition = self._download_json( - load_balancer_url, video_id, transform_source=strip_jsonp) + for sub_url in traverse_obj(api_json, ( + '_embedded', 'subtitle', + ('xml_url', 'sami_url', 'stl_url', 'ttml_url', 'srt_url', 'vtt_url'), + T(url_or_none))): + self._merge_subtitles({'de': [{'url': sub_url}]}, target=subtitles) - f = { - 'abr': abr, - 'vbr': vbr, - 'fps': fps, - 'width': width, - 'height': height, - } + return merge_dicts({ + 'id': video_id, + 'formats': formats, + 'subtitles': subtitles, + # '_old_archive_ids': [self._downloader._make_archive_id({'ie_key': 'ORFTVthek', 'id': video_id})], + }, self._parse_metadata(api_json), rev=True) - formats = [] - for format_id, format_url in rendition['redirect'].items(): - if format_id 
== 'rtmp': - ff = f.copy() - ff.update({ - 'url': format_url, - 'format_id': format_id, - }) - formats.append(ff) - elif determine_ext(format_url) == 'f4m': - formats.extend(self._extract_f4m_formats( - format_url, video_id, f4m_id=format_id)) - elif determine_ext(format_url) == 'm3u8': - formats.extend(self._extract_m3u8_formats( - format_url, video_id, 'mp4', m3u8_id=format_id)) - else: - continue - self._sort_formats(formats) + def _real_extract(self, url): + video_id, segment_id = self._match_valid_url(url).group('id', 'segment') + webpage = self._download_webpage(url, video_id) - title = remove_end(self._og_search_title(webpage), ' - fm4.ORF.at') - if idx >= 1: - # Titles are duplicates, make them unique - title += ' (' + str(idx + 1) + ')' - description = self._og_search_description(webpage) - upload_date = unified_strdate(self._html_search_meta( - 'dc.date', webpage, 'upload date')) - - entries.append({ - 'id': video_id, - 'title': title, - 'description': description, - 'duration': duration, - 'thumbnail': thumbnail, - 'upload_date': upload_date, - 'formats': formats, - }) - - return self.playlist_result(entries) + # ORF doesn't like 410 or 404 + if self._search_regex(r'<div\b[^>]*>\s*(Nicht mehr verfügbar)\s*</div>', webpage, 'Availability', default=False): + raise ExtractorError('Content is no longer available', expected=True, video_id=video_id) + + return merge_dicts({ + 'id': video_id, + 'title': self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None), + 'description': self._html_search_meta( + ['description', 'og:description', 'twitter:description'], webpage, default=None), + }, self._search_json_ld(webpage, video_id, default={}), + self._extract_video(video_id, segment_id), + rev=True) + + +class ORFONIE(ORFONBase): + IE_NAME = 'orf:on' + _VALID_URL = r'https?://on\.orf\.at/video/(?P<id>\d+)(?:/(?P<segment>\d+))?' 
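The "encrypted" ID that `_call_api()` above embeds in the API path is only obfuscation: the numeric video ID is appended to a hard-coded prefix and base64-encoded. A standalone sketch of the same transformation (the episode ID is taken from the first test below):

```python
import base64

def encrypt_orf_id(video_id, enc_pfx='3dSlfek03nsLKdj4Jsd'):
    # base64('<static prefix><numeric id>'), as in ORFONBase._call_api()
    return base64.b64encode(
        ('%s%s' % (enc_pfx, video_id)).encode('utf-8')).decode('ascii')

print('https://api-tvthek.orf.at/api/v4.3/public/episode/encrypted/%s'
      % encrypt_orf_id('14210000'))
```

`ORFONLiveIE` further down reuses the same scheme with a different prefix and the `livestream` API path.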
+ _TESTS = [{ + 'url': 'https://on.orf.at/video/14210000/school-of-champions-48', + 'info_dict': { + 'id': '14210000', + 'ext': 'mp4', + 'duration': 2651.08, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0167/98/thumb_16697671_segments_highlight_teaser.jpeg', + 'title': 'School of Champions (4/8)', + 'description': r're:(?s)Luca hat sein ganzes Leben in den Bergen Südtirols verbracht und ist bei seiner Mutter aufgewachsen, .{1029} Leo$', + # 'media_type': 'episode', + 'timestamp': 1706558922, + 'upload_date': '20240129', + 'release_timestamp': 1706472362, + 'release_date': '20240128', + # 'modified_timestamp': 1712756663, + # 'modified_date': '20240410', + # '_old_archive_ids': ['orftvthek 14210000'], + }, + 'params': { + 'format': 'bestvideo', + }, + 'skip': 'Available until 2024-08-12', + }, { + 'url': 'https://on.orf.at/video/3220355', + 'md5': '925a93b2b9a37da5c9b979d7cf71aa2e', + 'info_dict': { + 'id': '3220355', + 'ext': 'mp4', + 'duration': 445.04, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0002/60/thumb_159573_segments_highlight_teaser.png', + 'title': '50 Jahre Burgenland: Der Festumzug', + 'description': r're:(?s)Aus allen Landesteilen zogen festlich geschmückte Wagen und Musikkapellen .{270} Jenakowitsch$', + # 'media_type': 'episode', + 'timestamp': 52916400, + 'upload_date': '19710905', + 'release_timestamp': 52916400, + 'release_date': '19710905', + # 'modified_timestamp': 1498536049, + # 'modified_date': '20170627', + # '_old_archive_ids': ['orftvthek 3220355'], + }, + }, { + # Video with multiple segments selecting the second segment + 'url': 'https://on.orf.at/video/14226549/15639808/jugendbande-einbrueche-aus-langeweile', + 'md5': 'fc151bba8c05ea77ab5693617e4a33d3', + 'info_dict': { + 'id': '15639808', + 'ext': 'mp4', + 'duration': 97.707, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0175/43/thumb_17442704_segments_highlight_teaser.jpg', + 'title': 'Jugendbande: Einbrüche aus Langeweile', + 'description': r're:Jugendbande: Einbrüche aus Langeweile \| Neuer Kinder- und .{259} Wanda$', + # 'media_type': 'segment', + 'timestamp': 1715792400, + 'upload_date': '20240515', + # 'modified_timestamp': 1715794394, + # 'modified_date': '20240515', + # '_old_archive_ids': ['orftvthek 15639808'], + }, + 'params': { + 'noplaylist': True, + 'format': 'bestvideo', + }, + 'skip': 'Available until 2024-06-14', + }, { + # Video with multiple segments and no combined version + 'url': 'https://on.orf.at/video/14227864/formel-1-grosser-preis-von-monaco-2024', + 'info_dict': { + '_type': 'multi_video', + 'id': '14227864', + 'duration': 18410.52, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/04/thumb_17503881_segments_highlight_teaser.jpg', + 'title': 'Formel 1: Großer Preis von Monaco 2024', + 'description': 'md5:aeeb010710ccf70ce28ccb4482243d4f', + # 'media_type': 'episode', + 'timestamp': 1716721200, + 'upload_date': '20240526', + 'release_timestamp': 1716721802, + 'release_date': '20240526', + # 'modified_timestamp': 1716884702, + # 'modified_date': '20240528', + }, + 'playlist_count': 42, + 'skip': 'Gone: Nicht mehr verfügbar', + }, { + # Video with multiple segments, but with combined version + 'url': 'https://on.orf.at/video/14228172', + 'info_dict': { + 'id': '14228172', + 'ext': 'mp4', + 'duration': 3294.878, + 'thumbnail': 'https://api-tvthek.orf.at/assets/segments/0176/29/thumb_17528242_segments_highlight_teaser.jpg', + 'title': 'Willkommen Österreich mit Stermann & Grissemann', + 'description': r're:Zum Saisonfinale freuen sich 
die urlaubsreifen Gastgeber Stermann und .{1863} Geschichten\.$', + # 'media_type': 'episode', + 'timestamp': 1716926584, + 'upload_date': '20240528', + 'release_timestamp': 1716919202, + 'release_date': '20240528', + # 'modified_timestamp': 1716968045, + # 'modified_date': '20240529', + # '_old_archive_ids': ['orftvthek 14228172'], + }, + 'params': { + 'format': 'bestvideo', + }, + 'skip': 'Gone: Nicht mehr verfügbar', + }] + + +class ORFONLiveIE(ORFONBase): + _ENC_PFX = '8876324jshjd7293ktd' + _API_PATH = 'livestream' + _VALID_URL = r'https?://on\.orf\.at/livestream/(?P<id>\d+)(?:/(?P<segment>\d+))?' + _TESTS = [{ + 'url': 'https://on.orf.at/livestream/14320204/pressekonferenz-neos-zu-aktuellen-entwicklungen', + 'info_dict': { + 'id': '14320204', + 'ext': 'mp4', + 'title': 'Pressekonferenz: Neos zu aktuellen Entwicklungen', + 'description': r're:(?s)Neos-Chefin Beate Meinl-Reisinger informi.{598}ng\."', + 'timestamp': 1716886335, + 'upload_date': '20240528', + # 'modified_timestamp': 1712756663, + # 'modified_date': '20240410', + # '_old_archive_ids': ['orftvthek 14210000'], + }, + 'params': { + 'format': 'bestvideo', + }, + }] + + @classmethod + def _parse_metadata(cls, api_json): + return merge_dicts( + super(ORFONLiveIE, cls)._parse_metadata(api_json), + traverse_obj(api_json, { + 'timestamp': ('updated_at', T(parse_iso8601)), + 'release_timestamp': ('start', T(parse_iso8601)), + 'is_live': True, + })) diff --git a/youtube_dl/extractor/palcomp3.py b/youtube_dl/extractor/palcomp3.py index fb29d83f9..60f7a4d48 100644 --- a/youtube_dl/extractor/palcomp3.py +++ b/youtube_dl/extractor/palcomp3.py @@ -8,7 +8,7 @@ from ..compat import compat_str from ..utils import ( int_or_none, str_or_none, - try_get, + traverse_obj, ) @@ -109,7 +109,7 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE): } name''' - @ classmethod + @classmethod def suitable(cls, url): return False if re.match(PalcoMP3IE._VALID_URL, url) else super(PalcoMP3ArtistIE, cls).suitable(url) @@ -118,7 +118,8 @@ class PalcoMP3ArtistIE(PalcoMP3BaseIE): artist = self._call_api(artist_slug, self._ARTIST_FIELDS_TMPL)['artist'] def entries(): - for music in (try_get(artist, lambda x: x['musics']['nodes'], list) or []): + for music in traverse_obj(artist, ( + 'musics', 'nodes', lambda _, m: m['musicID'])): yield self._parse_music(music) return self.playlist_result( @@ -137,7 +138,7 @@ class PalcoMP3VideoIE(PalcoMP3BaseIE): 'title': 'Maiara e Maraisa - Você Faz Falta Aqui - DVD Ao Vivo Em Campo Grande', 'description': 'md5:7043342c09a224598e93546e98e49282', 'upload_date': '20161107', - 'uploader_id': 'maiaramaraisaoficial', + 'uploader_id': '@maiaramaraisaoficial', 'uploader': 'Maiara e Maraisa', } }] diff --git a/youtube_dl/extractor/senateisvp.py b/youtube_dl/extractor/senateisvp.py index db5ef8b57..b8ac58713 100644 --- a/youtube_dl/extractor/senateisvp.py +++ b/youtube_dl/extractor/senateisvp.py @@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor): ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'], ['arch', '', 'http://ussenate-f.akamaihd.net/'] ] - _IE_NAME = 'senate.gov' + IE_NAME = 'senate.gov' _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)' _TESTS = [{ 'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png', diff --git a/youtube_dl/extractor/vidlii.py b/youtube_dl/extractor/vidlii.py index f4774256b..47f328e87 100644 --- 
a/youtube_dl/extractor/vidlii.py +++ b/youtube_dl/extractor/vidlii.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor + from ..utils import ( float_or_none, get_element_by_id, @@ -11,6 +12,7 @@ from ..utils import ( strip_or_none, unified_strdate, urljoin, + str_to_int, ) @@ -36,6 +38,26 @@ class VidLiiIE(InfoExtractor): 'tags': ['Vidlii', 'Jan', 'Videogames'], } }, { + # HD + 'url': 'https://www.vidlii.com/watch?v=2Ng8Abj2Fkl', + 'md5': '450e7da379c884788c3a4fa02a3ce1a4', + 'info_dict': { + 'id': '2Ng8Abj2Fkl', + 'ext': 'mp4', + 'title': 'test', + 'description': 'md5:cc55a86032a7b6b3cbfd0f6b155b52e9', + 'thumbnail': 'https://www.vidlii.com/usfi/thmp/2Ng8Abj2Fkl.jpg', + 'uploader': 'VidLii', + 'uploader_url': 'https://www.vidlii.com/user/VidLii', + 'upload_date': '20200927', + 'duration': 5, + 'view_count': int, + 'comment_count': int, + 'average_rating': float, + 'categories': ['Film & Animation'], + 'tags': list, + }, + }, { 'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0', 'only_matching': True, }] @@ -46,11 +68,32 @@ class VidLiiIE(InfoExtractor): webpage = self._download_webpage( 'https://www.vidlii.com/watch?v=%s' % video_id, video_id) - video_url = self._search_regex( - r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage, - 'video url', group='url') + formats = [] + + def add_format(format_url, height=None): + height = int(self._search_regex(r'(\d+)\.mp4', + format_url, 'height', default=360)) + + formats.append({ + 'url': format_url, + 'format_id': '%dp' % height if height else None, + 'height': height, + }) + + sources = re.findall( + r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', + webpage) + + formats = [] + if len(sources) > 1: + add_format(sources[1][1]) + self._check_formats(formats, video_id) + if len(sources) > 0: + add_format(sources[0][1]) + + self._sort_formats(formats) - title = self._search_regex( + title = self._html_search_regex( (r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage, 'title') @@ -82,9 +125,9 @@ class VidLiiIE(InfoExtractor): default=None) or self._search_regex( r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False)) - view_count = int_or_none(self._search_regex( - (r'<strong>(\d+)</strong> views', - r'Views\s*:\s*<strong>(\d+)</strong>'), + view_count = str_to_int(self._html_search_regex( + (r'<strong>([\d,.]+)</strong> views', + r'Views\s*:\s*<strong>([\d,.]+)</strong>'), webpage, 'view count', fatal=False)) comment_count = int_or_none(self._search_regex( @@ -109,7 +152,7 @@ class VidLiiIE(InfoExtractor): return { 'id': video_id, - 'url': video_url, + 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py index 84969f8e1..8da5b430f 100644 --- a/youtube_dl/extractor/yandexmusic.py +++ b/youtube_dl/extractor/yandexmusic.py @@ -106,6 +106,25 @@ class YandexMusicTrackIE(YandexMusicBaseIE): }, { 'url': 'http://music.yandex.com/album/540508/track/4878838', 'only_matching': True, + }, { + 'url': 'https://music.yandex.ru/album/16302456/track/85430762', + 'md5': '11b8d50ab03b57738deeaadf661a0a48', + 'info_dict': { + 'id': '85430762', + 'ext': 'mp3', + 'abr': 128, + 'title': 'Haddadi Von Engst, Phonic Youth, Super Flu - Til The End (Super Flu Remix)', + 'filesize': int, + 'duration': 431.14, + 'track': 'Til The End (Super Flu Remix)', + 'album': 'Til The End', + 'album_artist': 'Haddadi Von Engst, Phonic Youth', + 'artist': 'Haddadi Von Engst, 
Phonic Youth, Super Flu', + 'release_year': 2021, + 'genre': 'house', + 'disc_number': 1, + 'track_number': 2, + } }] def _real_extract(self, url): @@ -116,10 +135,14 @@ class YandexMusicTrackIE(YandexMusicBaseIE): 'track', tld, url, track_id, 'Downloading track JSON', {'track': '%s:%s' % (track_id, album_id)})['track'] track_title = track['title'] + track_version = track.get('version') + if track_version: + track_title = '%s (%s)' % (track_title, track_version) download_data = self._download_json( 'https://music.yandex.ru/api/v2.1/handlers/track/%s:%s/web-album_track-track-track-main/download/m' % (track_id, album_id), track_id, 'Downloading track location url JSON', + query={'hq': 1}, headers={'X-Retpath-Y': url}) fd_data = self._download_json( diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 90c16e172..b31798729 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -3,11 +3,14 @@ from __future__ import unicode_literals import collections +import hashlib import itertools import json import os.path import random import re +import string +import time import traceback from .common import InfoExtractor, SearchInfoExtractor @@ -24,11 +27,14 @@ from ..compat import ( ) from ..jsinterp import JSInterpreter from ..utils import ( + bug_reports_message, clean_html, dict_get, error_to_compat_str, ExtractorError, + filter_dict, float_or_none, + get_first, extract_attributes, get_element_by_attribute, int_or_none, @@ -43,6 +49,7 @@ from ..utils import ( parse_duration, parse_qs, qualities, + remove_end, remove_start, smuggle_url, str_or_none, @@ -60,11 +67,13 @@ from ..utils import ( url_or_none, urlencode_postdata, urljoin, + variadic, ) class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" + _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge' @@ -78,9 +87,66 @@ class YoutubeBaseInfoExtractor(InfoExtractor): _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)' + _INNERTUBE_CLIENTS = { + 'ios': { + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'IOS', + 'clientVersion': '20.10.4', + 'deviceMake': 'Apple', + 'deviceModel': 'iPhone16,2', + 'userAgent': 'com.google.ios.youtube/20.10.4 (iPhone16,2; U; CPU iOS 18_3_2 like Mac OS X;)', + 'osName': 'iPhone', + 'osVersion': '18.3.2.22D82', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 5, + 'REQUIRE_JS_PLAYER': False, + 'REQUIRE_PO_TOKEN': True, + }, + # mweb has 'ultralow' formats + # See: https://github.com/yt-dlp/yt-dlp/pull/557 + 'mweb': { + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'MWEB', + 'clientVersion': '2.20250311.03.00', + # mweb previously did not require PO Token with this UA + 'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 2, + 'REQUIRE_PO_TOKEN': True, + 'SUPPORTS_COOKIES': True, + }, + 'tv': { + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'TVHTML5', + 'clientVersion': '7.20250312.16.00', + 'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 7, + 'SUPPORTS_COOKIES': True, + }, + 'web': { + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'WEB', + 'clientVersion': '2.20250312.04.00', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 1, + 'REQUIRE_PO_TOKEN': True, + 'SUPPORTS_COOKIES': True, + }, + } 
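# Illustrative sketch (not part of the patch): each _INNERTUBE_CLIENTS entry
# above only stores per-client constants; later code folds one of them into an
# Innertube API request roughly as below. The helper name is hypothetical;
# json is already imported at the top of this module.
def _example_innertube_player_request(client, video_id):
    # 'client' is a single _INNERTUBE_CLIENTS value, e.g. the 'web' entry
    body = {
        'context': client['INNERTUBE_CONTEXT'],
        'videoId': video_id,
        'contentCheckOk': True,
        'racyCheckOk': True,
    }
    headers = {
        'Content-Type': 'application/json',
        # the numeric client id is sent as a string-valued header
        'X-YouTube-Client-Name': str(client['INNERTUBE_CONTEXT_CLIENT_NAME']),
        'X-YouTube-Client-Version': client['INNERTUBE_CONTEXT']['client']['clientVersion'],
    }
    user_agent = client['INNERTUBE_CONTEXT']['client'].get('userAgent')
    if user_agent:
        headers['User-Agent'] = user_agent
    # POST target used by _call_api() below; the old API key is no longer sent
    return ('https://www.youtube.com/youtubei/v1/player?prettyPrint=false',
            json.dumps(body).encode('utf-8'), headers)
# e.g.: url, data, headers = _example_innertube_player_request(_INNERTUBE_CLIENTS['web'], 'dQw4w9WgXcQ')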
+ def _login(self): """ Attempt to log in to YouTube. + True is returned if successful or skipped. False is returned if login failed. @@ -136,7 +202,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4], - 1, [None, None, []], None, None, None, True + 1, [None, None, []], None, None, None, True, ], username, ] @@ -158,7 +224,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): None, 1, None, [1, None, None, None, [password, None, True]], [ None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4], - 1, [None, None, []], None, None, None, True + 1, [None, None, []], None, None, None, True, ]] challenge_results = req( @@ -211,7 +277,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): user_hash, None, 2, None, [ 9, None, None, None, None, None, None, None, - [None, tfa_code, True, 2] + [None, tfa_code, True, 2], ]] tfa_results = req( @@ -277,32 +343,57 @@ class YoutubeBaseInfoExtractor(InfoExtractor): if not self._login(): return - _DEFAULT_API_DATA = { - 'context': { - 'client': { - 'clientName': 'WEB', - 'clientVersion': '2.20201021.03.00', - } - }, - } + _DEFAULT_API_DATA = {'context': _INNERTUBE_CLIENTS['web']['INNERTUBE_CONTEXT']} _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;' _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;' _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)' - def _call_api(self, ep, query, video_id, fatal=True, headers=None): + _SAPISID = None + + def _generate_sapisidhash_header(self, origin='https://www.youtube.com'): + time_now = round(time.time()) + if self._SAPISID is None: + yt_cookies = self._get_cookies('https://www.youtube.com') + # Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is. 
+ # See: https://github.com/yt-dlp/yt-dlp/issues/393 + sapisid_cookie = dict_get( + yt_cookies, ('__Secure-3PAPISID', 'SAPISID')) + if sapisid_cookie and sapisid_cookie.value: + self._SAPISID = sapisid_cookie.value + self.write_debug('Extracted SAPISID cookie') + # SAPISID cookie is required if not already present + if not yt_cookies.get('SAPISID'): + self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie') + self._set_cookie( + '.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600) + else: + self._SAPISID = False + if not self._SAPISID: + return None + # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323 + sapisidhash = hashlib.sha1( + '{0} {1} {2}'.format(time_now, self._SAPISID, origin).encode('utf-8')).hexdigest() + return 'SAPISIDHASH {0}_{1}'.format(time_now, sapisidhash) + + def _call_api(self, ep, query, video_id, fatal=True, headers=None, + note='Downloading API JSON'): data = self._DEFAULT_API_DATA.copy() data.update(query) real_headers = {'content-type': 'application/json'} if headers: real_headers.update(headers) + # was: 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8' + api_key = self.get_param('youtube_innertube_key') return self._download_json( 'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id, - note='Downloading API JSON', errnote='Unable to download API page', + note=note, errnote='Unable to download API page', data=json.dumps(data).encode('utf8'), fatal=fatal, - headers=real_headers, - query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'}) + headers=real_headers, query=filter_dict({ + 'key': api_key, + 'prettyPrint': 'false', + })) def _extract_yt_initial_data(self, video_id, webpage): return self._parse_json( @@ -311,6 +402,22 @@ class YoutubeBaseInfoExtractor(InfoExtractor): self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'), video_id) + def _extract_visitor_data(self, *args): + """ + Extract visitorData from an API response or ytcfg + + Appears to be used to track session state + """ + visitor_data = self.get_param('youtube_visitor_data') + if visitor_data: + return visitor_data + + return get_first( + args, (('VISITOR_DATA', + ('INNERTUBE_CONTEXT', 'client', 'visitorData'), + ('responseContext', 'visitorData')), + T(compat_str))) + def _extract_ytcfg(self, video_id, webpage): return self._parse_json( self._search_regex( @@ -350,13 +457,33 @@ class YoutubeBaseInfoExtractor(InfoExtractor): 'uploader': uploader, } + @staticmethod + def _extract_thumbnails(data, *path_list, **kw_final_key): + """ + Extract thumbnails from thumbnails dict + @param path_list: path list to level that contains 'thumbnails' key + """ + final_key = kw_final_key.get('final_key', 'thumbnails') + + return traverse_obj(data, (( + tuple(variadic(path) + (final_key, Ellipsis) + for path in path_list or [()])), { + 'url': ('url', T(url_or_none), + # Sometimes youtube gives a wrong thumbnail URL. 
See: + # https://github.com/yt-dlp/yt-dlp/issues/233 + # https://github.com/ytdl-org/youtube-dl/issues/28023 + T(lambda u: update_url(u, query=None) if u and 'maxresdefault' in u else u)), + 'height': ('height', T(int_or_none)), + 'width': ('width', T(int_or_none)), + }, T(lambda t: t if t.get('url') else None))) + def _search_results(self, query, params): data = { 'context': { 'client': { 'clientName': 'WEB', 'clientVersion': '2.20201021.03.00', - } + }, }, 'query': query, } @@ -364,11 +491,15 @@ class YoutubeBaseInfoExtractor(InfoExtractor): data['params'] = params for page_num in itertools.count(1): search = self._download_json( - 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'https://www.youtube.com/youtubei/v1/search', video_id='query "%s"' % query, note='Downloading page %s' % page_num, errnote='Unable to download API page', fatal=False, data=json.dumps(data).encode('utf8'), + query={ + # 'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'prettyPrint': 'false', + }, headers={'content-type': 'application/json'}) if not search: break @@ -433,7 +564,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): # (HTML, videodetails, metadata, renderers) 'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']), 'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl', - ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl']) + ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl']), } if any((videodetails, metadata, renderers)): result = ( @@ -559,9 +690,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'invidious': '|'.join(_INVIDIOUS_SITES), } _PLAYER_INFO_RE = ( - r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player', - r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$', - r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$', + r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player', + r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias(?:_tce)?\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$', + r'\b(?P<id>vfl[a-zA-Z0-9_-]{6,})\b.*?\.js$', ) _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt') @@ -642,7 +773,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO', 'description': '', 'uploader': '8KVIDEO', - 'title': 'UHDTV TEST 8K VIDEO.mp4' + 'title': 'UHDTV TEST 8K VIDEO.mp4', }, 'params': { 'youtube_include_dash_manifest': True, @@ -682,7 +813,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@theamazingatheist', 'title': 'Burning Everyone\'s Koran', 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html', - } + }, }, # Age-gated videos { @@ -810,7 +941,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): }, 'expected_warnings': [ 'DASH manifest missing', - ] + ], }, # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431) { @@ -1454,6 +1585,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor): '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, } + _PLAYER_JS_VARIANT_MAP = ( + ('main', 'player_ias.vflset/en_US/base.js'), + ('tce', 'player_ias_tce.vflset/en_US/base.js'), + ('tv', 'tv-player-ias.vflset/tv-player-ias.js'), + ('tv_es6', 'tv-player-es6.vflset/tv-player-es6.js'), + ('phone', 
'player-plasma-ias-phone-en_US.vflset/base.js'), + ('tablet', 'player-plasma-ias-tablet-en_US.vflset/base.js'), + ) + @classmethod def suitable(cls, url): if parse_qs(url).get('list', [None])[0]: @@ -1493,46 +1633,97 @@ class YoutubeIE(YoutubeBaseInfoExtractor): """ Return a string representation of a signature """ return '.'.join(compat_str(len(part)) for part in example_sig.split('.')) - @classmethod - def _extract_player_info(cls, player_url): - for player_re in cls._PLAYER_INFO_RE: - id_m = re.search(player_re, player_url) - if id_m: - break - else: - raise ExtractorError('Cannot identify player %r' % player_url) - return id_m.group('id') + def _extract_player_info(self, player_url): + try: + return self._search_regex( + self._PLAYER_INFO_RE, player_url, 'player info', group='id') + except ExtractorError as e: + raise ExtractorError( + 'Cannot identify player %r' % (player_url,), cause=e) - def _load_player(self, video_id, player_url, fatal=True, player_id=None): - if not player_id: + def _player_js_cache_key(self, player_url, extra_id=None, _cache={}): + if player_url not in _cache: player_id = self._extract_player_info(player_url) - if player_id not in self._code_cache: + player_path = remove_start( + compat_urllib_parse.urlparse(player_url).path, + '/s/player/{0}/'.format(player_id)) + variant = next((k for k, v in self._PLAYER_JS_VARIANT_MAP + if v == player_path), None) + if not variant: + variant = next( + (k for k, v in self._PLAYER_JS_VARIANT_MAP + if re.match(re.escape(v).replace('en_US', r'\w+') + '$', player_path)), + None) + if not variant: + self.write_debug( + 'Unable to determine player JS variant\n' + ' player = {0}'.format(player_url), only_once=True) + variant = re.sub(r'[^a-zA-Z0-9]', '_', remove_end(player_path, '.js')) + _cache[player_url] = join_nonempty(player_id, variant) + + if extra_id: + extra_id = '-'.join((_cache[player_url], extra_id)) + assert os.path.basename(extra_id) == extra_id + return extra_id + return _cache[player_url] + + def _load_player(self, video_id, player_url, fatal=True): + player_js_key = self._player_js_cache_key(player_url) + if player_js_key not in self._code_cache: code = self._download_webpage( player_url, video_id, fatal=fatal, - note='Downloading player ' + player_id, - errnote='Download of %s failed' % player_url) + note='Downloading player {0}'.format(player_js_key), + errnote='Download of {0} failed'.format(player_url)) if code: - self._code_cache[player_id] = code - return self._code_cache[player_id] if fatal else self._code_cache.get(player_id) + self._code_cache[player_js_key] = code + return self._code_cache.get(player_js_key) + + def _load_player_data_from_cache(self, name, player_url, extra_id=None): + cache_id = ('youtube-{0}'.format(name), self._player_js_cache_key(player_url, extra_id)) + data = self._player_cache.get(cache_id) + if data: + return data + + data = self.cache.load(*cache_id, min_ver='2025.04.07') + if data: + self._player_cache[cache_id] = data + return data + + def _store_player_data_to_cache(self, name, player_url, data, extra_id=None): + cache_id = ('youtube-{0}'.format(name), self._player_js_cache_key(player_url, extra_id)) + + if cache_id not in self._player_cache: + self.cache.store(cache_id[0], cache_id[1], data) + self._player_cache[cache_id] = data + + def _remove_player_data_from_cache(self, name, player_url, extra_id=None): + cache_id = ('youtube-{0}'.format(name), self._player_js_cache_key(player_url, extra_id)) + + if cache_id in self._player_cache: + self.cache.clear(*cache_id) + 
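# also evict the in-memory copy held in self._player_cache for this run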
self._player_cache.pop(cache_id, None) def _extract_signature_function(self, video_id, player_url, example_sig): - player_id = self._extract_player_info(player_url) + # player_id = self._extract_player_info(player_url) # Read from filesystem cache - func_id = 'js_{0}_{1}'.format( - player_id, self._signature_cache_id(example_sig)) - assert os.path.basename(func_id) == func_id - - self.write_debug('Extracting signature function {0}'.format(func_id)) - cache_spec, code = self.cache.load('youtube-sigfuncs', func_id), None + extra_id = self._signature_cache_id(example_sig) + self.write_debug('Extracting signature function {0}-{1}'.format(player_url, extra_id)) + cache_spec, code = self._load_player_data_from_cache( + 'sigfuncs', player_url, extra_id=extra_id), None if not cache_spec: - code = self._load_player(video_id, player_url, player_id) - if code: - res = self._parse_sig_js(code) - test_string = ''.join(map(compat_chr, range(len(example_sig)))) - cache_spec = [ord(c) for c in res(test_string)] - self.cache.store('youtube-sigfuncs', func_id, cache_spec) + code = self._load_player(video_id, player_url) + if code: + res = self._parse_sig_js(code) + test_string = ''.join(map(compat_chr, range(len(example_sig)))) + cache_spec = [ord(c) for c in res(test_string)] + self._store_player_data_to_cache( + 'sigfuncs', player_url, cache_spec, extra_id=extra_id) + else: + self.report_warning( + 'Failed to compute signature function {0}-{1}'.format( + player_url, extra_id)) return lambda s: ''.join(s[i] for i in cache_spec) @@ -1578,26 +1769,54 @@ class YoutubeIE(YoutubeBaseInfoExtractor): ' return %s\n') % (signature_id_tuple, expr_code) self.to_screen('Extracted signature function:\n' + code) + def _extract_sig_fn(self, jsi, funcname): + var_ay = self._search_regex( + r'''(?x) + (?:\*/|\{|\n|^)\s*(?:'[^']+'\s*;\s*) + (var\s*[\w$]+\s*=\s*(?: + ('|")(?:\\\2|(?!\2).)+\2\s*\.\s*split\(\s*('|")\W+\3\s*\)| + \[\s*(?:('|")(?:\\\4|(?!\4).)*\4\s*(?:(?=\])|,\s*))+\] + ))(?=\s*[,;]) + ''', jsi.code, 'useful values', default='') + + sig_fn = jsi.extract_function_code(funcname) + + if var_ay: + sig_fn = (sig_fn[0], ';\n'.join((var_ay, sig_fn[1]))) + + return sig_fn + def _parse_sig_js(self, jscode): + # Examples where `sig` is funcname: + # sig=function(a){a=a.split(""); ... ;return a.join("")}; + # ;c&&(c=sig(decodeURIComponent(c)),a.set(b,encodeURIComponent(c)));return a}; + # {var l=f,m=h.sp,n=sig(decodeURIComponent(h.s));l.set(m,encodeURIComponent(n))} + # sig=function(J){J=J.split(""); ... 
;return J.join("")}; + # ;N&&(N=sig(decodeURIComponent(N)),J.set(R,encodeURIComponent(N)));return J}; + # {var H=u,k=f.sp,v=sig(decodeURIComponent(f.s));H.set(k,encodeURIComponent(v))} funcname = self._search_regex( - (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', - r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(', - r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)', - r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)', - r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?', - r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', + (r'\b(?P<var>[\w$]+)&&\((?P=var)=(?P<sig>[\w$]{2,})\(decodeURIComponent\((?P=var)\)\)', + r'(?P<sig>[\w$]+)\s*=\s*function\(\s*(?P<arg>[\w$]+)\s*\)\s*{\s*(?P=arg)\s*=\s*(?P=arg)\.split\(\s*""\s*\)\s*;\s*[^}]+;\s*return\s+(?P=arg)\.join\(\s*""\s*\)', + r'(?:\b|[^\w$])(?P<sig>[\w$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[\w$]{2}\.[\w$]{2}\(a,\d+\))?', + # Old patterns + r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[\w$]+)\(', + r'\b[\w]+\s*&&\s*[\w]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[\w$]+)\(', + r'\bm=(?P<sig>[\w$]{2,})\(decodeURIComponent\(h\.s\)\)', # Obsolete patterns - r'("|\')signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', - r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(', - r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(', - r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', - r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', - r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('), + r'("|\')signature\1\s*,\s*(?P<sig>[\w$]+)\(', + r'\.sig\|\|(?P<sig>[\w$]+)\(', + r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[\w$]+)\(', + r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[\w$]+)\(', + r'\bc\s*&&\s*[\w]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[\w$]+)\('), jscode, 'Initial JS player signature function name', group='sig') jsi = JSInterpreter(jscode) - initial_function = jsi.extract_function(funcname) - return lambda s: initial_function([s]) + + initial_function = self._extract_sig_fn(jsi, funcname) + + func = jsi.extract_function_from_code(*initial_function) + + return lambda s: func([s]) def _cached(self, func, *cache_id): def inner(*args, **kwargs): @@ -1636,7 +1855,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): try: jsi, player_id, func_code = self._extract_n_function_code(video_id, player_url) except ExtractorError as e: - raise ExtractorError('Unable to extract nsig jsi, player_id, func_codefunction code', cause=e) + raise ExtractorError('Unable to extract nsig function code', cause=e) if self.get_param('youtube_print_sig_code'): self.to_screen('Extracted nsig function from {0}:\n{1}\n'.format( player_id, func_code[1])) @@ -1647,7 +1866,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): except JSInterpreter.Exception as e: self.report_warning( '%s (%s %s)' % ( - 'Unable to decode n-parameter: download likely to be throttled', + 'Unable to decode n-parameter: expect download to be blocked or throttled', error_to_compat_str(e), traceback.format_exc()), video_id=video_id) @@ -1657,41 +1876,103 @@ class 
YoutubeIE(YoutubeBaseInfoExtractor): return ret def _extract_n_function_name(self, jscode): + func_name, idx = None, None + + def generic_n_function_search(func_name=None): + return self._search_regex( + r'''(?xs) + (?:(?<=[^\w$])|^) # instead of \b, which ignores $ + (?P<name>%s)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\) + \s*\{(?:(?!};).)+?(?: + ["']enhanced_except_ | + return\s*(?P<q>"|')[a-zA-Z\d-]+_w8_(?P=q)\s*\+\s*[\w$]+ + ) + ''' % (func_name or r'(?!\d)[a-zA-Z\d_$]+',), jscode, + 'Initial JS player n function name', group='name', + default=None if func_name else NO_DEFAULT) + + # these special cases are redundant and probably obsolete (2025-04): + # they make the tests run ~10% faster without fallback warnings + r""" func_name, idx = self._search_regex( - r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z_$][\w$]*)(?:\[(?P<idx>\d+)\])?\([\w$]+\)', - jscode, 'Initial JS player n function name', group=('nfunc', 'idx')) + # (y=NuD(),Mw(k),q=k.Z[y]||null)&&(q=narray[idx](q),k.set(y,q),k.V||NuD(''))}}; + # (R="nn"[+J.Z],mW(J),N=J.K[R]||null)&&(N=narray[idx](N),J.set(R,N))}}; + # or: (b=String.fromCharCode(110),c=a.get(b))&&c=narray[idx](c) + # or: (b="nn"[+a.D],c=a.get(b))&&(c=narray[idx](c) + # or: (PL(a),b=a.j.n||null)&&(b=narray[idx](b) + # or: (b="nn"[+a.D],vL(a),c=a.j[b]||null)&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("") + # old: (b=a.get("n"))&&(b=narray[idx](b)(?P<c>[a-z])\s*=\s*[a-z]\s* + # older: (b=a.get("n"))&&(b=nfunc(b) + r'''(?x) + # (expr, ..., + \((?:(?:\s*[\w$]+\s*=)?(?:[\w$"+\.\s(\[]+(?:[)\]]\s*)?),)* + # b=... + (?P<b>[\w$]+)\s*=\s*(?!(?P=b)[^\w$])[\w$]+\s*(?:(?: + \.\s*[\w$]+ | + \[\s*[\w$]+\s*\] | + \.\s*get\s*\(\s*[\w$"]+\s*\) + )\s*){,2}(?:\s*\|\|\s*null(?=\s*\)))?\s* + \)\s*&&\s*\( # ...)&&( + # b = nfunc, b = narray[idx] + (?P=b)\s*=\s*(?P<nfunc>[\w$]+)\s* + (?:\[\s*(?P<idx>[\w$]+)\s*\]\s*)? + # (...) + \(\s*[\w$]+\s*\) + ''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'), + default=(None, None)) + """ + + if not func_name: + # nfunc=function(x){...}|function nfunc(x); ... + # ... 
var y=[nfunc]|y[idx]=nfunc); + # obvious REs hang, so use a two-stage tactic + for m in re.finditer(r'''(?x) + [\n;]var\s(?:(?:(?!,).)+,|\s)*?(?!\d)[\w$]+(?:\[(?P<idx>\d+)\])?\s*=\s* + (?(idx)|\[\s*)(?P<nfunc>(?!\d)[\w$]+)(?(idx)|\s*\]) + \s*?[;\n] + ''', jscode): + fn = self._search_regex( + r'[;,]\s*(function\s+)?({0})(?(1)|\s*=\s*function)\s*\((?!\d)[\w$]+\)\s*\{1}(?!\s*return\s)'.format( + re.escape(m.group('nfunc')), '{'), + jscode, 'Initial JS player n function name (2)', group=2, default=None) + if fn: + func_name = fn + idx = m.group('idx') + if generic_n_function_search(func_name): + # don't look any further + break + + # thx bashonly: yt-dlp/yt-dlp/pull/10611 + if not func_name: + self.report_warning('Falling back to generic n function search', only_once=True) + return generic_n_function_search() + if not idx: return func_name - return self._parse_json(self._search_regex( - r'var {0}\s*=\s*(\[.+?\])\s*[,;]'.format(re.escape(func_name)), jscode, - 'Initial JS player n function list ({0}.{1})'.format(func_name, idx)), - func_name, transform_source=js_to_json)[int(idx)] + return self._search_json( + r'(?<![\w-])var\s(?:(?:(?!,).)+,|\s)*?{0}\s*='.format(re.escape(func_name)), jscode, + 'Initial JS player n function list ({0}.{1})'.format(func_name, idx), + func_name, contains_pattern=r'\[.+\]', end_pattern='[,;]', + transform_source=js_to_json)[int(idx)] def _extract_n_function_code(self, video_id, player_url): player_id = self._extract_player_info(player_url) - func_code = self.cache.load('youtube-nsig', player_id) + func_code = self._load_player_data_from_cache('nsig', player_url) jscode = func_code or self._load_player(video_id, player_url) jsi = JSInterpreter(jscode) if func_code: return jsi, player_id, func_code - func_name = self._extract_n_function_name(jscode) + return self._extract_n_function_code_jsi(video_id, jsi, player_id, player_url) - # For redundancy - func_code = self._search_regex( - r'''(?xs)%s\s*=\s*function\s*\((?P<var>[\w$]+)\)\s* - # NB: The end of the regex is intentionally kept strict - {(?P<code>.+?}\s*return\ [\w$]+.join\(""\))};''' % func_name, - jscode, 'nsig function', group=('var', 'code'), default=None) - if func_code: - func_code = ([func_code[0]], func_code[1]) - else: - self.write_debug('Extracting nsig function with jsinterp') - func_code = jsi.extract_function_code(func_name) + def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None, player_url=None): + func_name = self._extract_n_function_name(jsi.code) - self.cache.store('youtube-nsig', player_id, func_code) + func_code = self._extract_sig_fn(jsi, func_name) + if player_url: + self._store_player_data_to_cache('nsig', player_url, func_code) return jsi, player_id, func_code def _extract_n_function_from_code(self, jsi, func_code): @@ -1699,13 +1980,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor): def extract_nsig(s): try: - ret = func([s]) + ret = func([s], kwargs={'_ytdl_do_not_return': s}) except JSInterpreter.Exception: raise except Exception as e: raise JSInterpreter.Exception(traceback.format_exc(), cause=e) - if ret.startswith('enhanced_except_'): + if ret.startswith('enhanced_except_') or ret.endswith(s): raise JSInterpreter.Exception('Signature function returned an exception') return ret @@ -1724,7 +2005,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): n_param = n_param[-1] n_response = decrypt_nsig(n_param)(n_param, video_id, player_url) if n_response is None: - # give up if descrambling failed + # give up and forget cached data if descrambling failed + 
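# (the cached code may have been extracted wrongly; a fresh extraction from the player JS will be tried on the next run)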
self._remove_player_data_from_cache('nsig', player_url) break fmt['url'] = update_url_query(fmt['url'], {'n': n_response}) @@ -1735,18 +2017,28 @@ class YoutubeIE(YoutubeBaseInfoExtractor): Required to tell API what sig/player version is in use. """ sts = traverse_obj(ytcfg, 'STS', expected_type=int) - if not sts: - # Attempt to extract from player - if player_url is None: - error_msg = 'Cannot extract signature timestamp without player_url.' - if fatal: - raise ExtractorError(error_msg) - self.report_warning(error_msg) - return - code = self._load_player(video_id, player_url, fatal=fatal) - sts = int_or_none(self._search_regex( - r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '', - 'JS player signature timestamp', group='sts', fatal=fatal)) + if sts: + return sts + + if not player_url: + error_msg = 'Cannot extract signature timestamp without player url' + if fatal: + raise ExtractorError(error_msg) + self.report_warning(error_msg) + return None + + sts = self._load_player_data_from_cache('sts', player_url) + if sts: + return sts + + # Attempt to extract from player + code = self._load_player(video_id, player_url, fatal=fatal) + sts = int_or_none(self._search_regex( + r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '', + 'JS player signature timestamp', group='sts', fatal=fatal)) + if sts: + self._store_player_data_to_cache('sts', player_url, sts) + return sts def _mark_watched(self, video_id, player_response): @@ -1758,8 +2050,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): # cpn generation algorithm is reverse engineered from base.js. # In fact it works even with dummy cpn. - CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_' - cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)) + CPN_ALPHABET = string.ascii_letters + string.digits + '-_' + cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16)) # more consistent results setting it to right before the end qs = parse_qs(playback_url) @@ -1819,8 +2111,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): mobj = re.match(cls._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) - video_id = mobj.group(2) - return video_id + return mobj.group(2) def _extract_chapters_from_json(self, data, video_id, duration): chapters_list = try_get( @@ -1881,9 +2172,89 @@ class YoutubeIE(YoutubeBaseInfoExtractor): player_response = self._extract_yt_initial_variable( webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') - if not player_response: + is_live = traverse_obj(player_response, ('videoDetails', 'isLive')) + + if False and not player_response: player_response = self._call_api( 'player', {'videoId': video_id}, video_id) + if True or not player_response: + origin = 'https://www.youtube.com' + pb_context = {'html5Preference': 'HTML5_PREF_WANTS'} + + player_url = self._extract_player_url(webpage) + ytcfg = self._extract_ytcfg(video_id, webpage or '') + sts = self._extract_signature_timestamp(video_id, player_url, ytcfg) + if sts: + pb_context['signatureTimestamp'] = sts + + client_names = traverse_obj(self._INNERTUBE_CLIENTS, ( + T(dict.items), lambda _, k_v: not k_v[1].get('REQUIRE_PO_TOKEN'), + 0))[:1] + if 'web' not in client_names: + # webpage links won't download: ignore links and playability + player_response = filter_dict( + player_response or {}, + lambda k, _: k not in ('streamingData', 'playabilityStatus')) + + if is_live and 'ios' not in client_names: + 
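# live streams need an HLS manifest, which the ios client response provides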
client_names.append('ios') + + headers = { + 'Sec-Fetch-Mode': 'navigate', + 'Origin': origin, + 'X-Goog-Visitor-Id': self._extract_visitor_data(ytcfg) or '', + } + auth = self._generate_sapisidhash_header(origin) + if auth is not None: + headers['Authorization'] = auth + headers['X-Origin'] = origin + + for client in traverse_obj(self._INNERTUBE_CLIENTS, (client_names, T(dict))): + + query = { + 'playbackContext': { + 'contentPlaybackContext': pb_context, + }, + 'contentCheckOk': True, + 'racyCheckOk': True, + 'context': { + 'client': merge_dicts( + traverse_obj(client, ('INNERTUBE_CONTEXT', 'client')), { + 'hl': 'en', + 'timeZone': 'UTC', + 'utcOffsetMinutes': 0, + }), + }, + 'videoId': video_id, + } + + api_headers = merge_dicts(headers, traverse_obj(client, { + 'X-YouTube-Client-Name': 'INNERTUBE_CONTEXT_CLIENT_NAME', + 'X-YouTube-Client-Version': ( + 'INNERTUBE_CONTEXT', 'client', 'clientVersion'), + 'User-Agent': ( + 'INNERTUBE_CONTEXT', 'client', 'userAgent'), + })) + + api_player_response = self._call_api( + 'player', query, video_id, fatal=False, headers=api_headers, + note=join_nonempty( + 'Downloading', traverse_obj(query, ( + 'context', 'client', 'clientName')), + 'API JSON', delim=' ')) + + hls = traverse_obj( + (player_response, api_player_response), + (Ellipsis, 'streamingData', 'hlsManifestUrl', T(url_or_none))) + if len(hls) == 2 and not hls[0] and hls[1]: + player_response['streamingData']['hlsManifestUrl'] = hls[1] + else: + video_details = merge_dicts(*traverse_obj( + (player_response, api_player_response), + (Ellipsis, 'videoDetails', T(dict)))) + player_response.update(filter_dict( + api_player_response or {}, cndn=lambda k, _: k != 'captions')) + player_response['videoDetails'] = video_details def is_agegated(playability): if not isinstance(playability, dict): @@ -1932,7 +2303,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): headers = { 'X-YouTube-Client-Name': '85', 'X-YouTube-Client-Version': '2.0', - 'Origin': 'https://www.youtube.com' + 'Origin': 'https://www.youtube.com', } video_info = self._call_api('player', query, video_id, fatal=False, headers=headers) @@ -1961,8 +2332,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)]) search_meta = ( - lambda x: self._html_search_meta(x, webpage, default=None)) \ - if webpage else lambda x: None + (lambda x: self._html_search_meta(x, webpage, default=None)) + if webpage else lambda _: None) video_details = player_response.get('videoDetails') or {} microformat = try_get( @@ -2026,6 +2397,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): itag_qualities = {} q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres']) CHUNK_SIZE = 10 << 20 + is_live = video_details.get('isLive') streaming_data = player_response.get('streamingData') or {} streaming_formats = streaming_data.get('formats') or [] @@ -2034,7 +2406,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): def build_fragments(f): return LazyList({ 'url': update_url_query(f['url'], { - 'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize'])) + 'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize'])), }) } for range_start in range(0, f['filesize'], CHUNK_SIZE)) @@ -2133,7 +2505,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'protocol': 'http_dash_segments', 'fragments': build_fragments(dct), } if dct['filesize'] else { - 'downloader_options': {'http_chunk_size': CHUNK_SIZE} # No longer 
useful? + 'downloader_options': {'http_chunk_size': CHUNK_SIZE}, # No longer useful? }) formats.append(dct) @@ -2170,7 +2542,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): hls_manifest_url = streaming_data.get('hlsManifestUrl') if hls_manifest_url: for f in self._extract_m3u8_formats( - hls_manifest_url, video_id, 'mp4', fatal=False): + hls_manifest_url, video_id, 'mp4', + entry_protocol='m3u8_native', live=is_live, fatal=False): if process_manifest_format( f, 'hls', None, self._search_regex( r'/itag/(\d+)', f['url'], 'itag', default=None)): @@ -2190,12 +2563,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor): formats.append(f) playable_formats = [f for f in formats if not f.get('has_drm')] - if formats and not playable_formats: - # If there are no formats that definitely don't have DRM, all have DRM - self.report_drm(video_id) - formats[:] = playable_formats - - if not formats: + if formats: + if not playable_formats: + # If there are no formats that definitely don't have DRM, all have DRM + self.report_drm(video_id) + formats[:] = playable_formats + else: if streaming_data.get('licenseInfos'): raise ExtractorError( 'This video is DRM protected.', expected=True) @@ -2276,8 +2649,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): # Strictly de-prioritize damaged formats f['preference'] = -10 - is_live = video_details.get('isLive') - owner_profile_url = self._yt_urljoin(self._extract_author_var( webpage, 'url', videodetails=video_details, metadata=microformat)) @@ -2311,9 +2682,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'is_live': is_live, } - pctr = try_get( - player_response, - lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict) + pctr = traverse_obj( + (player_response, api_player_response), + (Ellipsis, 'captions', 'playerCaptionsTracklistRenderer', T(dict))) if pctr: def process_language(container, base_url, lang_code, query): lang_subs = [] @@ -2327,31 +2698,35 @@ class YoutubeIE(YoutubeBaseInfoExtractor): }) container[lang_code] = lang_subs - subtitles = {} - for caption_track in (pctr.get('captionTracks') or []): - base_url = caption_track.get('baseUrl') - if not base_url: - continue - if caption_track.get('kind') != 'asr': - lang_code = caption_track.get('languageCode') - if not lang_code: + def process_subtitles(): + subtitles = {} + for caption_track in traverse_obj(pctr, ( + Ellipsis, 'captionTracks', lambda _, v: ( + v.get('baseUrl') and v.get('languageCode')))): + base_url = self._yt_urljoin(caption_track['baseUrl']) + if not base_url: continue - process_language( - subtitles, base_url, lang_code, {}) - continue - automatic_captions = {} - for translation_language in (pctr.get('translationLanguages') or []): - translation_language_code = translation_language.get('languageCode') - if not translation_language_code: + lang_code = caption_track['languageCode'] + if caption_track.get('kind') != 'asr': + process_language( + subtitles, base_url, lang_code, {}) continue + automatic_captions = {} process_language( - automatic_captions, base_url, translation_language_code, - {'tlang': translation_language_code}) - info['automatic_captions'] = automatic_captions - info['subtitles'] = subtitles + automatic_captions, base_url, lang_code, {}) + for translation_language in traverse_obj(pctr, ( + Ellipsis, 'translationLanguages', lambda _, v: v.get('languageCode'))): + translation_language_code = translation_language['languageCode'] + process_language( + automatic_captions, base_url, translation_language_code, + {'tlang': translation_language_code}) + info['automatic_captions'] 
= automatic_captions + info['subtitles'] = subtitles + + process_subtitles() parsed_url = compat_urllib_parse_urlparse(url) - for component in [parsed_url.fragment, parsed_url.query]: + for component in (parsed_url.fragment, parsed_url.query): query = compat_parse_qs(component) for k, v in query.items(): for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]: @@ -2581,7 +2956,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'title': 'Super Cooper Shorts - Shorts', 'uploader': 'Super Cooper Shorts', 'uploader_id': '@SuperCooperShorts', - } + }, }, { # Channel that does not have a Shorts tab. Test should just download videos on Home tab instead 'url': 'https://www.youtube.com/@emergencyawesome/shorts', @@ -2635,7 +3010,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'description': 'md5:609399d937ea957b0f53cbffb747a14c', 'uploader': 'ThirstForScience', 'uploader_id': '@ThirstForScience', - } + }, }, { 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists', 'only_matching': True, @@ -2934,7 +3309,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'uploader': '3Blue1Brown', 'uploader_id': '@3blue1brown', 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw', - } + }, }] @classmethod @@ -2959,8 +3334,12 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): expected_type=txt_or_none) def _grid_entries(self, grid_renderer): - for item in grid_renderer['items']: - if not isinstance(item, dict): + for item in traverse_obj(grid_renderer, ('items', Ellipsis, T(dict))): + lockup_view_model = traverse_obj(item, ('lockupViewModel', T(dict))) + if lockup_view_model: + entry = self._extract_lockup_view_model(lockup_view_model) + if entry: + yield entry continue renderer = self._extract_grid_item_renderer(item) if not isinstance(renderer, dict): @@ -3044,6 +3423,39 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): continue yield self._extract_video(renderer) + def _extract_lockup_view_model(self, view_model): + content_id = view_model.get('contentId') + if not content_id: + return + content_type = view_model.get('contentType') + if content_type not in ('LOCKUP_CONTENT_TYPE_PLAYLIST', 'LOCKUP_CONTENT_TYPE_PODCAST'): + self.report_warning( + 'Unsupported lockup view model content type "{0}"{1}'.format(content_type, bug_reports_message()), only_once=True) + return + return merge_dicts(self.url_result( + update_url_query('https://www.youtube.com/playlist', {'list': content_id}), + ie=YoutubeTabIE.ie_key(), video_id=content_id), { + 'title': traverse_obj(view_model, ( + 'metadata', 'lockupMetadataViewModel', 'title', 'content', T(compat_str))), + 'thumbnails': self._extract_thumbnails(view_model, ( + 'contentImage', 'collectionThumbnailViewModel', 'primaryThumbnail', + 'thumbnailViewModel', 'image'), final_key='sources'), + }) + + def _extract_shorts_lockup_view_model(self, view_model): + content_id = traverse_obj(view_model, ( + 'onTap', 'innertubeCommand', 'reelWatchEndpoint', 'videoId', + T(lambda v: v if YoutubeIE.suitable(v) else None))) + if not content_id: + return + return merge_dicts(self.url_result( + content_id, ie=YoutubeIE.ie_key(), video_id=content_id), { + 'title': traverse_obj(view_model, ( + 'overlayMetadata', 'primaryText', 'content', T(compat_str))), + 'thumbnails': self._extract_thumbnails( + view_model, 'thumbnail', final_key='sources'), + }) + def _video_entry(self, video_renderer): video_id = video_renderer.get('videoId') if video_id: @@ -3090,10 +3502,9 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): yield entry def _rich_grid_entries(self, contents): - for content in contents: - 
content = traverse_obj( - content, ('richItemRenderer', 'content'), - expected_type=dict) or {} + for content in traverse_obj( + contents, (Ellipsis, 'richItemRenderer', 'content'), + expected_type=dict): video_renderer = traverse_obj( content, 'videoRenderer', 'reelItemRenderer', expected_type=dict) @@ -3101,6 +3512,12 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): entry = self._video_entry(video_renderer) if entry: yield entry + # shorts item + shorts_lockup_view_model = content.get('shortsLockupViewModel') + if shorts_lockup_view_model: + entry = self._extract_shorts_lockup_view_model(shorts_lockup_view_model) + if entry: + yield entry # playlist renderer = traverse_obj( content, 'playlistRenderer', expected_type=dict) or {} @@ -3139,23 +3556,15 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): next_continuation = cls._extract_next_continuation_data(renderer) if next_continuation: return next_continuation - contents = [] - for key in ('contents', 'items'): - contents.extend(try_get(renderer, lambda x: x[key], list) or []) - for content in contents: - if not isinstance(content, dict): - continue - continuation_ep = try_get( - content, lambda x: x['continuationItemRenderer']['continuationEndpoint'], - dict) - if not continuation_ep: - continue - continuation = try_get( - continuation_ep, lambda x: x['continuationCommand']['token'], compat_str) + for command in traverse_obj(renderer, ( + ('contents', 'items', 'rows'), Ellipsis, 'continuationItemRenderer', + ('continuationEndpoint', ('button', 'buttonRenderer', 'command')), + (('commandExecutorCommand', 'commands', Ellipsis), None), T(dict))): + continuation = traverse_obj(command, ('continuationCommand', 'token', T(compat_str))) if not continuation: continue - ctp = continuation_ep.get('clickTrackingParams') - return YoutubeTabIE._build_continuation_query(continuation, ctp) + ctp = command.get('clickTrackingParams') + return cls._build_continuation_query(continuation, ctp) def _entries(self, tab, item_id, webpage): tab_content = try_get(tab, lambda x: x['content'], dict) @@ -3204,6 +3613,13 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): entry = self._video_entry(renderer) if entry: yield entry + renderer = isr_content.get('richGridRenderer') + if renderer: + for from_ in self._rich_grid_entries( + traverse_obj(renderer, ('contents', Ellipsis, T(dict)))): + yield from_ + continuation = self._extract_continuation(renderer) + continue if not continuation: continuation = self._extract_continuation(is_renderer) @@ -3213,8 +3629,9 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): rich_grid_renderer = tab_content.get('richGridRenderer') if not rich_grid_renderer: return - for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []): - yield entry + for from_ in self._rich_grid_entries( + traverse_obj(rich_grid_renderer, ('contents', Ellipsis, T(dict)))): + yield from_ continuation = self._extract_continuation(rich_grid_renderer) @@ -3232,7 +3649,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'client': { 'clientName': 'WEB', 'clientVersion': client_version, - } + }, } visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str) @@ -3248,10 +3665,10 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): if not continuation: break if visitor_data: - headers['x-goog-visitor-id'] = visitor_data + headers['X-Goog-Visitor-Id'] = visitor_data data['continuation'] = continuation['continuation'] data['clickTracking'] = { - 'clickTrackingParams': continuation['itct'] + 'clickTrackingParams': continuation['itct'], } 
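# tolerate up to 3 extra attempts per page for the intermittent 5xx errors described below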
count = 0 retries = 3 @@ -3260,8 +3677,12 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): # Downloading page may result in intermittent 5xx HTTP error # that is usually worked around with a retry response = self._download_json( - 'https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'https://www.youtube.com/youtubei/v1/browse', None, 'Downloading page %d%s' % (page_num, ' (retry #%d)' % count if count else ''), + query={ + # 'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'prettyPrint': 'false', + }, headers=headers, data=json.dumps(data).encode('utf8')) break except ExtractorError as e: @@ -3430,10 +3851,23 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): def _real_extract(self, url): item_id = self._match_id(url) url = update_url(url, netloc='www.youtube.com') - # Handle both video/playlist URLs qs = parse_qs(url) - video_id = qs.get('v', [None])[0] - playlist_id = qs.get('list', [None])[0] + + def qs_get(key, default=None): + return qs.get(key, [default])[-1] + + # Go around for /feeds/videos.xml?playlist_id={pl_id} + if item_id == 'feeds' and '/feeds/videos.xml?' in url: + playlist_id = qs_get('playlist_id') + if playlist_id: + return self.url_result( + update_url_query('https://www.youtube.com/playlist', { + 'list': playlist_id, + }), ie=self.ie_key(), video_id=playlist_id) + + # Handle both video/playlist URLs + video_id = qs_get('v') + playlist_id = qs_get('list') if video_id and playlist_id: if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just video %s because of --no-playlist' % video_id) @@ -3510,7 +3944,7 @@ class YoutubePlaylistIE(InfoExtractor): 'uploader': 'milan', 'uploader_id': '@milan5503', 'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw', - } + }, }, { 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl', 'playlist_mincount': 455, @@ -3520,7 +3954,7 @@ class YoutubePlaylistIE(InfoExtractor): 'uploader': 'LBK', 'uploader_id': '@music_king', 'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA', - } + }, }, { 'url': 'TLGGrESM50VT6acwMjAyMjAxNw', 'only_matching': True, @@ -3631,7 +4065,7 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor): 'info_dict': { 'id': 'youtube-dl test video', 'title': 'youtube-dl test video', - } + }, }] def _get_n_results(self, query, n): @@ -3651,7 +4085,7 @@ class YoutubeSearchDateIE(YoutubeSearchIE): 'info_dict': { 'id': 'youtube-dl test video', 'title': 'youtube-dl test video', - } + }, }] @@ -3666,7 +4100,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor): 'id': 'youtube-dl test video', 'title': 'youtube-dl test video', }, - 'params': {'playlistend': 5} + 'params': {'playlistend': 5}, }, { 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB', 'only_matching': True, @@ -3682,6 +4116,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor): class YoutubeFeedsInfoExtractor(YoutubeTabIE): """ Base class for feed extractors + Subclasses must define the _FEED_NAME property. 
""" _LOGIN_REQUIRED = True diff --git a/youtube_dl/jsinterp.py b/youtube_dl/jsinterp.py index 86d902248..7630e2099 100644 --- a/youtube_dl/jsinterp.py +++ b/youtube_dl/jsinterp.py @@ -1,25 +1,38 @@ +# coding: utf-8 from __future__ import unicode_literals +import calendar import itertools import json import operator import re +import time -from functools import update_wrapper +from functools import update_wrapper, wraps from .utils import ( error_to_compat_str, ExtractorError, + float_or_none, + int_or_none, js_to_json, remove_quotes, + str_or_none, unified_timestamp, variadic, + write_string, ) from .compat import ( compat_basestring, compat_chr, compat_collections_chain_map as ChainMap, + compat_contextlib_suppress, + compat_filter as filter, + compat_int, + compat_integer_types, compat_itertools_zip_longest as zip_longest, + compat_map as map, + compat_numeric_types, compat_str, ) @@ -53,60 +66,150 @@ def wraps_op(op): # NB In principle NaN cannot be checked by membership. # Here all NaN values are actually this one, so _NaN is _NaN, -# although _NaN != _NaN. +# although _NaN != _NaN. Ditto Infinity. _NaN = float('nan') +_Infinity = float('inf') -def _js_bit_op(op): +class JS_Undefined(object): + pass + - def zeroise(x): - return 0 if x in (None, JS_Undefined, _NaN) else x +def _js_bit_op(op, is_shift=False): + + def zeroise(x, is_shift_arg=False): + if isinstance(x, compat_integer_types): + return (x % 32) if is_shift_arg else (x & 0xffffffff) + try: + x = float(x) + if is_shift_arg: + x = int(x % 32) + elif x < 0: + x = -compat_int(-x % 0xffffffff) + else: + x = compat_int(x % 0xffffffff) + except (ValueError, TypeError): + # also here for int(NaN), including float('inf') % 32 + x = 0 + return x @wraps_op(op) def wrapped(a, b): - return op(zeroise(a), zeroise(b)) & 0xffffffff + return op(zeroise(a), zeroise(b, is_shift)) & 0xffffffff return wrapped -def _js_arith_op(op): +def _js_arith_op(op, div=False): @wraps_op(op) def wrapped(a, b): if JS_Undefined in (a, b): return _NaN - return op(a or 0, b or 0) + # null, "" --> 0 + a, b = (float_or_none( + (x.strip() if isinstance(x, compat_basestring) else x) or 0, + default=_NaN) for x in (a, b)) + if _NaN in (a, b): + return _NaN + try: + return op(a, b) + except ZeroDivisionError: + return _NaN if not (div and (a or b)) else _Infinity return wrapped -def _js_div(a, b): - if JS_Undefined in (a, b) or not (a or b): - return _NaN - return operator.truediv(a or 0, b) if b else float('inf') +_js_arith_add = _js_arith_op(operator.add) + +def _js_add(a, b): + if not (isinstance(a, compat_basestring) or isinstance(b, compat_basestring)): + return _js_arith_add(a, b) + if not isinstance(a, compat_basestring): + a = _js_toString(a) + elif not isinstance(b, compat_basestring): + b = _js_toString(b) + return operator.concat(a, b) -def _js_mod(a, b): - if JS_Undefined in (a, b) or not b: - return _NaN - return (a or 0) % b + +_js_mod = _js_arith_op(operator.mod) +__js_exp = _js_arith_op(operator.pow) def _js_exp(a, b): if not b: return 1 # even 0 ** 0 !! 
- elif JS_Undefined in (a, b): - return _NaN - return (a or 0) ** b - - -def _js_eq_op(op): + return __js_exp(a, b) + + +def _js_to_primitive(v): + return ( + ','.join(map(_js_toString, v)) if isinstance(v, list) + else '[object Object]' if isinstance(v, dict) + else compat_str(v) if not isinstance(v, ( + compat_numeric_types, compat_basestring)) + else v + ) + + +# more exact: yt-dlp/yt-dlp#12110 +def _js_toString(v): + return ( + 'undefined' if v is JS_Undefined + else 'Infinity' if v == _Infinity + else 'NaN' if v is _NaN + else 'null' if v is None + # bool <= int: do this first + else ('false', 'true')[v] if isinstance(v, bool) + else re.sub(r'(?<=\d)\.?0*$', '', '{0:.7f}'.format(v)) if isinstance(v, compat_numeric_types) + else _js_to_primitive(v)) + + +_nullish = frozenset((None, JS_Undefined)) + + +def _js_eq(a, b): + # NaN != any + if _NaN in (a, b): + return False + # Object is Object + if isinstance(a, type(b)) and isinstance(b, (dict, list)): + return operator.is_(a, b) + # general case + if a == b: + return True + # null == undefined + a_b = set((a, b)) + if a_b & _nullish: + return a_b <= _nullish + a, b = _js_to_primitive(a), _js_to_primitive(b) + if not isinstance(a, compat_basestring): + a, b = b, a + # Number to String: convert the string to a number + # Conversion failure results in ... false + if isinstance(a, compat_basestring): + return float_or_none(a) == b + return a == b + + +def _js_neq(a, b): + return not _js_eq(a, b) + + +def _js_id_op(op): @wraps_op(op) def wrapped(a, b): - if set((a, b)) <= set((None, JS_Undefined)): - return op(a, a) + if _NaN in (a, b): + return op(_NaN, None) + if not isinstance(a, (compat_basestring, compat_numeric_types)): + a, b = b, a + # strings are === if == + # why 'a' is not 'a': https://stackoverflow.com/a/1504848 + if isinstance(a, (compat_basestring, compat_numeric_types)): + return a == b if op(0, 0) else a != b return op(a, b) return wrapped @@ -134,31 +237,52 @@ def _js_ternary(cndn, if_true=True, if_false=False): return if_true +def _js_unary_op(op): + + @wraps_op(op) + def wrapped(a, _): + return op(a) + + return wrapped + + +# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/typeof +def _js_typeof(expr): + with compat_contextlib_suppress(TypeError, KeyError): + return { + JS_Undefined: 'undefined', + _NaN: 'number', + _Infinity: 'number', + True: 'boolean', + False: 'boolean', + None: 'object', + }[expr] + for t, n in ( + (compat_basestring, 'string'), + (compat_numeric_types, 'number'), + ): + if isinstance(expr, t): + return n + if callable(expr): + return 'function' + # TODO: Symbol, BigInt + return 'object' + + # (op, definition) in order of binding priority, tightest first # avoid dict to maintain order # definition None => Defined in JSInterpreter._operator _OPERATORS = ( - ('>>', _js_bit_op(operator.rshift)), - ('<<', _js_bit_op(operator.lshift)), - ('+', _js_arith_op(operator.add)), + ('>>', _js_bit_op(operator.rshift, True)), + ('<<', _js_bit_op(operator.lshift, True)), + ('+', _js_add), ('-', _js_arith_op(operator.sub)), ('*', _js_arith_op(operator.mul)), ('%', _js_mod), - ('/', _js_div), + ('/', _js_arith_op(operator.truediv, div=True)), ('**', _js_exp), ) -_COMP_OPERATORS = ( - ('===', operator.is_), - ('!==', operator.is_not), - ('==', _js_eq_op(operator.eq)), - ('!=', _js_eq_op(operator.ne)), - ('<=', _js_comp_op(operator.le)), - ('>=', _js_comp_op(operator.ge)), - ('<', _js_comp_op(operator.lt)), - ('>', _js_comp_op(operator.gt)), -) - _LOG_OPERATORS = ( ('|', 
_js_bit_op(operator.or_)), ('^', _js_bit_op(operator.xor)), @@ -172,15 +296,30 @@ _SC_OPERATORS = ( ('&&', None), ) -_OPERATOR_RE = '|'.join(map(lambda x: re.escape(x[0]), _OPERATORS + _LOG_OPERATORS)) +_UNARY_OPERATORS_X = ( + ('void', _js_unary_op(lambda _: JS_Undefined)), + ('typeof', _js_unary_op(_js_typeof)), + # avoid functools.partial here since Py2 update_wrapper(partial) -> no __module__ + ('!', _js_unary_op(lambda x: _js_ternary(x, if_true=False, if_false=True))), +) + +_COMP_OPERATORS = ( + ('===', _js_id_op(operator.is_)), + ('!==', _js_id_op(operator.is_not)), + ('==', _js_eq), + ('!=', _js_neq), + ('<=', _js_comp_op(operator.le)), + ('>=', _js_comp_op(operator.ge)), + ('<', _js_comp_op(operator.lt)), + ('>', _js_comp_op(operator.gt)), +) + +_OPERATOR_RE = '|'.join(map(lambda x: re.escape(x[0]), _OPERATORS + _LOG_OPERATORS + _SC_OPERATORS)) _NAME_RE = r'[a-zA-Z_$][\w$]*' _MATCHING_PARENS = dict(zip(*zip('()', '{}', '[]'))) _QUOTES = '\'"/' - - -class JS_Undefined(object): - pass +_NESTED_BRACKETS = r'[^[\]]+(?:\[[^[\]]+(?:\[[^\]]+\])?\])?' class JS_Break(ExtractorError): @@ -217,7 +356,47 @@ class LocalNameSpace(ChainMap): raise NotImplementedError('Deleting is not supported') def __repr__(self): - return 'LocalNameSpace%s' % (self.maps, ) + return 'LocalNameSpace({0!r})'.format(self.maps) + + +class Debugger(object): + ENABLED = False + + @staticmethod + def write(*args, **kwargs): + level = kwargs.get('level', 100) + + def truncate_string(s, left, right=0): + if s is None or len(s) <= left + right: + return s + return '...'.join((s[:left - 3], s[-right:] if right else '')) + + write_string('[debug] JS: {0}{1}\n'.format( + ' ' * (100 - level), + ' '.join(truncate_string(compat_str(x), 50, 50) for x in args))) + + @classmethod + def wrap_interpreter(cls, f): + if not cls.ENABLED: + return f + + @wraps(f) + def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs): + if cls.ENABLED and stmt.strip(): + cls.write(stmt, level=allow_recursion) + try: + ret, should_ret = f(self, stmt, local_vars, allow_recursion, *args, **kwargs) + except Exception as e: + if cls.ENABLED: + if isinstance(e, ExtractorError): + e = e.orig_msg + cls.write('=> Raises:', e, '<-|', stmt, level=allow_recursion) + raise + if cls.ENABLED and stmt.strip(): + if should_ret or repr(ret) != stmt: + cls.write(['->', '=>'][bool(should_ret)], repr(ret), '<-|', stmt, level=allow_recursion) + return ret, should_ret + return interpret_statement class JSInterpreter(object): @@ -236,14 +415,28 @@ class JSInterpreter(object): class Exception(ExtractorError): def __init__(self, msg, *args, **kwargs): expr = kwargs.pop('expr', None) + msg = str_or_none(msg, default='"None"') if expr is not None: msg = '{0} in: {1!r:.100}'.format(msg.rstrip(), expr) super(JSInterpreter.Exception, self).__init__(msg, *args, **kwargs) - class JS_RegExp(object): + class JS_Object(object): + def __getitem__(self, key): + if hasattr(self, key): + return getattr(self, key) + raise KeyError(key) + + def dump(self): + """Serialise the instance""" + raise NotImplementedError + + class JS_RegExp(JS_Object): RE_FLAGS = { # special knowledge: Python's re flags are bitmask values, current max 128 # invent new bitmask values well above that for literal parsing + # JS 'u' flag is effectively always set (surrogate pairs aren't seen), + # but \u{...} and \p{...} escapes aren't handled); no additional JS 'v' + # features are supported # TODO: execute matches with these flags (remaining: d, y) 'd': 1024, # Generate indices for 
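
# Debugger.wrap_interpreter above is a conditional tracing decorator: when
# ENABLED is false at decoration time it returns f untouched (zero overhead),
# otherwise it logs each statement indented by the remaining recursion depth.
# The same shape in miniature (Trace is an invented stand-in); note the check
# runs when the decorated class body executes, so ENABLED must be set before
# the class is defined:
from functools import wraps

class Trace(object):
    ENABLED = False

    @classmethod
    def wrap(cls, f):
        if not cls.ENABLED:
            return f          # no wrapper at all when tracing is off

        @wraps(f)
        def wrapper(stmt, depth=100):
            print('%s%.60s' % (' ' * (100 - depth), stmt))
            return f(stmt, depth)
        return wrapper
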
substring matches 'g': 2048, # Global search @@ -251,21 +444,31 @@ class JSInterpreter(object): 'm': re.M, # Multi-line search 's': re.S, # Allows . to match newline characters 'u': re.U, # Treat a pattern as a sequence of unicode code points + 'v': re.U, # Like 'u' with extended character class and \p{} syntax 'y': 4096, # Perform a "sticky" search that matches starting at the current position in the target string } def __init__(self, pattern_txt, flags=0): if isinstance(flags, compat_str): flags, _ = self.regex_flags(flags) - # First, avoid https://github.com/python/cpython/issues/74534 self.__self = None - self.__pattern_txt = pattern_txt.replace('[[', r'[\[') + pattern_txt = str_or_none(pattern_txt) or '(?:)' + # escape unintended embedded flags + pattern_txt = re.sub( + r'(\(\?)([aiLmsux]*)(-[imsx]+:|(?<!\?)\))', + lambda m: ''.join( + (re.escape(m.group(1)), m.group(2), re.escape(m.group(3))) + if m.group(3) == ')' + else ('(?:', m.group(2), m.group(3))), + pattern_txt) + # Avoid https://github.com/python/cpython/issues/74534 + self.source = pattern_txt.replace('[[', r'[\[') self.__flags = flags def __instantiate(self): if self.__self: return - self.__self = re.compile(self.__pattern_txt, self.__flags) + self.__self = re.compile(self.source, self.__flags) # Thx: https://stackoverflow.com/questions/44773522/setattr-on-python2-sre-sre-pattern for name in dir(self.__self): # Only these? Obviously __class__, __init__. @@ -273,16 +476,15 @@ class JSInterpreter(object): # that can't be setattr'd but also can't need to be copied. if name in ('__class__', '__init__', '__weakref__'): continue - setattr(self, name, getattr(self.__self, name)) + if name == 'flags': + setattr(self, name, getattr(self.__self, name, self.__flags)) + else: + setattr(self, name, getattr(self.__self, name)) def __getattr__(self, name): self.__instantiate() - # make Py 2.6 conform to its lying documentation - if name == 'flags': - self.flags = self.__flags - return self.flags - elif name == 'pattern': - self.pattern = self.__pattern_txt + if name == 'pattern': + self.pattern = self.source return self.pattern elif hasattr(self.__self, name): v = getattr(self.__self, name) @@ -290,6 +492,26 @@ class JSInterpreter(object): return v elif name in ('groupindex', 'groups'): return 0 if name == 'groupindex' else {} + else: + flag_attrs = ( # order by 2nd elt + ('hasIndices', 'd'), + ('global', 'g'), + ('ignoreCase', 'i'), + ('multiline', 'm'), + ('dotAll', 's'), + ('unicode', 'u'), + ('unicodeSets', 'v'), + ('sticky', 'y'), + ) + for k, c in flag_attrs: + if name == k: + return bool(self.RE_FLAGS[c] & self.__flags) + else: + if name == 'flags': + return ''.join( + (c if self.RE_FLAGS[c] & self.__flags else '') + for _, c in flag_attrs) + raise AttributeError('{0} has no attribute named {1}'.format(self, name)) @classmethod @@ -303,12 +525,92 @@ class JSInterpreter(object): flags |= cls.RE_FLAGS[ch] return flags, expr[idx + 1:] + def dump(self): + return '(/{0}/{1})'.format( + re.sub(r'(?<!\\)/', r'\/', self.source), + self.flags) + + @staticmethod + def escape(string_): + return re.escape(string_) + + class JS_Date(JS_Object): + _t = None + + @staticmethod + def __ymd_etc(*args, **kw_is_utc): + # args: year, monthIndex, day, hours, minutes, seconds, milliseconds + is_utc = kw_is_utc.get('is_utc', False) + + args = list(args[:7]) + args += [0] * (9 - len(args)) + args[1] += 1 # month 0..11 -> 1..12 + ms = args[6] + for i in range(6, 9): + args[i] = -1 # don't know + if is_utc: + args[-1] = 1 + # TODO: [MDN] When a segment 
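
# JS_RegExp compiles lazily: __getattr__ only fires for attributes missing on
# the instance, so the first such access triggers re.compile and delegation.
# The core idiom, reduced (LazyPattern is an invented sketch, without the
# flag-attribute emulation above):
import re

class LazyPattern(object):
    def __init__(self, source, flags=0):
        self.source, self._flags, self._compiled = source, flags, None

    def __getattr__(self, name):
        # reached only when normal attribute lookup fails
        if self._compiled is None:
            self._compiled = re.compile(self.source, self._flags)
        return getattr(self._compiled, name)

p = LazyPattern(r'\d+')                # nothing compiled yet
assert p.match('42').group(0) == '42'  # first use compiles
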
overflows or underflows its expected + # range, it usually "carries over to" or "borrows from" the higher segment. + try: + mktime = calendar.timegm if is_utc else time.mktime + return mktime(time.struct_time(args)) * 1000 + ms + except (OverflowError, ValueError): + return None + + @classmethod + def UTC(cls, *args): + t = cls.__ymd_etc(*args, is_utc=True) + return _NaN if t is None else t + + @staticmethod + def parse(date_str, **kw_is_raw): + is_raw = kw_is_raw.get('is_raw', False) + + t = unified_timestamp(str_or_none(date_str), False) + return int(t * 1000) if t is not None else t if is_raw else _NaN + + @staticmethod + def now(**kw_is_raw): + is_raw = kw_is_raw.get('is_raw', False) + + t = time.time() + return int(t * 1000) if t is not None else t if is_raw else _NaN + + def __init__(self, *args): + if not args: + args = [self.now(is_raw=True)] + if len(args) == 1: + if isinstance(args[0], JSInterpreter.JS_Date): + self._t = int_or_none(args[0].valueOf(), default=None) + else: + arg_type = _js_typeof(args[0]) + if arg_type == 'string': + self._t = self.parse(args[0], is_raw=True) + elif arg_type == 'number': + self._t = int(args[0]) + else: + self._t = self.__ymd_etc(*args) + + def toString(self): + try: + return time.strftime('%a %b %0d %Y %H:%M:%S %Z%z', self._t).rstrip() + except TypeError: + return "Invalid Date" + + def valueOf(self): + return _NaN if self._t is None else self._t + + def dump(self): + return '(new Date({0}))'.format(self.toString()) + @classmethod def __op_chars(cls): op_chars = set(';,[') for op in cls._all_operators(): - for c in op[0]: - op_chars.add(c) + if op[0].isalpha(): + continue + op_chars.update(op[0]) return op_chars def _named_object(self, namespace, obj): @@ -326,12 +628,22 @@ class JSInterpreter(object): # collections.Counter() is ~10% slower in both 2.7 and 3.9 counters = dict((k, 0) for k in _MATCHING_PARENS.values()) start, splits, pos, delim_len = 0, 0, 0, len(delim) - 1 - in_quote, escaping, skipping = None, False, 0 - after_op, in_regex_char_group = True, False - + in_quote, escaping, after_op, in_regex_char_group = None, False, True, False + skipping = 0 + if skip_delims: + skip_delims = variadic(skip_delims) + skip_txt = None for idx, char in enumerate(expr): + if skip_txt and idx <= skip_txt[1]: + continue paren_delta = 0 if not in_quote: + if char == '/' and expr[idx:idx + 2] == '/*': + # skip a comment + skip_txt = expr[idx:].find('*/', 2) + skip_txt = [idx, idx + skip_txt + 1] if skip_txt >= 2 else None + if skip_txt: + continue if char in _MATCHING_PARENS: counters[_MATCHING_PARENS[char]] += 1 paren_delta = 1 @@ -355,7 +667,7 @@ class JSInterpreter(object): continue elif pos == 0 and skip_delims: here = expr[idx:] - for s in variadic(skip_delims): + for s in skip_delims: if here.startswith(s) and s: skipping = len(s) - 1 break @@ -364,28 +676,98 @@ class JSInterpreter(object): if pos < delim_len: pos += 1 continue - yield expr[start: idx - delim_len] + if skip_txt and skip_txt[0] >= start and skip_txt[1] <= idx - delim_len: + yield expr[start:skip_txt[0]] + expr[skip_txt[1] + 1: idx - delim_len] + else: + yield expr[start: idx - delim_len] + skip_txt = None start, pos = idx + 1, 0 splits += 1 if max_split and splits >= max_split: break - yield expr[start:] + if skip_txt and skip_txt[0] >= start: + yield expr[start:skip_txt[0]] + expr[skip_txt[1] + 1:] + else: + yield expr[start:] @classmethod def _separate_at_paren(cls, expr, delim=None): if delim is None: delim = expr and _MATCHING_PARENS[expr[0]] separated = list(cls._separate(expr, 
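
# JS_Date keeps time as integer milliseconds since the epoch; constructor
# fields go through time.mktime (local) or calendar.timegm (UTC). The UTC
# path in isolation (js_utc_ms is an invented name; no overflow handling):
import calendar
import time

def js_utc_ms(year, month_index, day, hours=0, minutes=0, seconds=0, ms=0):
    # JS months are 0-based, struct_time months are 1-based
    tt = (year, month_index + 1, day, hours, minutes, seconds, -1, -1, 1)
    return calendar.timegm(time.struct_time(tt)) * 1000 + ms

# Date.UTC(2025, 3, 7) -> 2025-04-07T00:00:00Z
assert js_utc_ms(2025, 3, 7) == 1743984000000
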
delim, 1)) - if len(separated) < 2: raise cls.Exception('No terminating paren {delim} in {expr!r:.5500}'.format(**locals())) return separated[0][1:].strip(), separated[1].strip() @staticmethod - def _all_operators(): - return itertools.chain( - # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence - _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS) + def _all_operators(_cached=[]): + if not _cached: + _cached.extend(itertools.chain( + # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence + _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS, _UNARY_OPERATORS_X)) + return _cached + + def _separate_at_op(self, expr, max_split=None): + + for op, _ in self._all_operators(): + # hackety: </> have higher priority than <</>>, but don't confuse them + skip_delim = (op + op) if op in '<>*?' else None + if op == '?': + skip_delim = (skip_delim, '?.') + separated = list(self._separate(expr, op, skip_delims=skip_delim)) + if len(separated) < 2: + continue + + right_expr = separated.pop() + # handle operators that are both unary and binary, minimal BODMAS + if op in ('+', '-'): + # simplify/adjust consecutive instances of these operators + undone = 0 + separated = [s.strip() for s in separated] + while len(separated) > 1 and not separated[-1]: + undone += 1 + separated.pop() + if op == '-' and undone % 2 != 0: + right_expr = op + right_expr + elif op == '+': + while len(separated) > 1 and set(separated[-1]) <= self.OP_CHARS: + right_expr = separated.pop() + right_expr + if separated[-1][-1:] in self.OP_CHARS: + right_expr = separated.pop() + right_expr + # hanging op at end of left => unary + (strip) or - (push right) + separated.append(right_expr) + dm_ops = ('*', '%', '/', '**') + dm_chars = set(''.join(dm_ops)) + + def yield_terms(s): + skip = False + for i, term in enumerate(s[:-1]): + if skip: + skip = False + continue + if not (dm_chars & set(term)): + yield term + continue + for dm_op in dm_ops: + bodmas = list(self._separate(term, dm_op, skip_delims=skip_delim)) + if len(bodmas) > 1 and not bodmas[-1].strip(): + bodmas[-1] = (op if op == '-' else '') + s[i + 1] + yield dm_op.join(bodmas) + skip = True + break + else: + if term: + yield term + + if not skip and s[-1]: + yield s[-1] + + separated = list(yield_terms(separated)) + right_expr = separated.pop() if len(separated) > 1 else None + expr = op.join(separated) + if right_expr is None: + continue + return op, separated, right_expr def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion): if op in ('||', '&&'): @@ -397,7 +779,7 @@ class JSInterpreter(object): elif op == '?': right_expr = _js_ternary(left_val, *self._separate(right_expr, ':', 1)) - right_val = self.interpret_expression(right_expr, local_vars, allow_recursion) + right_val = self.interpret_expression(right_expr, local_vars, allow_recursion) if right_expr else left_val opfunc = op and next((v for k, v in self._all_operators() if k == op), None) if not opfunc: return right_val @@ -408,17 +790,21 @@ class JSInterpreter(object): except Exception as e: raise self.Exception('Failed to evaluate {left_val!r:.50} {op} {right_val!r:.50}'.format(**locals()), expr, cause=e) - def _index(self, obj, idx, allow_undefined=False): - if idx == 'length': + def _index(self, obj, idx, allow_undefined=None): + if idx == 'length' and isinstance(obj, list): return len(obj) try: - return obj[int(idx)] if isinstance(obj, list) else obj[idx] - except Exception as e: - 
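
# _all_operators now memoizes via the classic mutable-default trick: the
# _cached=[] list is created once when the def executes and is shared by all
# later calls, so the chain is built only once. The idiom by itself
# (cached_ops is an invented example):
def cached_ops(_cached=[]):
    if not _cached:             # body only runs on the first call
        _cached.extend(('<<', '>>', '+', '-'))
    return _cached

assert cached_ops() is cached_ops()   # same list object every time
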
if allow_undefined: + return obj[int(idx)] if isinstance(obj, list) else obj[compat_str(idx)] + except (TypeError, KeyError, IndexError, ValueError) as e: + # allow_undefined is None gives correct behaviour + if allow_undefined or ( + allow_undefined is None and not isinstance(e, TypeError)): return JS_Undefined - raise self.Exception('Cannot get index {idx:.100}'.format(**locals()), expr=repr(obj), cause=e) + raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e) def _dump(self, obj, namespace): + if obj is JS_Undefined: + return 'undefined' try: return json.dumps(obj) except TypeError: @@ -426,7 +812,7 @@ class JSInterpreter(object): # used below _VAR_RET_THROW_RE = re.compile(r'''(?x) - (?P<var>(?:var|const|let)\s)|return(?:\s+|(?=["'])|$)|(?P<throw>throw\s+) + (?:(?P<var>var|const|let)\s+|(?P<ret>return)(?:\s+|(?=["'])|$)|(?P<throw>throw)\s+) ''') _COMPOUND_RE = re.compile(r'''(?x) (?P<try>try)\s*\{| @@ -438,6 +824,11 @@ class JSInterpreter(object): _FINALLY_RE = re.compile(r'finally\s*\{') _SWITCH_RE = re.compile(r'switch\s*\(') + def _eval_operator(self, op, left_expr, right_expr, expr, local_vars, allow_recursion): + left_val = self.interpret_expression(left_expr, local_vars, allow_recursion) + return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion) + + @Debugger.wrap_interpreter def interpret_statement(self, stmt, local_vars, allow_recursion=100): if allow_recursion < 0: raise self.Exception('Recursion limit reached') @@ -448,6 +839,7 @@ class JSInterpreter(object): # fails on (eg) if (...) stmt1; else stmt2; sub_statements = list(self._separate(stmt, ';')) or [''] expr = stmt = sub_statements.pop().strip() + for sub_stmt in sub_statements: ret, should_return = self.interpret_statement(sub_stmt, local_vars, allow_recursion) if should_return: @@ -458,7 +850,7 @@ class JSInterpreter(object): expr = stmt[len(m.group(0)):].strip() if m.group('throw'): raise JS_Throw(self.interpret_expression(expr, local_vars, allow_recursion)) - should_return = not m.group('var') + should_return = 'return' if m.group('ret') else False if not expr: return None, should_return @@ -475,7 +867,7 @@ class JSInterpreter(object): new_kw, _, obj = expr.partition('new ') if not new_kw: - for klass, konstr in (('Date', lambda x: int(unified_timestamp(x, False) * 1000)), + for klass, konstr in (('Date', lambda *x: self.JS_Date(*x).valueOf()), ('RegExp', self.JS_RegExp), ('Error', self.Exception)): if not obj.startswith(klass + '('): @@ -490,9 +882,19 @@ class JSInterpreter(object): else: raise self.Exception('Unsupported object {obj:.100}'.format(**locals()), expr=expr) - if expr.startswith('void '): - left = self.interpret_expression(expr[5:], local_vars, allow_recursion) - return None, should_return + # apply unary operators (see new above) + for op, _ in _UNARY_OPERATORS_X: + if not expr.startswith(op): + continue + operand = expr[len(op):] + if not operand or (op.isalpha() and operand[0] != ' '): + continue + separated = self._separate_at_op(operand, max_split=1) + if separated: + next_op, separated, right_expr = separated + separated.append(right_expr) + operand = next_op.join(separated) + return self._eval_operator(op, operand, '', expr, local_vars, allow_recursion), should_return if expr.startswith('{'): inner, outer = self._separate_at_paren(expr) @@ -511,7 +913,6 @@ class JSInterpreter(object): expr = self._dump(inner, local_vars) + outer if expr.startswith('('): - m = 
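
# _index now has three modes via allow_undefined: True always maps lookup
# failure to JS_Undefined, False always raises, and the new default None
# returns JS_Undefined for a missing member but still raises when the object
# is not indexable at all (TypeError) - hence the "allow_undefined is None
# gives correct behaviour" comment. A simplified decision table (invented
# names, plain exceptions instead of self.Exception):
Undefined = object()

def index(obj, key, allow_undefined=None):
    try:
        return obj[key]
    except TypeError:
        if allow_undefined:        # only an explicit True swallows this
            return Undefined
        raise
    except (KeyError, IndexError):
        if allow_undefined is None or allow_undefined:
            return Undefined       # missing member -> undefined, like JS
        raise

assert index({}, 'x') is Undefined     # ({}).x -> undefined
assert index([1], 5) is Undefined      # [1][5] -> undefined
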
re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr) if m: # short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig` @@ -540,7 +941,7 @@ class JSInterpreter(object): if_expr, expr = self._separate_at_paren(expr) else: # may lose ... else ... because of ll.368-374 - if_expr, expr = self._separate_at_paren(expr, delim=';') + if_expr, expr = self._separate_at_paren(' %s;' % (expr,), delim=';') else_expr = None m = re.match(r'else\s*(?P<block>\{)?', expr) if m: @@ -588,8 +989,7 @@ class JSInterpreter(object): if m.group('err'): catch_vars[m.group('err')] = err.error if isinstance(err, JS_Throw) else err catch_vars = local_vars.new_child(m=catch_vars) - err = None - pending = self.interpret_statement(sub_expr, catch_vars, allow_recursion) + err, pending = None, self.interpret_statement(sub_expr, catch_vars, allow_recursion) m = self._FINALLY_RE.match(expr) if m: @@ -679,7 +1079,7 @@ class JSInterpreter(object): start, end = m.span() sign = m.group('pre_sign') or m.group('post_sign') ret = local_vars[var] - local_vars[var] += 1 if sign[0] == '+' else -1 + local_vars[var] = _js_add(ret, 1 if sign[0] == '+' else -1) if m.group('pre_sign'): ret = local_vars[var] expr = expr[:start] + self._dump(ret, local_vars) + expr[end:] @@ -689,15 +1089,18 @@ class JSInterpreter(object): m = re.match(r'''(?x) (?P<assign> - (?P<out>{_NAME_RE})(?:\[(?P<index>[^\]]+?)\])?\s* + (?P<out>{_NAME_RE})(?P<out_idx>(?:\[{_NESTED_BRACKETS}\])+)?\s* (?P<op>{_OPERATOR_RE})? =(?!=)(?P<expr>.*)$ )|(?P<return> - (?!if|return|true|false|null|undefined)(?P<name>{_NAME_RE})$ - )|(?P<indexing> - (?P<in>{_NAME_RE})\[(?P<idx>.+)\]$ + (?!if|return|true|false|null|undefined|NaN|Infinity)(?P<name>{_NAME_RE})$ )|(?P<attribute> - (?P<var>{_NAME_RE})(?:(?P<nullish>\?)?\.(?P<member>[^(]+)|\[(?P<member2>[^\]]+)\])\s* + (?P<var>{_NAME_RE})(?: + (?P<nullish>\?)?\.(?P<member>[^(]+)| + \[(?P<member2>{_NESTED_BRACKETS})\] + )\s* + )|(?P<indexing> + (?P<in>{_NAME_RE})(?P<in_idx>\[.+\])$ )|(?P<function> (?P<fname>{_NAME_RE})\((?P<args>.*)\)$ )'''.format(**globals()), expr) @@ -705,19 +1108,28 @@ class JSInterpreter(object): if md.get('assign'): left_val = local_vars.get(m.group('out')) - if not m.group('index'): + if not m.group('out_idx'): local_vars[m.group('out')] = self._operator( m.group('op'), left_val, m.group('expr'), expr, local_vars, allow_recursion) return local_vars[m.group('out')], should_return elif left_val in (None, JS_Undefined): raise self.Exception('Cannot index undefined variable ' + m.group('out'), expr=expr) - idx = self.interpret_expression(m.group('index'), local_vars, allow_recursion) - if not isinstance(idx, (int, float)): - raise self.Exception('List index %s must be integer' % (idx, ), expr=expr) - idx = int(idx) + indexes = md['out_idx'] + while indexes: + idx, indexes = self._separate_at_paren(indexes) + idx = self.interpret_expression(idx, local_vars, allow_recursion) + if indexes: + left_val = self._index(left_val, idx) + if isinstance(idx, float): + idx = int(idx) + if isinstance(left_val, list) and len(left_val) <= int_or_none(idx, default=-1): + # JS Array is a sparsely assignable list + # TODO: handle extreme sparsity without memory bloat, eg using auxiliary dict + left_val.extend((idx - len(left_val) + 1) * [JS_Undefined]) left_val[idx] = self._operator( - m.group('op'), self._index(left_val, idx), m.group('expr'), expr, local_vars, allow_recursion) + m.group('op'), self._index(left_val, idx) if m.group('op') else None, + 
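
# Assignment through an index now treats Python lists like JS Arrays, which
# are sparsely assignable: writing past the end first pads the gap with
# undefined. The padding step alone (sparse_set is an invented helper):
JS_Undefined = object()

def sparse_set(lst, idx, value):
    if idx >= len(lst):
        lst.extend([JS_Undefined] * (idx - len(lst) + 1))
    lst[idx] = value
    return lst

a = [0]
sparse_set(a, 3, 'x')
assert a == [0, JS_Undefined, JS_Undefined, 'x']
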
m.group('expr'), expr, local_vars, allow_recursion) return left_val[idx], should_return elif expr.isdigit(): @@ -727,57 +1139,42 @@ class JSInterpreter(object): raise JS_Break() elif expr == 'continue': raise JS_Continue() - elif expr == 'undefined': return JS_Undefined, should_return elif expr == 'NaN': return _NaN, should_return + elif expr == 'Infinity': + return _Infinity, should_return elif md.get('return'): - return local_vars[m.group('name')], should_return + ret = local_vars[m.group('name')] + # challenge may try to force returning the original value + # use an optional internal var to block this + if should_return == 'return': + if '_ytdl_do_not_return' not in local_vars: + return ret, True + return (ret, True) if ret != local_vars['_ytdl_do_not_return'] else (ret, False) + else: + return ret, should_return - try: + with compat_contextlib_suppress(ValueError): ret = json.loads(js_to_json(expr)) # strict=True) if not md.get('attribute'): return ret, should_return - except ValueError: - pass if md.get('indexing'): val = local_vars[m.group('in')] - idx = self.interpret_expression(m.group('idx'), local_vars, allow_recursion) - return self._index(val, idx), should_return - - for op, _ in self._all_operators(): - # hackety: </> have higher priority than <</>>, but don't confuse them - skip_delim = (op + op) if op in '<>*?' else None - if op == '?': - skip_delim = (skip_delim, '?.') - separated = list(self._separate(expr, op, skip_delims=skip_delim)) - if len(separated) < 2: - continue - - right_expr = separated.pop() - # handle operators that are both unary and binary, minimal BODMAS - if op in ('+', '-'): - undone = 0 - while len(separated) > 1 and not separated[-1].strip(): - undone += 1 - separated.pop() - if op == '-' and undone % 2 != 0: - right_expr = op + right_expr - left_val = separated[-1] - for dm_op in ('*', '%', '/', '**'): - bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim)) - if len(bodmas) > 1 and not bodmas[-1].strip(): - expr = op.join(separated) + op + right_expr - right_expr = None - break - if right_expr is None: - continue - - left_val = self.interpret_expression(op.join(separated), local_vars, allow_recursion) - return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion), should_return + indexes = m.group('in_idx') + while indexes: + idx, indexes = self._separate_at_paren(indexes) + idx = self.interpret_expression(idx, local_vars, allow_recursion) + val = self._index(val, idx) + return val, should_return + + separated = self._separate_at_op(expr) + if separated: + op, separated, right_expr = separated + return self._eval_operator(op, op.join(separated), right_expr, expr, local_vars, allow_recursion), should_return if md.get('attribute'): variable, member, nullish = m.group('var', 'member', 'nullish') @@ -795,12 +1192,18 @@ class JSInterpreter(object): memb = member raise self.Exception('{memb} {msg}'.format(**locals()), expr=expr) - def eval_method(): + def eval_method(variable, member): if (variable, member) == ('console', 'debug'): + if Debugger.ENABLED: + Debugger.write(self.interpret_expression('[{0}]'.format(arg_str), local_vars, allow_recursion)) return types = { 'String': compat_str, 'Math': float, + 'Array': list, + 'Date': self.JS_Date, + 'RegExp': self.JS_RegExp, + # 'Error': self.Exception, # has no std static methods } obj = local_vars.get(variable) if obj in (JS_Undefined, None): @@ -808,7 +1211,7 @@ class JSInterpreter(object): if obj is JS_Undefined: try: if variable not in self._objects: - 
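
# The 'indexing' branch walks a chain like a["b"][1] by peeling one bracket
# group per iteration (via _separate_at_paren, so nesting also works) and
# indexing into the running value. An equivalent flat sketch (invented
# helpers, no nested-bracket support):
import re

def peel_indexes(idx_txt):
    return re.findall(r'\[([^\[\]]*)\]', idx_txt)

def walk(val, keys):
    for k in keys:
        val = val[k]
    return val

assert peel_indexes('["b"][1]') == ['"b"', '1']
assert walk({'b': ['x', 'y']}, ['b', 1]) == 'y'
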
self._objects[variable] = self.extract_object(variable) + self._objects[variable] = self.extract_object(variable, local_vars) obj = self._objects[variable] except self.Exception: if not nullish: @@ -826,70 +1229,125 @@ class JSInterpreter(object): self.interpret_expression(v, local_vars, allow_recursion) for v in self._separate(arg_str)] - if obj == compat_str: + # Fixup prototype call + if isinstance(obj, type): + new_member, rest = member.partition('.')[0::2] + if new_member == 'prototype': + new_member, func_prototype = rest.partition('.')[0::2] + assertion(argvals, 'takes one or more arguments') + assertion(isinstance(argvals[0], obj), 'must bind to type {0}'.format(obj)) + if func_prototype == 'call': + obj = argvals.pop(0) + elif func_prototype == 'apply': + assertion(len(argvals) == 2, 'takes two arguments') + obj, argvals = argvals + assertion(isinstance(argvals, list), 'second argument must be a list') + else: + raise self.Exception('Unsupported Function method ' + func_prototype, expr) + member = new_member + + if obj is compat_str: if member == 'fromCharCode': assertion(argvals, 'takes one or more arguments') - return ''.join(map(compat_chr, argvals)) + return ''.join(compat_chr(int(n)) for n in argvals) raise self.Exception('Unsupported string method ' + member, expr=expr) - elif obj == float: + elif obj is float: if member == 'pow': assertion(len(argvals) == 2, 'takes two arguments') return argvals[0] ** argvals[1] raise self.Exception('Unsupported Math method ' + member, expr=expr) + elif obj is self.JS_Date: + return getattr(obj, member)(*argvals) if member == 'split': - assertion(argvals, 'takes one or more arguments') - assertion(len(argvals) == 1, 'with limit argument is not implemented') - return obj.split(argvals[0]) if argvals[0] else list(obj) + assertion(len(argvals) <= 2, 'takes at most two arguments') + if len(argvals) > 1: + limit = argvals[1] + assertion(isinstance(limit, int) and limit >= 0, 'integer limit >= 0') + if limit == 0: + return [] + else: + limit = 0 + if len(argvals) == 0: + argvals = [JS_Undefined] + elif isinstance(argvals[0], self.JS_RegExp): + # avoid re.split(), similar but not enough + + def where(): + for m in argvals[0].finditer(obj): + yield m.span(0) + yield (None, None) + + def splits(limit=limit): + i = 0 + for j, jj in where(): + if j == jj == 0: + continue + if j is None and i >= len(obj): + break + yield obj[i:j] + if jj is None or limit == 1: + break + limit -= 1 + i = jj + + return list(splits()) + return ( + obj.split(argvals[0], limit - 1) if argvals[0] and argvals[0] != JS_Undefined + else list(obj)[:limit or None]) elif member == 'join': assertion(isinstance(obj, list), 'must be applied on a list') - assertion(len(argvals) == 1, 'takes exactly one argument') - return argvals[0].join(obj) + assertion(len(argvals) <= 1, 'takes at most one argument') + return (',' if len(argvals) == 0 or argvals[0] in (None, JS_Undefined) + else argvals[0]).join( + ('' if x in (None, JS_Undefined) else _js_toString(x)) + for x in obj) elif member == 'reverse': assertion(not argvals, 'does not take any arguments') obj.reverse() return obj elif member == 'slice': - assertion(isinstance(obj, list), 'must be applied on a list') - assertion(len(argvals) == 1, 'takes exactly one argument') - return obj[argvals[0]:] + assertion(isinstance(obj, (list, compat_str)), 'must be applied on a list or string') + # From [1]: + # .slice() - like [:] + # .slice(n) - like [n:] (not [slice(n)] + # .slice(m, n) - like [m:n] or [slice(m, n)] + # [1] 
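
# The new 'Fixup prototype call' branch supports the two JS invocation
# shapes Fn.prototype.meth.call(this, ...args) and
# Fn.prototype.meth.apply(this, args). How the member string and argument
# list get rewritten, as a standalone sketch (unwrap_prototype is invented):
def unwrap_prototype(member, argvals):
    head, _, rest = member.partition('.')
    assert head == 'prototype'
    meth, _, mode = rest.partition('.')
    if mode == 'call':                 # this = first arg, rest positional
        return meth, argvals[0], argvals[1:]
    if mode == 'apply':                # this plus a single argument list
        this, args = argvals
        return meth, this, list(args)
    raise ValueError('Unsupported Function method ' + mode)

assert unwrap_prototype('prototype.split.call', ['a,b', ',']) == ('split', 'a,b', [','])
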
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/slice + assertion(len(argvals) <= 2, 'takes between 0 and 2 arguments') + if len(argvals) < 2: + argvals += (None,) + return obj[slice(*argvals)] elif member == 'splice': assertion(isinstance(obj, list), 'must be applied on a list') assertion(argvals, 'takes one or more arguments') - index, howMany = map(int, (argvals + [len(obj)])[:2]) + index, how_many = map(int, (argvals + [len(obj)])[:2]) if index < 0: index += len(obj) - add_items = argvals[2:] - res = [] - for i in range(index, min(index + howMany, len(obj))): - res.append(obj.pop(index)) - for i, item in enumerate(add_items): - obj.insert(index + i, item) + res = [obj.pop(index) + for _ in range(index, min(index + how_many, len(obj)))] + obj[index:index] = argvals[2:] return res - elif member == 'unshift': - assertion(isinstance(obj, list), 'must be applied on a list') - assertion(argvals, 'takes one or more arguments') - for item in reversed(argvals): - obj.insert(0, item) - return obj - elif member == 'pop': + elif member in ('shift', 'pop'): assertion(isinstance(obj, list), 'must be applied on a list') assertion(not argvals, 'does not take any arguments') - if not obj: - return - return obj.pop() + return obj.pop(0 if member == 'shift' else -1) if len(obj) > 0 else JS_Undefined + elif member == 'unshift': + assertion(isinstance(obj, list), 'must be applied on a list') + # not enforced: assertion(argvals, 'takes one or more arguments') + obj[0:0] = argvals + return len(obj) elif member == 'push': - assertion(argvals, 'takes one or more arguments') + # not enforced: assertion(argvals, 'takes one or more arguments') obj.extend(argvals) - return obj + return len(obj) elif member == 'forEach': assertion(argvals, 'takes one or more arguments') - assertion(len(argvals) <= 2, 'takes at-most 2 arguments') + assertion(len(argvals) <= 2, 'takes at most 2 arguments') f, this = (argvals + [''])[:2] return [f((item, idx, obj), {'this': this}, allow_recursion) for idx, item in enumerate(obj)] elif member == 'indexOf': assertion(argvals, 'takes one or more arguments') - assertion(len(argvals) <= 2, 'takes at-most 2 arguments') + assertion(len(argvals) <= 2, 'takes at most 2 arguments') idx, start = (argvals + [0])[:2] try: return obj.index(idx, start) @@ -898,7 +1356,7 @@ class JSInterpreter(object): elif member == 'charCodeAt': assertion(isinstance(obj, compat_str), 'must be applied on a string') # assertion(len(argvals) == 1, 'takes exactly one argument') # but not enforced - idx = argvals[0] if isinstance(argvals[0], int) else 0 + idx = argvals[0] if len(argvals) > 0 and isinstance(argvals[0], int) else 0 if idx >= len(obj): return None return ord(obj[idx]) @@ -907,7 +1365,8 @@ class JSInterpreter(object): assertion(len(argvals) == 2, 'takes exactly two arguments') # TODO: argvals[1] callable, other Py vs JS edge cases if isinstance(argvals[0], self.JS_RegExp): - count = 0 if argvals[0].flags & self.JS_RegExp.RE_FLAGS['g'] else 1 + # access JS member with Py reserved name + count = 0 if self._index(argvals[0], 'global') else 1 assertion(member != 'replaceAll' or count == 0, 'replaceAll must be called with a global RegExp') return argvals[0].sub(argvals[1], obj, count=count) @@ -919,11 +1378,11 @@ class JSInterpreter(object): if remaining: ret, should_abort = self.interpret_statement( - self._named_object(local_vars, eval_method()) + remaining, + self._named_object(local_vars, eval_method(variable, member)) + remaining, local_vars, allow_recursion) return 
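
# splice is now a pop loop plus one slice assignment. The same behaviour as
# a standalone function (js_splice is an invented name; the interpreter's
# assertion helpers and argument coercion are omitted):
def js_splice(obj, index, how_many=None, *items):
    if how_many is None:
        how_many = len(obj)
    if index < 0:
        index += len(obj)
    removed = [obj.pop(index)
               for _ in range(index, min(index + int(how_many), len(obj)))]
    obj[index:index] = items        # insert replacements in place
    return removed                  # JS returns the removed elements

a = [1, 2, 3, 4]
assert js_splice(a, 1, 2, 'x') == [2, 3] and a == [1, 'x', 4]
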
ret, should_return or should_abort else: - return eval_method(), should_return + return eval_method(variable, member), should_return elif md.get('function'): fname = m.group('fname') @@ -948,31 +1407,29 @@ class JSInterpreter(object): for v in self._separate(list_txt): yield self.interpret_expression(v, local_vars, allow_recursion) - def extract_object(self, objname): - _FUNC_NAME_RE = r'''(?:[a-zA-Z$0-9]+|"[a-zA-Z$0-9]+"|'[a-zA-Z$0-9]+')''' + def extract_object(self, objname, *global_stack): + _FUNC_NAME_RE = r'''(?:{n}|"{n}"|'{n}')'''.format(n=_NAME_RE) obj = {} - fields = None - for obj_m in re.finditer( + fields = next(filter(None, ( + obj_m.group('fields') for obj_m in re.finditer( r'''(?xs) {0}\s*\.\s*{1}|{1}\s*=\s*\{{\s* (?P<fields>({2}\s*:\s*function\s*\(.*?\)\s*\{{.*?}}(?:,\s*)?)*) }}\s*; '''.format(_NAME_RE, re.escape(objname), _FUNC_NAME_RE), - self.code): - fields = obj_m.group('fields') - if fields: - break - else: + self.code))), None) + if not fields: raise self.Exception('Could not find object ' + objname) # Currently, it only supports function definitions - fields_m = re.finditer( - r'''(?x) - (?P<key>%s)\s*:\s*function\s*\((?P<args>(?:%s|,)*)\){(?P<code>[^}]+)} - ''' % (_FUNC_NAME_RE, _NAME_RE), - fields) - for f in fields_m: + for f in re.finditer( + r'''(?x) + (?P<key>%s)\s*:\s*function\s*\((?P<args>(?:%s|,)*)\){(?P<code>[^}]+)} + ''' % (_FUNC_NAME_RE, _NAME_RE), + fields): argnames = self.build_arglist(f.group('args')) - obj[remove_quotes(f.group('key'))] = self.build_function(argnames, f.group('code')) + name = remove_quotes(f.group('key')) + obj[name] = function_with_repr( + self.build_function(argnames, f.group('code'), *global_stack), 'F<{0}>'.format(name)) return obj @@ -1004,27 +1461,31 @@ class JSInterpreter(object): code, _ = self._separate_at_paren(func_m.group('code')) # refine the match return self.build_arglist(func_m.group('args')), code - def extract_function(self, funcname): + def extract_function(self, funcname, *global_stack): return function_with_repr( - self.extract_function_from_code(*self.extract_function_code(funcname)), - 'F<%s>' % (funcname, )) + self.extract_function_from_code(*itertools.chain( + self.extract_function_code(funcname), global_stack)), + 'F<%s>' % (funcname,)) def extract_function_from_code(self, argnames, code, *global_stack): local_vars = {} + + start = None while True: - mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code) + mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code[start:]) if mobj is None: break - start, body_start = mobj.span() - body, remaining = self._separate_at_paren(code[body_start - 1:], '}') + start, body_start = ((start or 0) + x for x in mobj.span()) + body, remaining = self._separate_at_paren(code[body_start - 1:]) name = self._named_object(local_vars, self.extract_function_from_code( [x.strip() for x in mobj.group('args').split(',')], body, local_vars, *global_stack)) code = code[:start] + name + remaining + return self.build_function(argnames, code, local_vars, *global_stack) - def call_function(self, funcname, *args): - return self.extract_function(funcname)(args) + def call_function(self, funcname, *args, **kw_global_vars): + return self.extract_function(funcname)(args, kw_global_vars) @classmethod def build_arglist(cls, arg_text): @@ -1043,9 +1504,9 @@ class JSInterpreter(object): global_stack = list(global_stack) or [{}] argnames = tuple(argnames) - def resf(args, kwargs={}, allow_recursion=100): - global_stack[0].update( - zip_longest(argnames, args, fillvalue=None)) + def resf(args, 
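
# extract_object now takes the first non-empty 'fields' group with
# next(filter(None, generator), None) instead of the old for/else scan - a
# lazy "first truthy match" idiom, shown on a toy input:
import re

code = 'var xx = {a: 1}; var yy = {b: 2};'
fields = next(
    filter(None, (m.group('fields')
                  for m in re.finditer(r'yy\s*=\s*\{(?P<fields>[^}]*)\}', code))),
    None)   # default result rather than StopIteration when nothing matches
assert fields == 'b: 2'
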
kwargs=None, allow_recursion=100): + kwargs = kwargs or {} + global_stack[0].update(zip_longest(argnames, args, fillvalue=JS_Undefined)) global_stack[0].update(kwargs) var_stack = LocalNameSpace(*global_stack) ret, should_abort = self.interpret_statement(code.replace('\n', ' '), var_stack, allow_recursion - 1) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 434f520d3..61705d1f0 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -534,6 +534,10 @@ def parseOpts(overrideArguments=None): action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation') workarounds.add_option( + '--no-check-extensions', + action='store_true', dest='no_check_extensions', default=False, + help='Suppress file extension validation') + workarounds.add_option( '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure', help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)') diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index cd4303566..c4262936e 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -1717,21 +1717,6 @@ TIMEZONE_NAMES = { 'PST': -8, 'PDT': -7 # Pacific } -KNOWN_EXTENSIONS = ( - 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', - 'flv', 'f4v', 'f4a', 'f4b', - 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', - 'mkv', 'mka', 'mk3d', - 'avi', 'divx', - 'mov', - 'asf', 'wmv', 'wma', - '3gp', '3g2', - 'mp3', - 'flac', - 'ape', - 'wav', - 'f4f', 'f4m', 'm3u8', 'smil') - # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'], @@ -2406,7 +2391,7 @@ class ExtractorError(YoutubeDLError): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. 
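
# Two quiet fixes in resf above: the shared mutable default kwargs={} becomes
# kwargs=None with an in-body fallback, and absent positional arguments now
# bind to JS_Undefined instead of None, matching JS parameter semantics.
# The binding step alone (bind_args is an invented name; plain Python 3):
from itertools import zip_longest

JS_Undefined = object()

def bind_args(argnames, args, kwargs=None):
    kwargs = kwargs or {}          # fresh dict per call, never shared
    ns = dict(zip_longest(argnames, args, fillvalue=JS_Undefined))
    ns.update(kwargs)
    return ns

ns = bind_args(('a', 'b'), (1,))
assert ns['b'] is JS_Undefined     # f(1) leaves b undefined, not None
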
""" - + self.orig_msg = msg if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if video_id is not None: @@ -3959,19 +3944,22 @@ def parse_duration(s): return duration -def prepend_extension(filename, ext, expected_real_ext=None): +def _change_extension(prepend, filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) - return ( - '{0}.{1}{2}'.format(name, ext, real_ext) - if not expected_real_ext or real_ext[1:] == expected_real_ext - else '{0}.{1}'.format(filename, ext)) + sanitize_extension = _UnsafeExtensionError.sanitize_extension + if not expected_real_ext or real_ext.partition('.')[0::2] == ('', expected_real_ext): + filename = name + if prepend and real_ext: + sanitize_extension(ext, prepend=prepend) + return ''.join((filename, '.', ext, real_ext)) -def replace_extension(filename, ext, expected_real_ext=None): - name, real_ext = os.path.splitext(filename) - return '{0}.{1}'.format( - name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, - ext) + # Mitigate path traversal and file impersonation attacks + return '.'.join((filename, sanitize_extension(ext))) + + +prepend_extension = functools.partial(_change_extension, True) +replace_extension = functools.partial(_change_extension, False) def check_executable(exe, args=[]): @@ -4216,12 +4204,16 @@ def lowercase_escape(s): s) -def escape_rfc3986(s): +def escape_rfc3986(s, safe=None): """Escape non-ASCII characters as suggested by RFC 3986""" if sys.version_info < (3, 0): s = _encode_compat_str(s, 'utf-8') + if safe is not None: + safe = _encode_compat_str(safe, 'utf-8') + if safe is None: + safe = b"%/;:@&=+$,!~*'()?#[]" # ensure unicode: after quoting, it can always be converted - return compat_str(compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")) + return compat_str(compat_urllib_parse.quote(s, safe)) def escape_url(url): @@ -6561,3 +6553,169 @@ def join_nonempty(*values, **kwargs): if from_dict is not None: values = (traverse_obj(from_dict, variadic(v)) for v in values) return delim.join(map(compat_str, filter(None, values))) + + +class Namespace(object): + """Immutable namespace""" + + def __init__(self, **kw_attr): + self.__dict__.update(kw_attr) + + def __iter__(self): + return iter(self.__dict__.values()) + + @property + def items_(self): + return self.__dict__.items() + + +MEDIA_EXTENSIONS = Namespace( + common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'), + video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'), + common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'), + audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'), + thumbnails=('jpg', 'png', 'webp'), + # storyboards=('mhtml', ), + subtitles=('srt', 'vtt', 'ass', 'lrc', 'ttml'), + manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'), +) +MEDIA_EXTENSIONS.video = MEDIA_EXTENSIONS.common_video + MEDIA_EXTENSIONS.video +MEDIA_EXTENSIONS.audio = MEDIA_EXTENSIONS.common_audio + MEDIA_EXTENSIONS.audio + +KNOWN_EXTENSIONS = ( + MEDIA_EXTENSIONS.video + MEDIA_EXTENSIONS.audio + + MEDIA_EXTENSIONS.manifests +) + + +class _UnsafeExtensionError(Exception): + """ + Mitigation exception for unwanted file overwrite/path traversal + + Ref: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-79w7-vh3h-8g4j + """ + _ALLOWED_EXTENSIONS = frozenset(itertools.chain( + ( # internal + 'description', + 'json', + 'meta', + 'orig', + 'part', + 'temp', + 'uncut', + 
'unknown_video', + 'ytdl', + ), + # video + MEDIA_EXTENSIONS.video, ( + 'asx', + 'ismv', + 'm2t', + 'm2ts', + 'm2v', + 'm4s', + 'mng', + 'mp2v', + 'mp4v', + 'mpe', + 'mpeg', + 'mpeg1', + 'mpeg2', + 'mpeg4', + 'mxf', + 'ogm', + 'qt', + 'rm', + 'swf', + 'ts', + 'vob', + 'vp9', + ), + # audio + MEDIA_EXTENSIONS.audio, ( + '3ga', + 'ac3', + 'adts', + 'aif', + 'au', + 'dts', + 'isma', + 'it', + 'mid', + 'mod', + 'mpga', + 'mp1', + 'mp2', + 'mp4a', + 'mpa', + 'ra', + 'shn', + 'xm', + ), + # image + MEDIA_EXTENSIONS.thumbnails, ( + 'avif', + 'bmp', + 'gif', + 'ico', + 'heic', + 'jng', + 'jpeg', + 'jxl', + 'svg', + 'tif', + 'tiff', + 'wbmp', + ), + # subtitle + MEDIA_EXTENSIONS.subtitles, ( + 'dfxp', + 'fs', + 'ismt', + 'json3', + 'sami', + 'scc', + 'srv1', + 'srv2', + 'srv3', + 'ssa', + 'tt', + 'xml', + ), + # others + MEDIA_EXTENSIONS.manifests, + ( + # not used in yt-dl + # *MEDIA_EXTENSIONS.storyboards, + # 'desktop', + # 'ism', + # 'm3u', + # 'sbv', + # 'swp', + # 'url', + # 'webloc', + ))) + + def __init__(self, extension): + super(_UnsafeExtensionError, self).__init__('unsafe file extension: {0!r}'.format(extension)) + self.extension = extension + + # support --no-check-extensions + lenient = False + + @classmethod + def sanitize_extension(cls, extension, **kwargs): + # ... /, *, prepend=False + prepend = kwargs.get('prepend', False) + + if '/' in extension or '\\' in extension: + raise cls(extension) + + if not prepend: + last = extension.rpartition('.')[-1] + if last == 'bin': + extension = last = 'unknown_video' + if not (cls.lenient or last.lower() in cls._ALLOWED_EXTENSIONS): + raise cls(extension) + + return extension diff --git a/youtube_dl/version.py b/youtube_dl/version.py index b82fbc702..c70d9d2af 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,3 +1,3 @@ from __future__ import unicode_literals -__version__ = '2021.12.17' +__version__ = '2025.04.07'
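
# Expected behaviour of the sanitize_extension() guard above, assuming this
# patched youtube_dl is importable (a usage sketch, not a test from the patch):
from youtube_dl.utils import _UnsafeExtensionError

sanitize = _UnsafeExtensionError.sanitize_extension
assert sanitize('mp4') == 'mp4'               # whitelisted media extension
assert sanitize('bin') == 'unknown_video'     # remapped, then allowed
for bad in ('exe', 'mp4/../../evil'):         # unknown ext / path traversal
    try:
        sanitize(bad)
    except _UnsafeExtensionError:
        pass
    else:
        raise AssertionError(bad)
# --no-check-extensions sets _UnsafeExtensionError.lenient, which skips the
# whitelist check but still rejects path separators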