-rw-r--r--  .github/workflows/ci.yml            19
-rw-r--r--  test/helper.py                        8
-rw-r--r--  test/test_cache.py                   14
-rw-r--r--  test/test_jsinterp.py                63
-rw-r--r--  test/test_traversal.py              190
-rw-r--r--  test/test_utils.py                   46
-rw-r--r--  test/test_youtube_signature.py       68
-rwxr-xr-x  youtube_dl/YoutubeDL.py              39
-rw-r--r--  youtube_dl/__init__.py                2
-rw-r--r--  youtube_dl/cache.py                  95
-rw-r--r--  youtube_dl/compat.py                255
-rw-r--r--  youtube_dl/downloader/common.py      28
-rw-r--r--  youtube_dl/extractor/common.py       91
-rw-r--r--  youtube_dl/extractor/youtube.py     941
-rw-r--r--  youtube_dl/jsinterp.py              278
-rw-r--r--  youtube_dl/options.py                15
-rw-r--r--  youtube_dl/traversal.py               4
-rw-r--r--  youtube_dl/utils.py                 236
-rw-r--r--  youtube_dl/version.py                 2

19 files changed, 1793 insertions, 601 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d3b9ae016..073c4458c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -116,29 +116,29 @@ jobs:
strategy:
fail-fast: true
matrix:
- os: [ubuntu-20.04]
+ os: [ubuntu-22.04]
python-version: ${{ fromJSON(needs.select.outputs.cpython-versions) }}
python-impl: [cpython]
ytdl-test-set: ${{ fromJSON(needs.select.outputs.test-set) }}
run-tests-ext: [sh]
include:
- - os: windows-2019
+ - os: windows-2022
python-version: 3.4
python-impl: cpython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
run-tests-ext: bat
- - os: windows-2019
+ - os: windows-2022
python-version: 3.4
python-impl: cpython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
run-tests-ext: bat
# jython
- - os: ubuntu-20.04
+ - os: ubuntu-22.04
python-version: 2.7
python-impl: jython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
run-tests-ext: sh
- - os: ubuntu-20.04
+ - os: ubuntu-22.04
python-version: 2.7
python-impl: jython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
@@ -160,7 +160,7 @@ jobs:
# NB may run apt-get install in Linux
uses: ytdl-org/setup-python@v1
env:
- # Temporary workaround for Python 3.5 failures - May 2024
+ # Temporary (?) workaround for Python 3.5 failures - May 2024
PIP_TRUSTED_HOST: "pypi.python.org pypi.org files.pythonhosted.org"
with:
python-version: ${{ matrix.python-version }}
@@ -240,7 +240,10 @@ jobs:
# install 2.7
shell: bash
run: |
- sudo apt-get install -y python2 python-is-python2
+ # Ubuntu 22.04 no longer has python-is-python2: fetch it
+ curl -L "http://launchpadlibrarian.net/474693132/python-is-python2_2.7.17-4_all.deb" -o python-is-python2.deb
+ sudo apt-get install -y python2
+ sudo dpkg --force-breaks -i python-is-python2.deb
echo "PYTHONHOME=/usr" >> "$GITHUB_ENV"
#-------- Python 2.6 --
- name: Set up Python 2.6 environment
@@ -362,7 +365,7 @@ jobs:
python -m ensurepip || python -m pip --version || { \
get_pip="${{ contains(needs.select.outputs.own-pip-versions, matrix.python-version) && format('{0}/', matrix.python-version) || '' }}"; \
curl -L -O "https://bootstrap.pypa.io/pip/${get_pip}get-pip.py"; \
- python get-pip.py; }
+ python get-pip.py --no-setuptools --no-wheel; }
- name: Set up Python 2.6 pip
if: ${{ matrix.python-version == '2.6' }}
shell: bash
diff --git a/test/helper.py b/test/helper.py
index 6f2129eff..fac069d25 100644
--- a/test/helper.py
+++ b/test/helper.py
@@ -85,10 +85,10 @@ class FakeYDL(YoutubeDL):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
- def report_warning(self, message):
+ def report_warning(self, message, *args, **kwargs):
if re.match(regex, message):
return
- old_report_warning(message)
+ old_report_warning(message, *args, **kwargs)
self.report_warning = types.MethodType(report_warning, self)
@@ -265,11 +265,11 @@ def assertRegexpMatches(self, text, regexp, msg=None):
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
- def _report_warning(w):
+ def _report_warning(self, w, *args, **kwargs):
if not any(re.search(w_re, w) for w_re in warnings_re):
real_warning(w)
- ydl.report_warning = _report_warning
+ ydl.report_warning = types.MethodType(_report_warning, ydl)
def http_server_port(httpd):
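
Note: the fixes above rebind the warning hooks with types.MethodType, so the replacements are real bound methods that receive `self` and pass through new arguments such as only_once. The idiom in isolation (a minimal standalone sketch, not youtube-dl code):

    import types

    class Logger(object):
        def warn(self, msg, **kwargs):
            print('WARNING: %s' % msg)

    log = Logger()
    old_warn = log.warn  # capture the original bound method

    def quiet_warn(self, msg, *args, **kwargs):
        # swallow one expected message, forward everything else together
        # with any newer keyword arguments (e.g. only_once=...)
        if msg != 'expected':
            old_warn(msg, *args, **kwargs)

    log.warn = types.MethodType(quiet_warn, log)  # bind to the instance
    log.warn('expected')    # silenced
    log.warn('unexpected')  # printed
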
diff --git a/test/test_cache.py b/test/test_cache.py
index 931074aa1..0431f4f15 100644
--- a/test/test_cache.py
+++ b/test/test_cache.py
@@ -63,9 +63,21 @@ class TestCache(unittest.TestCase):
obj = {'x': 1, 'y': ['รค', '\\a', True]}
c.store('test_cache', 'k.', obj)
self.assertEqual(c.load('test_cache', 'k.', min_ver='1970.01.01'), obj)
- new_version = '.'.join(('%d' % ((v + 1) if i == 0 else v, )) for i, v in enumerate(version_tuple(__version__)))
+ new_version = '.'.join(('%0.2d' % ((v + 1) if i == 0 else v, )) for i, v in enumerate(version_tuple(__version__)))
self.assertIs(c.load('test_cache', 'k.', min_ver=new_version), None)
+ def test_cache_clear(self):
+ ydl = FakeYDL({
+ 'cachedir': self.test_dir,
+ })
+ c = Cache(ydl)
+ c.store('test_cache', 'k.', 'kay')
+ c.store('test_cache', 'l.', 'ell')
+ self.assertEqual(c.load('test_cache', 'k.'), 'kay')
+ c.clear('test_cache', 'k.')
+ self.assertEqual(c.load('test_cache', 'k.'), None)
+ self.assertEqual(c.load('test_cache', 'l.'), 'ell')
+
if __name__ == '__main__':
unittest.main()
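
Note: the bumped-version expression in test_cache is dense; unrolled, it increments the first (year) component and zero-pads every piece so the synthetic version keeps the date-like shape of real releases (a standalone sketch, assuming a version string such as '2025.01.01'):

    def version_tuple(v):
        return tuple(int(e) for e in v.split('.'))

    __version__ = '2025.01.01'  # hypothetical release for illustration
    new_version = '.'.join(
        '%0.2d' % ((v + 1) if i == 0 else v)
        for i, v in enumerate(version_tuple(__version__)))
    print(new_version)  # '2026.01.01': strictly newer than the writing version
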
diff --git a/test/test_jsinterp.py b/test/test_jsinterp.py
index 3c9650ab6..479cb43a0 100644
--- a/test/test_jsinterp.py
+++ b/test/test_jsinterp.py
@@ -7,6 +7,7 @@ from __future__ import unicode_literals
import os
import sys
import unittest
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import math
@@ -146,6 +147,25 @@ class TestJSInterpreter(unittest.TestCase):
# https://github.com/ytdl-org/youtube-dl/issues/32815
self._test('function f(){return 0 - 7 * - 6;}', 42)
+ def test_bitwise_operators_typecast(self):
+ # madness
+ self._test('function f(){return null << 5}', 0)
+ self._test('function f(){return undefined >> 5}', 0)
+ self._test('function f(){return 42 << NaN}', 42)
+ self._test('function f(){return 42 << Infinity}', 42)
+ self._test('function f(){return 0.0 << null}', 0)
+ self._test('function f(){return NaN << 42}', 0)
+ self._test('function f(){return "21.9" << 1}', 42)
+ self._test('function f(){return true << "5";}', 32)
+ self._test('function f(){return true << true;}', 2)
+ self._test('function f(){return "19" & "21.9";}', 17)
+ self._test('function f(){return "19" & false;}', 0)
+ self._test('function f(){return "11.0" >> "2.1";}', 2)
+ self._test('function f(){return 5 ^ 9;}', 12)
+ self._test('function f(){return 0.0 << NaN}', 0)
+ self._test('function f(){return null << undefined}', 0)
+ self._test('function f(){return 21 << 4294967297}', 42)
+
def test_array_access(self):
self._test('function f(){var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2.0] = 7; return x;}', [5, 2, 7])
@@ -160,6 +180,7 @@ class TestJSInterpreter(unittest.TestCase):
self._test('function f(){var x = 20; x = 30 + 1; return x;}', 31)
self._test('function f(){var x = 20; x += 30 + 1; return x;}', 51)
self._test('function f(){var x = 20; x -= 30 + 1; return x;}', -11)
+ self._test('function f(){var x = 2; var y = ["a", "b"]; y[x%y["length"]]="z"; return y}', ['z', 'b'])
def test_comments(self):
self._test('''
@@ -351,6 +372,13 @@ class TestJSInterpreter(unittest.TestCase):
self._test('function f() { a=5; return (a -= 1, a+=3, a); }', 7)
self._test('function f() { return (l=[0,1,2,3], function(a, b){return a+b})((l[1], l[2]), l[3]) }', 5)
+ def test_not(self):
+ self._test('function f() { return ! undefined; }', True)
+ self._test('function f() { return !0; }', True)
+ self._test('function f() { return !!0; }', False)
+ self._test('function f() { return ![]; }', False)
+ self._test('function f() { return !0 !== false; }', True)
+
def test_void(self):
self._test('function f() { return void 42; }', JS_Undefined)
@@ -435,6 +463,7 @@ class TestJSInterpreter(unittest.TestCase):
def test_regex(self):
self._test('function f() { let a=/,,[/,913,/](,)}/; }', None)
+ self._test('function f() { let a=/,,[/,913,/](,)}/; return a.source; }', ',,[/,913,/](,)}')
jsi = JSInterpreter('''
function x() { let a=/,,[/,913,/](,)}/; "".replace(a, ""); return a; }
@@ -482,25 +511,6 @@ class TestJSInterpreter(unittest.TestCase):
self._test('function f(){return -524999584 << 5}', 379882496)
self._test('function f(){return 1236566549 << 5}', 915423904)
- def test_bitwise_operators_typecast(self):
- # madness
- self._test('function f(){return null << 5}', 0)
- self._test('function f(){return undefined >> 5}', 0)
- self._test('function f(){return 42 << NaN}', 42)
- self._test('function f(){return 42 << Infinity}', 42)
- self._test('function f(){return 0.0 << null}', 0)
- self._test('function f(){return NaN << 42}', 0)
- self._test('function f(){return "21.9" << 1}', 42)
- self._test('function f(){return 21 << 4294967297}', 42)
- self._test('function f(){return true << "5";}', 32)
- self._test('function f(){return true << true;}', 2)
- self._test('function f(){return "19" & "21.9";}', 17)
- self._test('function f(){return "19" & false;}', 0)
- self._test('function f(){return "11.0" >> "2.1";}', 2)
- self._test('function f(){return 5 ^ 9;}', 12)
- self._test('function f(){return 0.0 << NaN}', 0)
- self._test('function f(){return null << undefined}', 0)
-
def test_negative(self):
self._test('function f(){return 2 * -2.0 ;}', -4)
self._test('function f(){return 2 - - -2 ;}', 0)
@@ -543,6 +553,8 @@ class TestJSInterpreter(unittest.TestCase):
test_result = list('test')
tests = [
'function f(a, b){return a.split(b)}',
+ 'function f(a, b){return a["split"](b)}',
+ 'function f(a, b){let x = ["split"]; return a[x[0]](b)}',
'function f(a, b){return String.prototype.split.call(a, b)}',
'function f(a, b){return String.prototype.split.apply(a, [b])}',
]
@@ -593,6 +605,9 @@ class TestJSInterpreter(unittest.TestCase):
self._test('function f(){return "012345678".slice(-1, 1)}', '')
self._test('function f(){return "012345678".slice(-3, -1)}', '67')
+ def test_splice(self):
+ self._test('function f(){var T = ["0", "1", "2"]; T["splice"](2, 1, "0")[0]; return T }', ['0', '1', '0'])
+
def test_pop(self):
# pop
self._test('function f(){var a = [0, 1, 2, 3, 4, 5, 6, 7, 8]; return [a.pop(), a]}',
@@ -627,6 +642,16 @@ class TestJSInterpreter(unittest.TestCase):
'return [ret.length, ret[0][0], ret[1][1], ret[0][2]]}',
[2, 4, 1, [4, 2]])
+ def test_extract_function(self):
+ jsi = JSInterpreter('function a(b) { return b + 1; }')
+ func = jsi.extract_function('a')
+ self.assertEqual(func([2]), 3)
+
+ def test_extract_function_with_global_stack(self):
+ jsi = JSInterpreter('function c(d) { return d + e + f + g; }')
+ func = jsi.extract_function('c', {'e': 10}, {'f': 100, 'g': 1000})
+ self.assertEqual(func([1]), 1111)
+
if __name__ == '__main__':
unittest.main()
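
Note: the expectations in test_bitwise_operators_typecast follow JavaScript's ToInt32 coercion: null, undefined, NaN and Infinity coerce to 0, numeric strings are parsed, and shift counts are taken modulo 32 (hence 21 << 4294967297 == 21 << 1 == 42). A rough Python model of that coercion (illustrative only, not the jsinterp implementation):

    def to_int32(v):
        # null/undefined/NaN/Infinity and non-numeric values -> 0
        try:
            n = float(v)
        except (TypeError, ValueError):
            return 0
        if n != n or n in (float('inf'), float('-inf')):
            return 0
        n = int(n) & 0xFFFFFFFF
        return n - 0x100000000 if n >= 0x80000000 else n

    print(to_int32('21.9') << (to_int32(1) & 31))       # 42
    print(to_int32(21) << (to_int32(4294967297) & 31))  # 42 (shift wraps mod 32)
    print(to_int32('19') & to_int32('21.9'))            # 17
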
diff --git a/test/test_traversal.py b/test/test_traversal.py
index 00a428edb..504cdee37 100644
--- a/test/test_traversal.py
+++ b/test/test_traversal.py
@@ -9,21 +9,32 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+import itertools
import re
from youtube_dl.traversal import (
dict_get,
get_first,
+ require,
+ subs_list_to_dict,
T,
traverse_obj,
+ unpack,
+ value,
)
from youtube_dl.compat import (
+ compat_chr as chr,
compat_etree_fromstring,
compat_http_cookies,
+ compat_map as map,
compat_str,
+ compat_zip as zip,
)
from youtube_dl.utils import (
+ determine_ext,
+ ExtractorError,
int_or_none,
+ join_nonempty,
str_or_none,
)
@@ -446,42 +457,164 @@ class TestTraversal(_TestCase):
msg='`any` should allow further branching')
def test_traversal_morsel(self):
- values = {
- 'expires': 'a',
- 'path': 'b',
- 'comment': 'c',
- 'domain': 'd',
- 'max-age': 'e',
- 'secure': 'f',
- 'httponly': 'g',
- 'version': 'h',
- 'samesite': 'i',
- }
- # SameSite added in Py3.8, breaks .update for 3.5-3.7
- if sys.version_info < (3, 8):
- del values['samesite']
morsel = compat_http_cookies.Morsel()
+ # SameSite added in Py3.8, breaks .update for 3.5-3.7
+ # Similarly Partitioned, Py3.14, thx Grub4k
+ values = dict(zip(morsel, map(chr, itertools.count(ord('a')))))
morsel.set(str('item_key'), 'item_value', 'coded_value')
morsel.update(values)
- values['key'] = str('item_key')
- values['value'] = 'item_value'
+ values.update({
+ 'key': str('item_key'),
+ 'value': 'item_value',
+ }),
values = dict((str(k), v) for k, v in values.items())
- # make test pass even without ordered dict
- value_set = set(values.values())
- for key, value in values.items():
- self.assertEqual(traverse_obj(morsel, key), value,
+ for key, val in values.items():
+ self.assertEqual(traverse_obj(morsel, key), val,
msg='Morsel should provide access to all values')
- self.assertEqual(set(traverse_obj(morsel, Ellipsis)), value_set,
- msg='`...` should yield all values')
- self.assertEqual(set(traverse_obj(morsel, lambda k, v: True)), value_set,
- msg='function key should yield all values')
+ values = list(values.values())
+ self.assertMaybeCountEqual(traverse_obj(morsel, Ellipsis), values,
+ msg='`...` should yield all values')
+ self.assertMaybeCountEqual(traverse_obj(morsel, lambda k, v: True), values,
+ msg='function key should yield all values')
self.assertIs(traverse_obj(morsel, [(None,), any]), morsel,
msg='Morsel should not be implicitly changed to dict on usage')
- def test_get_first(self):
- self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam')
-
+ def test_traversal_filter(self):
+ data = [None, False, True, 0, 1, 0.0, 1.1, '', 'str', {}, {0: 0}, [], [1]]
+
+ self.assertEqual(
+ traverse_obj(data, (Ellipsis, filter)),
+ [True, 1, 1.1, 'str', {0: 0}, [1]],
+ '`filter` should filter falsy values')
+
+
+class TestTraversalHelpers(_TestCase):
+ def test_traversal_require(self):
+ with self.assertRaises(ExtractorError, msg='Missing `value` should raise'):
+ traverse_obj(_TEST_DATA, ('None', T(require('value'))))
+ self.assertEqual(
+ traverse_obj(_TEST_DATA, ('str', T(require('value')))), 'str',
+ '`require` should pass through non-`None` values')
+
+ def test_subs_list_to_dict(self):
+ self.assertEqual(traverse_obj([
+ {'name': 'de', 'url': 'https://example.com/subs/de.vtt'},
+ {'name': 'en', 'url': 'https://example.com/subs/en1.ass'},
+ {'name': 'en', 'url': 'https://example.com/subs/en2.ass'},
+ ], [Ellipsis, {
+ 'id': 'name',
+ 'url': 'url',
+ }, all, T(subs_list_to_dict)]), {
+ 'de': [{'url': 'https://example.com/subs/de.vtt'}],
+ 'en': [
+ {'url': 'https://example.com/subs/en1.ass'},
+ {'url': 'https://example.com/subs/en2.ass'},
+ ],
+ }, 'function should build subtitle dict from list of subtitles')
+ self.assertEqual(traverse_obj([
+ {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
+ {'name': 'de'},
+ {'name': 'en', 'content': 'content'},
+ {'url': 'https://example.com/subs/en'},
+ ], [Ellipsis, {
+ 'id': 'name',
+ 'data': 'content',
+ 'url': 'url',
+ }, all, T(subs_list_to_dict(lang=None))]), {
+ 'de': [{'url': 'https://example.com/subs/de.ass'}],
+ 'en': [{'data': 'content'}],
+ }, 'subs with mandatory items missing should be filtered')
+ self.assertEqual(traverse_obj([
+ {'url': 'https://example.com/subs/de.ass', 'name': 'de'},
+ {'url': 'https://example.com/subs/en', 'name': 'en'},
+ ], [Ellipsis, {
+ 'id': 'name',
+ 'ext': ['url', T(determine_ext(default_ext=None))],
+ 'url': 'url',
+ }, all, T(subs_list_to_dict(ext='ext'))]), {
+ 'de': [{'url': 'https://example.com/subs/de.ass', 'ext': 'ass'}],
+ 'en': [{'url': 'https://example.com/subs/en', 'ext': 'ext'}],
+ }, '`ext` should set default ext but leave existing value untouched')
+ self.assertEqual(traverse_obj([
+ {'name': 'en', 'url': 'https://example.com/subs/en2', 'prio': True},
+ {'name': 'en', 'url': 'https://example.com/subs/en1', 'prio': False},
+ ], [Ellipsis, {
+ 'id': 'name',
+ 'quality': ['prio', T(int)],
+ 'url': 'url',
+ }, all, T(subs_list_to_dict(ext='ext'))]), {'en': [
+ {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
+ {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
+ ]}, '`quality` key should sort subtitle list accordingly')
+ self.assertEqual(traverse_obj([
+ {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
+ {'name': 'de'},
+ {'name': 'en', 'content': 'content'},
+ {'url': 'https://example.com/subs/en'},
+ ], [Ellipsis, {
+ 'id': 'name',
+ 'url': 'url',
+ 'data': 'content',
+ }, all, T(subs_list_to_dict(lang='en'))]), {
+ 'de': [{'url': 'https://example.com/subs/de.ass'}],
+ 'en': [
+ {'data': 'content'},
+ {'url': 'https://example.com/subs/en'},
+ ],
+ }, 'optionally provided lang should be used if no id available')
+ self.assertEqual(traverse_obj([
+ {'name': 1, 'url': 'https://example.com/subs/de1'},
+ {'name': {}, 'url': 'https://example.com/subs/de2'},
+ {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+ {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+ ], [Ellipsis, {
+ 'id': 'name',
+ 'url': 'url',
+ 'ext': 'ext',
+ }, all, T(subs_list_to_dict(lang=None))]), {
+ 'de': [
+ {'url': 'https://example.com/subs/de3'},
+ {'url': 'https://example.com/subs/de4'},
+ ],
+ }, 'non str types should be ignored for id and ext')
+ self.assertEqual(traverse_obj([
+ {'name': 1, 'url': 'https://example.com/subs/de1'},
+ {'name': {}, 'url': 'https://example.com/subs/de2'},
+ {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+ {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+ ], [Ellipsis, {
+ 'id': 'name',
+ 'url': 'url',
+ 'ext': 'ext',
+ }, all, T(subs_list_to_dict(lang='de'))]), {
+ 'de': [
+ {'url': 'https://example.com/subs/de1'},
+ {'url': 'https://example.com/subs/de2'},
+ {'url': 'https://example.com/subs/de3'},
+ {'url': 'https://example.com/subs/de4'},
+ ],
+ }, 'non str types should be replaced by default id')
+
+ def test_unpack(self):
+ self.assertEqual(
+ unpack(lambda *x: ''.join(map(compat_str, x)))([1, 2, 3]), '123')
+ self.assertEqual(
+ unpack(join_nonempty)([1, 2, 3]), '1-2-3')
+ self.assertEqual(
+ unpack(join_nonempty, delim=' ')([1, 2, 3]), '1 2 3')
+ with self.assertRaises(TypeError):
+ unpack(join_nonempty)()
+ with self.assertRaises(TypeError):
+ unpack()
+
+ def test_value(self):
+ self.assertEqual(
+ traverse_obj(_TEST_DATA, ('str', T(value('other')))), 'other',
+ '`value` should substitute specified value')
+
+
+class TestDictGet(_TestCase):
def test_dict_get(self):
FALSE_VALUES = {
'none': None,
@@ -504,6 +637,9 @@ class TestTraversal(_TestCase):
self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
+ def test_get_first(self):
+ self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam')
+
if __name__ == '__main__':
unittest.main()
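
Note: the rewritten test_traversal_morsel no longer hard-codes the Morsel attribute list. Iterating a Morsel yields every reserved cookie attribute the running Python knows about (samesite from 3.8, partitioned from 3.14), and zipping that with an endless letter supply labels each one. The trick in isolation (Py3 stdlib shown; the test goes through the compat aliases):

    import itertools
    from http import cookies

    morsel = cookies.Morsel()
    # e.g. {'expires': 'a', 'path': 'b', 'comment': 'c', ...},
    # automatically covering whatever attributes this Python supports
    values = dict(zip(morsel, map(chr, itertools.count(ord('a')))))
    print(sorted(values.items())[:3])
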
diff --git a/test/test_utils.py b/test/test_utils.py
index 2947cce7e..1106f2819 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -69,6 +69,7 @@ from youtube_dl.utils import (
parse_iso8601,
parse_resolution,
parse_qs,
+ partial_application,
pkcs1pad,
prepend_extension,
read_batch_urls,
@@ -664,6 +665,8 @@ class TestUtil(unittest.TestCase):
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
+ self.assertEqual(parse_duration('3 hours, 11 minutes, 53 seconds'), 11513)
+ self.assertEqual(parse_duration('3 hours, 11 mins, 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
@@ -682,6 +685,10 @@ class TestUtil(unittest.TestCase):
self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
+ self.assertEqual(parse_duration('01:02:03:050'), 3723.05)
+ self.assertEqual(parse_duration('103:050'), 103.05)
+ self.assertEqual(parse_duration('1HR 3MIN'), 3780)
+ self.assertEqual(parse_duration('2hrs 3mins'), 7380)
def test_fix_xml_ampersands(self):
self.assertEqual(
@@ -895,6 +902,30 @@ class TestUtil(unittest.TestCase):
'vcodec': 'av01.0.05M.08',
'acodec': 'none',
})
+ self.assertEqual(parse_codecs('vp9.2'), {
+ 'vcodec': 'vp9.2',
+ 'acodec': 'none',
+ 'dynamic_range': 'HDR10',
+ })
+ self.assertEqual(parse_codecs('vp09.02.50.10.01.09.18.09.00'), {
+ 'vcodec': 'vp09.02.50.10.01.09.18.09.00',
+ 'acodec': 'none',
+ 'dynamic_range': 'HDR10',
+ })
+ self.assertEqual(parse_codecs('av01.0.12M.10.0.110.09.16.09.0'), {
+ 'vcodec': 'av01.0.12M.10.0.110.09.16.09.0',
+ 'acodec': 'none',
+ 'dynamic_range': 'HDR10',
+ })
+ self.assertEqual(parse_codecs('dvhe'), {
+ 'vcodec': 'dvhe',
+ 'acodec': 'none',
+ 'dynamic_range': 'DV',
+ })
+ self.assertEqual(parse_codecs('fLaC'), {
+ 'vcodec': 'none',
+ 'acodec': 'flac',
+ })
self.assertEqual(parse_codecs('theora, vorbis'), {
'vcodec': 'theora',
'acodec': 'vorbis',
@@ -1723,6 +1754,21 @@ Line 1
'a', 'b', 'c', 'd',
from_dict={'a': 'c', 'c': [], 'b': 'd', 'd': None}), 'c-d')
+ def test_partial_application(self):
+ test_fn = partial_application(lambda x, kwarg=None: '{0}, kwarg={1!r}'.format(x, kwarg))
+ self.assertTrue(
+ callable(test_fn(kwarg=10)),
+ 'missing positional parameter should apply partially')
+ self.assertEqual(
+ test_fn(10, kwarg=42), '10, kwarg=42',
+ 'positionally passed argument should call function')
+ self.assertEqual(
+ test_fn(x=10), '10, kwarg=None',
+ 'keyword passed positional should call function')
+ self.assertEqual(
+ test_fn(kwarg=42)(10), '10, kwarg=42',
+ 'call after partial application should call the function')
+
if __name__ == '__main__':
unittest.main()
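
Note: judging from test_partial_application, partial_application makes a function return a partially applied callable when required positional arguments are missing, and call through otherwise. A minimal sketch of that behaviour (an assumption based on the tests, not the actual utils implementation; Py3-only for brevity):

    import functools
    import inspect

    def partial_application(func):
        sig = inspect.signature(func)

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            try:
                sig.bind(*args, **kwargs)  # TypeError if positionals missing
            except TypeError:
                return functools.partial(wrapped, *args, **kwargs)
            return func(*args, **kwargs)

        return wrapped

    fmt = partial_application(lambda x, kwarg=None: (x, kwarg))
    print(fmt(10))            # (10, None)
    print(fmt(kwarg=42)(10))  # (10, 42)
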
diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
index 8fdcce8d4..98221b9c2 100644
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -95,10 +95,50 @@ _SIG_TESTS = [
'0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
),
(
+ 'https://www.youtube.com/s/player/363db69b/player_ias_tce.vflset/en_US/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ '0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ ),
+ (
'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
),
+ (
+ 'https://www.youtube.com/s/player/4fcd6e4a/player_ias_tce.vflset/en_US/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ 'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
+ ),
+ (
+ 'https://www.youtube.com/s/player/20830619/player_ias.vflset/en_US/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+ ),
+ (
+ 'https://www.youtube.com/s/player/20830619/player_ias_tce.vflset/en_US/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+ ),
+ (
+ 'https://www.youtube.com/s/player/20830619/player-plasma-ias-phone-en_US.vflset/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+ ),
+ (
+ 'https://www.youtube.com/s/player/20830619/player-plasma-ias-tablet-en_US.vflset/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ '7AOq0QJ8wRAIgXmPlOPSBkkAs1bYFYlJCfe29xx8jOv1pDL0Q2bdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0qaw',
+ ),
+ (
+ 'https://www.youtube.com/s/player/8a8ac953/player_ias_tce.vflset/en_US/base.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ 'IAOAOq0QJ8wRAAgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_E2u-m37KtXJoOySqa0',
+ ),
+ (
+ 'https://www.youtube.com/s/player/8a8ac953/tv-player-es6.vflset/tv-player-es6.js',
+ '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+ 'IAOAOq0QJ8wRAAgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_E2u-m37KtXJoOySqa0',
+ ),
]
_NSIG_TESTS = [
@@ -272,7 +312,7 @@ _NSIG_TESTS = [
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
- 'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
+ 'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA',
),
(
'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
@@ -286,6 +326,30 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/4fcd6e4a/tv-player-ias.vflset/tv-player-ias.js',
'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A',
),
+ (
+ 'https://www.youtube.com/s/player/20830619/tv-player-ias.vflset/tv-player-ias.js',
+ 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4',
+ ),
+ (
+ 'https://www.youtube.com/s/player/20830619/player-plasma-ias-phone-en_US.vflset/base.js',
+ 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4',
+ ),
+ (
+ 'https://www.youtube.com/s/player/20830619/player-plasma-ias-tablet-en_US.vflset/base.js',
+ 'ir9-V6cdbCiyKxhr', '9YE85kNjZiS4',
+ ),
+ (
+ 'https://www.youtube.com/s/player/8a8ac953/player_ias_tce.vflset/en_US/base.js',
+ 'MiBYeXx_vRREbiCCmh', 'RtZYMVvmkE0JE',
+ ),
+ (
+ 'https://www.youtube.com/s/player/8a8ac953/tv-player-es6.vflset/tv-player-es6.js',
+ 'MiBYeXx_vRREbiCCmh', 'RtZYMVvmkE0JE',
+ ),
+ (
+ 'https://www.youtube.com/s/player/aa3fc80b/player_ias.vflset/en_US/base.js',
+ '0qY9dal2uzOnOGwa-48hha', 'VSh1KDfQMk-eag',
+ ),
]
@@ -335,7 +399,7 @@ def t_factory(name, sig_func, url_pattern):
test_id = re.sub(r'[/.-]', '_', m.group('id') or m.group('compat_id'))
def test_func(self):
- basename = 'player-{0}-{1}.js'.format(name, test_id)
+ basename = 'player-{0}.js'.format(test_id)
fn = os.path.join(self.TESTDATA_DIR, basename)
if not os.path.exists(fn):
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 9e5620eef..ec1d35c3a 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -357,7 +357,7 @@ class YoutubeDL(object):
_NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
- 'timestamp', 'upload_year', 'upload_month', 'upload_day',
+ 'timestamp', 'upload_year', 'upload_month', 'upload_day', 'available_at',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
@@ -540,10 +540,14 @@ class YoutubeDL(object):
"""Print message to stdout if not in quiet mode."""
return self.to_stdout(message, skip_eol, check_quiet=True)
- def _write_string(self, s, out=None):
+ def _write_string(self, s, out=None, only_once=False, _cache=set()):
+ if only_once and s in _cache:
+ return
write_string(s, out=out, encoding=self.params.get('encoding'))
+ if only_once:
+ _cache.add(s)
- def to_stdout(self, message, skip_eol=False, check_quiet=False):
+ def to_stdout(self, message, skip_eol=False, check_quiet=False, only_once=False):
"""Print message to stdout if not in quiet mode."""
if self.params.get('logger'):
self.params['logger'].debug(message)
@@ -552,9 +556,9 @@ class YoutubeDL(object):
terminator = ['\n', ''][skip_eol]
output = message + terminator
- self._write_string(output, self._screen_file)
+ self._write_string(output, self._screen_file, only_once=only_once)
- def to_stderr(self, message):
+ def to_stderr(self, message, only_once=False):
"""Print message to stderr."""
assert isinstance(message, compat_str)
if self.params.get('logger'):
@@ -562,7 +566,7 @@ class YoutubeDL(object):
else:
message = self._bidi_workaround(message)
output = message + '\n'
- self._write_string(output, self._err_file)
+ self._write_string(output, self._err_file, only_once=only_once)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
@@ -641,18 +645,11 @@ class YoutubeDL(object):
raise DownloadError(message, exc_info)
self._download_retcode = 1
- def report_warning(self, message, only_once=False, _cache={}):
+ def report_warning(self, message, only_once=False):
'''
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
- if only_once:
- m_hash = hash((self, message))
- m_cnt = _cache.setdefault(m_hash, 0)
- _cache[m_hash] = m_cnt + 1
- if m_cnt > 0:
- return
-
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
@@ -663,7 +660,7 @@ class YoutubeDL(object):
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
- self.to_stderr(warning_message)
+ self.to_stderr(warning_message, only_once=only_once)
def report_error(self, message, *args, **kwargs):
'''
@@ -677,6 +674,16 @@ class YoutubeDL(object):
kwargs['message'] = '%s %s' % (_msg_header, message)
self.trouble(*args, **kwargs)
+ def write_debug(self, message, only_once=False):
+ '''Log debug message or Print message to stderr'''
+ if not self.params.get('verbose', False):
+ return
+ message = '[debug] {0}'.format(message)
+ if self.params.get('logger'):
+ self.params['logger'].debug(message)
+ else:
+ self.to_stderr(message, only_once)
+
def report_unscoped_cookies(self, *args, **kwargs):
# message=None, tb=False, is_error=False
if len(args) <= 2:
@@ -2514,7 +2521,7 @@ class YoutubeDL(object):
self.get_encoding()))
write_string(encoding_str, encoding=None)
- writeln_debug = lambda *s: self._write_string('[debug] %s\n' % (''.join(s), ))
+ writeln_debug = lambda *s: self.write_debug(''.join(s))
writeln_debug('youtube-dl version ', __version__)
if _LAZY_LOADER:
writeln_debug('Lazy loading extractors enabled')
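
Note: _write_string's only_once memo relies on a mutable default argument: `_cache=set()` is created once at function definition time and shared by every call, so each distinct message is emitted at most once per process. The idiom in isolation (standalone sketch):

    def emit(s, only_once=False, _cache=set()):
        # _cache persists across calls: it is bound at definition time
        if only_once and s in _cache:
            return
        print(s)
        if only_once:
            _cache.add(s)

    emit('hello', only_once=True)  # printed
    emit('hello', only_once=True)  # suppressed

One consequence visible in the diff: unlike the removed per-instance hash((self, message)) cache in report_warning, this memo is keyed on the message alone, so suppression is shared across YoutubeDL instances.
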
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 3c1272e7b..202f2c9b9 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -409,6 +409,8 @@ def _real_main(argv=None):
'include_ads': opts.include_ads,
'default_search': opts.default_search,
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
+ 'youtube_player_js_version': opts.youtube_player_js_version,
+ 'youtube_player_js_variant': opts.youtube_player_js_variant,
'encoding': opts.encoding,
'extract_flat': opts.extract_flat,
'mark_watched': opts.mark_watched,
diff --git a/youtube_dl/cache.py b/youtube_dl/cache.py
index 54123da0e..eb0a729c2 100644
--- a/youtube_dl/cache.py
+++ b/youtube_dl/cache.py
@@ -1,3 +1,4 @@
+# coding: utf-8
from __future__ import unicode_literals
import errno
@@ -10,12 +11,14 @@ import traceback
from .compat import (
compat_getenv,
compat_open as open,
+ compat_os_makedirs,
)
from .utils import (
error_to_compat_str,
+ escape_rfc3986,
expand_path,
is_outdated_version,
- try_get,
+ traverse_obj,
write_json_file,
)
from .version import __version__
@@ -30,23 +33,35 @@ class Cache(object):
def __init__(self, ydl):
self._ydl = ydl
+ def _write_debug(self, *args, **kwargs):
+ self._ydl.write_debug(*args, **kwargs)
+
+ def _report_warning(self, *args, **kwargs):
+ self._ydl.report_warning(*args, **kwargs)
+
+ def _to_screen(self, *args, **kwargs):
+ self._ydl.to_screen(*args, **kwargs)
+
+ def _get_param(self, k, default=None):
+ return self._ydl.params.get(k, default)
+
def _get_root_dir(self):
- res = self._ydl.params.get('cachedir')
+ res = self._get_param('cachedir')
if res is None:
cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
res = os.path.join(cache_root, self._YTDL_DIR)
return expand_path(res)
def _get_cache_fn(self, section, key, dtype):
- assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
+ assert re.match(r'^[\w.-]+$', section), \
'invalid section %r' % section
- assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
+ key = escape_rfc3986(key, safe='').replace('%', ',') # encode non-ascii characters
return os.path.join(
self._get_root_dir(), section, '%s.%s' % (key, dtype))
@property
def enabled(self):
- return self._ydl.params.get('cachedir') is not False
+ return self._get_param('cachedir') is not False
def store(self, section, key, data, dtype='json'):
assert dtype in ('json',)
@@ -56,61 +71,75 @@ class Cache(object):
fn = self._get_cache_fn(section, key, dtype)
try:
- try:
- os.makedirs(os.path.dirname(fn))
- except OSError as ose:
- if ose.errno != errno.EEXIST:
- raise
+ compat_os_makedirs(os.path.dirname(fn), exist_ok=True)
+ self._write_debug('Saving {section}.{key} to cache'.format(section=section, key=key))
write_json_file({self._VERSION_KEY: __version__, 'data': data}, fn)
except Exception:
tb = traceback.format_exc()
- self._ydl.report_warning(
- 'Writing cache to %r failed: %s' % (fn, tb))
+ self._report_warning('Writing cache to {fn!r} failed: {tb}'.format(fn=fn, tb=tb))
+
+ def clear(self, section, key, dtype='json'):
+
+ if not self.enabled:
+ return
+
+ fn = self._get_cache_fn(section, key, dtype)
+ self._write_debug('Clearing {section}.{key} from cache'.format(section=section, key=key))
+ try:
+ os.remove(fn)
+ except Exception as e:
+ if getattr(e, 'errno') == errno.ENOENT:
+ # file not found
+ return
+ tb = traceback.format_exc()
+ self._report_warning('Clearing cache from {fn!r} failed: {tb}'.format(fn=fn, tb=tb))
def _validate(self, data, min_ver):
- version = try_get(data, lambda x: x[self._VERSION_KEY])
+ version = traverse_obj(data, self._VERSION_KEY)
if not version: # Backward compatibility
data, version = {'data': data}, self._DEFAULT_VERSION
if not is_outdated_version(version, min_ver or '0', assume_new=False):
return data['data']
- self._ydl.to_screen(
- 'Discarding old cache from version {version} (needs {min_ver})'.format(**locals()))
+ self._write_debug('Discarding old cache from version {version} (needs {min_ver})'.format(version=version, min_ver=min_ver))
- def load(self, section, key, dtype='json', default=None, min_ver=None):
+ def load(self, section, key, dtype='json', default=None, **kw_min_ver):
assert dtype in ('json',)
+ min_ver = kw_min_ver.get('min_ver')
if not self.enabled:
return default
cache_fn = self._get_cache_fn(section, key, dtype)
try:
+ with open(cache_fn, encoding='utf-8') as cachef:
+ self._write_debug('Loading {section}.{key} from cache'.format(section=section, key=key), only_once=True)
+ return self._validate(json.load(cachef), min_ver)
+ except (ValueError, KeyError):
try:
- with open(cache_fn, 'r', encoding='utf-8') as cachef:
- return self._validate(json.load(cachef), min_ver)
- except ValueError:
- try:
- file_size = os.path.getsize(cache_fn)
- except (OSError, IOError) as oe:
- file_size = error_to_compat_str(oe)
- self._ydl.report_warning(
- 'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
- except IOError:
- pass # No cache available
+ file_size = 'size: %d' % os.path.getsize(cache_fn)
+ except (OSError, IOError) as oe:
+ file_size = error_to_compat_str(oe)
+ self._report_warning('Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
+ except Exception as e:
+ if getattr(e, 'errno') == errno.ENOENT:
+ # no cache available
+ return
+ self._report_warning('Cache retrieval from %s failed' % (cache_fn,))
return default
def remove(self):
if not self.enabled:
- self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
+ self._to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
return
cachedir = self._get_root_dir()
if not any((term in cachedir) for term in ('cache', 'tmp')):
- raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
+ raise Exception('Not removing directory %s - this does not look like a cache dir' % (cachedir,))
- self._ydl.to_screen(
- 'Removing cache dir %s .' % cachedir, skip_eol=True)
+ self._to_screen(
+ 'Removing cache dir %s .' % (cachedir,), skip_eol=True, ),
if os.path.exists(cachedir):
- self._ydl.to_screen('.', skip_eol=True)
+ self._to_screen('.', skip_eol=True)
shutil.rmtree(cachedir)
- self._ydl.to_screen('.')
+ self._to_screen('.')
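
Note: the relaxed key handling in _get_cache_fn encodes instead of asserting: escape_rfc3986(key, safe='') percent-encodes everything outside the RFC 3986 unreserved set, and swapping '%' for ',' keeps the result filesystem-friendly. Roughly (a sketch using the stdlib quote as a stand-in for escape_rfc3986):

    try:
        from urllib.parse import quote  # Py3
    except ImportError:
        from urllib import quote  # Py2

    def cache_key_to_filename(key):
        # percent-encode all reserved/non-ASCII chars, then drop the '%'
        return quote(key, safe='').replace('%', ',')

    print(cache_key_to_filename('nsig-codes'))  # nsig-codes (unchanged)
    print(cache_key_to_filename('a/b?c'))       # a,2Fb,3Fc
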
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index 8910a4dac..a985cb03e 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -55,7 +55,7 @@ except AttributeError:
try:
import collections.abc as compat_collections_abc
except ImportError:
- import collections as compat_collections_abc
+ compat_collections_abc = collections
# compat_urllib_request
@@ -2498,8 +2498,7 @@ try:
from urllib.parse import urlencode as compat_urllib_parse_urlencode
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
- _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
- else re.compile(r'([\x00-\x7f]+)'))
+ _asciire = getattr(compat_urllib_parse, '_asciire', None) or re.compile(r'([\x00-\x7f]+)')
# HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
# implementations from cpython 3.4.3's stdlib. Python 2's version
@@ -2567,24 +2566,21 @@ except ImportError: # Python 2
# Possible solutions are to either port it from python 3 with all
# the friends or manually ensure input query contains only byte strings.
# We will stick with latter thus recursively encoding the whole query.
- def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
+ def compat_urllib_parse_urlencode(query, doseq=0, safe='', encoding='utf-8', errors='strict'):
+
def encode_elem(e):
if isinstance(e, dict):
e = encode_dict(e)
elif isinstance(e, (list, tuple,)):
- list_e = encode_list(e)
- e = tuple(list_e) if isinstance(e, tuple) else list_e
+ e = type(e)(encode_elem(el) for el in e)
elif isinstance(e, compat_str):
- e = e.encode(encoding)
+ e = e.encode(encoding, errors)
return e
def encode_dict(d):
- return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
-
- def encode_list(l):
- return [encode_elem(e) for e in l]
+ return tuple((encode_elem(k), encode_elem(v)) for k, v in d.items())
- return compat_urllib_parse._urlencode(encode_elem(query), doseq=doseq)
+ return compat_urllib_parse._urlencode(encode_elem(query), doseq=doseq).decode('ascii')
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
@@ -2639,6 +2635,57 @@ except ImportError: # Python 2
('parse_qs', compat_parse_qs)):
setattr(compat_urllib_parse, name, fix)
+ try:
+ all(chr(i) in b'' for i in range(256))
+ except TypeError:
+ # not all chr(i) are str: patch Python2 quote
+
+ _safemaps = getattr(compat_urllib_parse, '_safemaps', {})
+ _always_safe = frozenset(compat_urllib_parse.always_safe)
+
+ def _quote(s, safe='/'):
+ """quote('abc def') -> 'abc%20def'"""
+
+ if not s and s is not None: # fast path
+ return s
+ safe = frozenset(safe)
+ cachekey = (safe, _always_safe)
+ try:
+ safe_map = _safemaps[cachekey]
+ except KeyError:
+ safe = _always_safe | safe
+ safe_map = {}
+ for i in range(256):
+ c = chr(i)
+ safe_map[c] = (
+ c if (i < 128 and c in safe)
+ else b'%{0:02X}'.format(i))
+ _safemaps[cachekey] = safe_map
+
+ if safe.issuperset(s):
+ return s
+ return ''.join(safe_map[c] for c in s)
+
+ # linked code
+ def _quote_plus(s, safe=''):
+ return (
+ _quote(s, safe + b' ').replace(b' ', b'+') if b' ' in s
+ else _quote(s, safe))
+
+ # linked code
+ def _urlcleanup():
+ if compat_urllib_parse._urlopener:
+ compat_urllib_parse._urlopener.cleanup()
+ _safemaps.clear()
+ compat_urllib_parse.ftpcache.clear()
+
+ for name, fix in (
+ ('quote', _quote),
+ ('quote_plus', _quote_plus),
+ ('urlcleanup', _urlcleanup)):
+ setattr(compat_urllib_parse, '_' + name, getattr(compat_urllib_parse, name))
+ setattr(compat_urllib_parse, name, fix)
+
compat_urllib_parse_parse_qs = compat_parse_qs
@@ -3120,6 +3167,21 @@ else:
compat_os_path_expanduser = compat_expanduser
+# compat_os_makedirs
+try:
+ os.makedirs('.', exist_ok=True)
+ compat_os_makedirs = os.makedirs
+except TypeError: # < Py3.2
+ from errno import EEXIST as _errno_EEXIST
+
+ def compat_os_makedirs(name, mode=0o777, exist_ok=False):
+ try:
+ return os.makedirs(name, mode=mode)
+ except OSError as ose:
+ if not (exist_ok and ose.errno == _errno_EEXIST):
+ raise
+
+
# compat_os_path_realpath
if compat_os_name == 'nt' and sys.version_info < (3, 8):
# os.path.realpath on Windows does not follow symbolic links
@@ -3390,6 +3452,8 @@ except ImportError:
except ImportError:
compat_map = map
+
+# compat_filter, compat_filter_fns
try:
from future_builtins import filter as compat_filter
except ImportError:
@@ -3397,6 +3461,9 @@ except ImportError:
from itertools import ifilter as compat_filter
except ImportError:
compat_filter = filter
+# "Is this function one or maybe the other filter()?"
+compat_filter_fns = tuple(set((filter, compat_filter)))
+
# compat_zip
try:
@@ -3416,6 +3483,40 @@ except ImportError:
from itertools import izip_longest as compat_itertools_zip_longest
+# compat_abc_ABC
+try:
+ from abc import ABC as compat_abc_ABC
+except ImportError:
+ # Py < 3.4
+ from abc import ABCMeta as _ABCMeta
+ compat_abc_ABC = _ABCMeta(str('ABC'), (object,), {})
+
+
+# dict mixin used here
+# like UserDict.DictMixin, without methods created by MutableMapping
+class _DictMixin(compat_abc_ABC):
+ def has_key(self, key):
+ return key in self
+
+ # get(), clear(), setdefault() in MM
+
+ def iterkeys(self):
+ return (k for k in self)
+
+ def itervalues(self):
+ return (self[k] for k in self)
+
+ def iteritems(self):
+ return ((k, self[k]) for k in self)
+
+ # pop(), popitem() in MM
+
+ def copy(self):
+ return type(self)(self)
+
+ # update() in MM
+
+
# compat_collections_chain_map
# collections.ChainMap: new class
try:
@@ -3570,6 +3671,129 @@ except ImportError:
compat_zstandard = None
+# compat_thread
+try:
+ import _thread as compat_thread
+except ImportError:
+ try:
+ import thread as compat_thread
+ except ImportError:
+ import dummy_thread as compat_thread
+
+
+# compat_dict
+# compat_builtins_dict
+# compat_dict_items
+if sys.version_info >= (3, 6):
+ compat_dict = compat_builtins_dict = dict
+ compat_dict_items = dict.items
+else:
+ _get_ident = compat_thread.get_ident
+
+ class compat_dict(compat_collections_abc.MutableMapping, _DictMixin, dict):
+ """`dict` that preserves insertion order with interface like Py3.7+"""
+
+ _order = [] # default that should never be used
+
+ def __init__(self, *mappings_or_iterables, **kwargs):
+ # order an unordered dict using a list of keys: actual Py 2.7+
+ # OrderedDict uses a doubly linked list for better performance
+ self._order = []
+ for arg in mappings_or_iterables:
+ self.__update(arg)
+ if kwargs:
+ self.__update(kwargs)
+
+ def __getitem__(self, key):
+ return dict.__getitem__(self, key)
+
+ def __setitem__(self, key, value):
+ try:
+ if key not in self._order:
+ self._order.append(key)
+ dict.__setitem__(self, key, value)
+ except Exception:
+ if key in self._order[-1:] and key not in self:
+ del self._order[-1]
+ raise
+
+ def __len__(self):
+ return dict.__len__(self)
+
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ try:
+ # expected case, O(len(self)), but who dels anyway?
+ self._order.remove(key)
+ except ValueError:
+ pass
+
+ def __iter__(self):
+ for from_ in self._order:
+ if from_ in self:
+ yield from_
+
+ def __del__(self):
+ for attr in ('_order',):
+ try:
+ delattr(self, attr)
+ except Exception:
+ pass
+
+ def __repr__(self, _repr_running={}):
+ # skip recursive items ...
+ call_key = id(self), _get_ident()
+ if _repr_running.get(call_key):
+ return '...'
+ _repr_running[call_key] = True
+ try:
+ return '%s({%s})' % (
+ type(self).__name__,
+ ','.join('%r: %r' % k_v for k_v in self.items()))
+ finally:
+ del _repr_running[call_key]
+
+ # merge/update (PEP 584)
+
+ def __or__(self, other):
+ if not isinstance(other, compat_collections_abc.Mapping):
+ return NotImplemented
+ new = type(self)(self)
+ new.update(other)
+ return new
+
+ def __ror__(self, other):
+ if not isinstance(other, compat_collections_abc.Mapping):
+ return NotImplemented
+ new = type(other)(other)
+ new.update(self)
+ return new
+
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ # optimisations
+
+ def __reversed__(self):
+ for from_ in reversed(self._order):
+ if from_ in self:
+ yield from_
+
+ def __contains__(self, item):
+ return dict.__contains__(self, item)
+
+ # allow overriding update without breaking __init__
+ def __update(self, *args, **kwargs):
+ super(compat_dict, self).update(*args, **kwargs)
+
+ compat_builtins_dict = dict
+ # Using the object's method, not dict's:
+ # an ordered dict's items can be returned unstably by unordered
+ # dict.items as if the method was not ((k, self[k]) for k in self)
+ compat_dict_items = lambda d: d.items()
+
+
legacy = [
'compat_HTMLParseError',
'compat_HTMLParser',
@@ -3600,9 +3824,11 @@ legacy = [
__all__ = [
'compat_Struct',
+ 'compat_abc_ABC',
'compat_base64_b64decode',
'compat_basestring',
'compat_brotli',
+ 'compat_builtins_dict',
'compat_casefold',
'compat_chr',
'compat_collections_abc',
@@ -3610,9 +3836,12 @@ __all__ = [
'compat_contextlib_suppress',
'compat_ctypes_WINFUNCTYPE',
'compat_datetime_timedelta_total_seconds',
+ 'compat_dict',
+ 'compat_dict_items',
'compat_etree_fromstring',
'compat_etree_iterfind',
'compat_filter',
+ 'compat_filter_fns',
'compat_get_terminal_size',
'compat_getenv',
'compat_getpass_getpass',
@@ -3637,6 +3866,7 @@ __all__ = [
'compat_numeric_types',
'compat_open',
'compat_ord',
+ 'compat_os_makedirs',
'compat_os_name',
'compat_os_path_expanduser',
'compat_os_path_realpath',
@@ -3653,6 +3883,7 @@ __all__ = [
'compat_struct_unpack',
'compat_subprocess_get_DEVNULL',
'compat_subprocess_Popen',
+ 'compat_thread',
'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
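
Note: on Python < 3.6 compat_dict backfills insertion order (and, on the fallback class only, the PEP 584 merge operators | and |=); on 3.6+ it is just the built-in dict. Expected behaviour either way (illustrative usage):

    from youtube_dl.compat import compat_dict

    d = compat_dict([('b', 1)], a=2)
    d['c'] = 3
    print(list(d))  # ['b', 'a', 'c'] -- insertion order preserved
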
diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py
index 91e691776..8354030a9 100644
--- a/youtube_dl/downloader/common.py
+++ b/youtube_dl/downloader/common.py
@@ -11,6 +11,7 @@ from ..utils import (
decodeArgument,
encodeFilename,
error_to_compat_str,
+ float_or_none,
format_bytes,
shell_quote,
timeconvert,
@@ -367,14 +368,27 @@ class FileDownloader(object):
})
return True
- min_sleep_interval = self.params.get('sleep_interval')
- if min_sleep_interval:
- max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
- sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
+ min_sleep_interval, max_sleep_interval = (
+ float_or_none(self.params.get(interval), default=0)
+ for interval in ('sleep_interval', 'max_sleep_interval'))
+
+ sleep_note = ''
+ available_at = info_dict.get('available_at')
+ if available_at:
+ forced_sleep_interval = available_at - int(time.time())
+ if forced_sleep_interval > min_sleep_interval:
+ sleep_note = 'as required by the site'
+ min_sleep_interval = forced_sleep_interval
+ if forced_sleep_interval > max_sleep_interval:
+ max_sleep_interval = forced_sleep_interval
+
+ sleep_interval = random.uniform(
+ min_sleep_interval, max_sleep_interval or min_sleep_interval)
+
+ if sleep_interval > 0:
self.to_screen(
- '[download] Sleeping %s seconds...' % (
- int(sleep_interval) if sleep_interval.is_integer()
- else '%.2f' % sleep_interval))
+ '[download] Sleeping %.2f seconds %s...' % (
+ sleep_interval, sleep_note))
time.sleep(sleep_interval)
return self.real_download(filename, info_dict)
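
Note: when a format carries available_at, the forced wait overrides the configured sleep bounds whenever it is longer. The interval selection as plain arithmetic (standalone sketch; assumes the format becomes downloadable in about 30 seconds):

    import random
    import time

    min_sleep, max_sleep = 5.0, 10.0      # from the --sleep-interval options
    available_at = int(time.time()) + 30  # hypothetical site requirement

    forced = available_at - int(time.time())
    if forced > min_sleep:
        min_sleep = forced                # 30, 'as required by the site'
        if forced > max_sleep:
            max_sleep = forced            # 30
    print('sleeping %.2f s' % random.uniform(
        min_sleep, max_sleep or min_sleep))  # ~30.00
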
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index cb67b976d..a0901dab5 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -214,6 +214,7 @@ class InfoExtractor(object):
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
+ * available_at Unix timestamp of when a format will be available to download
* downloader_options A dictionary of downloader options as
described in FileDownloader
@@ -505,7 +506,7 @@ class InfoExtractor(object):
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
- if not self._downloader.params.get('geo_bypass', True):
+ if not self.get_param('geo_bypass', True):
return
if not geo_bypass_context:
@@ -527,7 +528,7 @@ class InfoExtractor(object):
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
- ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
+ ip_block = self.get_param('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
@@ -538,8 +539,8 @@ class InfoExtractor(object):
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
- if self._downloader.params.get('verbose', False):
- self._downloader.to_screen(
+ if self.get_param('verbose', False):
+ self.to_screen(
'[debug] Using fake IP %s as X-Forwarded-For.'
% self._x_forwarded_for_ip)
return
@@ -548,7 +549,7 @@ class InfoExtractor(object):
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
- country = self._downloader.params.get('geo_bypass_country', None)
+ country = self.get_param('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
@@ -559,8 +560,8 @@ class InfoExtractor(object):
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
- if self._downloader.params.get('verbose', False):
- self._downloader.to_screen(
+ if self.get_param('verbose', False):
+ self.to_screen(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country.upper()))
@@ -586,9 +587,9 @@ class InfoExtractor(object):
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
- if (not self._downloader.params.get('geo_bypass_country', None)
+ if (not self.get_param('geo_bypass_country', None)
and self._GEO_BYPASS
- and self._downloader.params.get('geo_bypass', True)
+ and self.get_param('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
@@ -698,7 +699,7 @@ class InfoExtractor(object):
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
- self._downloader.report_warning(errmsg)
+ self.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
@@ -770,11 +771,11 @@ class InfoExtractor(object):
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
- if self._downloader.params.get('dump_intermediate_pages', False):
+ if self.get_param('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
- self._downloader.to_screen(dump)
- if self._downloader.params.get('write_pages', False):
+ self.to_screen(dump)
+ if self.get_param('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
@@ -976,19 +977,9 @@ class InfoExtractor(object):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen(self.__ie_msg(msg))
- def write_debug(self, msg, only_once=False, _cache=[]):
+ def write_debug(self, msg, only_once=False):
'''Log debug message or Print message to stderr'''
- if not self.get_param('verbose', False):
- return
- message = '[debug] ' + self.__ie_msg(msg)
- logger = self.get_param('logger')
- if logger:
- logger.debug(message)
- else:
- if only_once and hash(message) in _cache:
- return
- self._downloader.to_stderr(message)
- _cache.append(hash(message))
+ self._downloader.write_debug(self.__ie_msg(msg), only_once=only_once)
# name, default=None, *args, **kwargs
def get_param(self, name, *args, **kwargs):
@@ -1084,7 +1075,7 @@ class InfoExtractor(object):
if mobj:
break
- if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
+ if not self.get_param('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
@@ -1102,7 +1093,7 @@ class InfoExtractor(object):
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
- self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
+ self.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _search_json(self, start_pattern, string, name, video_id, **kwargs):
@@ -1172,7 +1163,7 @@ class InfoExtractor(object):
username = None
password = None
- if self._downloader.params.get('usenetrc', False):
+ if self.get_param('usenetrc', False):
try:
netrc_machine = netrc_machine or self._NETRC_MACHINE
info = netrc.netrc().authenticators(netrc_machine)
@@ -1183,7 +1174,7 @@ class InfoExtractor(object):
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (AttributeError, IOError, netrc.NetrcParseError) as err:
- self._downloader.report_warning(
+ self.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
@@ -1220,10 +1211,10 @@ class InfoExtractor(object):
"""
if self._downloader is None:
return None
- downloader_params = self._downloader.params
- if downloader_params.get('twofactor') is not None:
- return downloader_params['twofactor']
+ twofactor = self.get_param('twofactor')
+ if twofactor is not None:
+ return twofactor
return compat_getpass('Type %s and press [Return]: ' % note)
@@ -1358,7 +1349,7 @@ class InfoExtractor(object):
elif fatal:
raise RegexNotFoundError('Unable to extract JSON-LD')
else:
- self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
+ self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
@@ -1589,7 +1580,7 @@ class InfoExtractor(object):
if f.get('vcodec') == 'none': # audio only
preference -= 50
- if self._downloader.params.get('prefer_free_formats'):
+ if self.get_param('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
@@ -1601,7 +1592,7 @@ class InfoExtractor(object):
else:
if f.get('acodec') == 'none': # video only
preference -= 40
- if self._downloader.params.get('prefer_free_formats'):
+ if self.get_param('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
@@ -1667,7 +1658,7 @@ class InfoExtractor(object):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
- if self._downloader.params.get('prefer_insecure', False)
+ if self.get_param('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
@@ -3199,7 +3190,7 @@ class InfoExtractor(object):
if fatal:
raise ExtractorError(msg)
else:
- self._downloader.report_warning(msg)
+ self.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
@@ -3209,7 +3200,7 @@ class InfoExtractor(object):
if fatal:
raise ExtractorError(msg)
else:
- self._downloader.report_warning(msg)
+ self.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
@@ -3218,12 +3209,12 @@ class InfoExtractor(object):
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
- self._downloader.cookiejar.set_cookie(cookie)
+ self.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies_SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
- self._downloader.cookiejar.add_cookie_header(req)
+ self.cookiejar.add_cookie_header(req)
return compat_cookies_SimpleCookie(req.get_header('Cookie'))
def _apply_first_set_cookie_header(self, url_handle, cookie):
@@ -3283,8 +3274,8 @@ class InfoExtractor(object):
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
- if (self._downloader.params.get('writesubtitles', False)
- or self._downloader.params.get('listsubtitles')):
+ if (self.get_param('writesubtitles', False)
+ or self.get_param('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
@@ -3305,7 +3296,11 @@ class InfoExtractor(object):
""" Merge subtitle dictionaries, language by language. """
# ..., * , target=None
- target = kwargs.get('target') or dict(subtitle_dict1)
+ target = kwargs.get('target')
+ if target is None:
+ target = dict(subtitle_dict1)
+ else:
+ subtitle_dicts = (subtitle_dict1,) + subtitle_dicts
for subtitle_dict in subtitle_dicts:
for lang in subtitle_dict:
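[Illustration, not part of the patch: the per-language merge body falls outside this hunk's context, so the list-concatenation step below is an assumption. A self-contained sketch of the semantics that the Python 2-safe `target` keyword emulation above produces:]

    def merge_subtitles(dict1, *dicts, **kwargs):
        # emulates the Py3-only signature: merge_subtitles(d1, *dicts, target=None)
        target = kwargs.get('target')
        if target is None:
            target = dict(dict1)      # merge the rest into a copy of the first
        else:
            dicts = (dict1,) + dicts  # merge everything into the caller's dict
        for d in dicts:
            for lang in d:
                # assumed merge step: combine per-language subtitle lists
                target[lang] = target.get(lang, []) + d[lang]
        return target

    assert merge_subtitles({'en': [1]}, {'en': [2], 'fr': [3]}) == {'en': [1, 2], 'fr': [3]}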
@@ -3313,8 +3308,8 @@ class InfoExtractor(object):
return target
def extract_automatic_captions(self, *args, **kwargs):
- if (self._downloader.params.get('writeautomaticsub', False)
- or self._downloader.params.get('listsubtitles')):
+ if (self.get_param('writeautomaticsub', False)
+ or self.get_param('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
@@ -3322,9 +3317,9 @@ class InfoExtractor(object):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
- if (self._downloader.params.get('mark_watched', False)
+ if (self.get_param('mark_watched', False)
and (self._get_login_info()[0] is not None
- or self._downloader.params.get('cookiefile') is not None)):
+ or self.get_param('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
@@ -3332,7 +3327,7 @@ class InfoExtractor(object):
def geo_verification_headers(self):
headers = {}
- geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
+ geo_verification_proxy = self.get_param('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index ce97fd75b..7965fa08a 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1,5 +1,4 @@
# coding: utf-8
-
from __future__ import unicode_literals
import collections
@@ -18,6 +17,8 @@ from ..compat import (
compat_chr,
compat_HTTPError,
compat_map as map,
+ compat_dict as o_dict,
+ compat_dict_items as dict_items,
compat_str,
compat_urllib_parse,
compat_urllib_parse_parse_qs as compat_parse_qs,
@@ -49,6 +50,7 @@ from ..utils import (
parse_duration,
parse_qs,
qualities,
+ remove_end,
remove_start,
smuggle_url,
str_or_none,
@@ -86,8 +88,24 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'
- _INNERTUBE_CLIENTS = {
- 'ios': {
+ # priority order for now
+ _INNERTUBE_CLIENTS = o_dict((
+ # Doesn't require a PoToken for some reason: thx yt-dlp/yt-dlp#14693
+ ('android_sdkless', {
+ 'INNERTUBE_CONTEXT': {
+ 'client': {
+ 'clientName': 'ANDROID',
+ 'clientVersion': '20.10.38',
+ 'userAgent': 'com.google.android.youtube/20.10.38 (Linux; U; Android 11) gzip',
+ 'osName': 'Android',
+ 'osVersion': '11',
+ },
+ },
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
+ 'REQUIRE_JS_PLAYER': False,
+ 'WITH_COOKIES': False,
+ }),
+ ('ios', {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS',
@@ -100,47 +118,95 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
- 'REQUIRE_JS_PLAYER': False,
'REQUIRE_PO_TOKEN': True,
- },
+ 'REQUIRE_JS_PLAYER': False,
+ 'WITH_COOKIES': False,
+ }),
# mweb has 'ultralow' formats
# See: https://github.com/yt-dlp/yt-dlp/pull/557
- 'mweb': {
+ ('mweb', {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'MWEB',
- 'clientVersion': '2.20250311.03.00',
+ 'clientVersion': '2.20250925.01.00',
# mweb previously did not require PO Token with this UA
'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 2,
'REQUIRE_PO_TOKEN': True,
- 'SUPPORTS_COOKIES': True,
- },
- 'tv': {
+ }),
+ ('tv_downgraded', {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'TVHTML5',
- 'clientVersion': '7.20250312.16.00',
+ 'clientVersion': '4', # avoids SABR formats, thx yt-dlp/yt-dlp#14887
'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 7,
'SUPPORTS_COOKIES': True,
- },
- 'web': {
+ }),
+ ('tv', {
+ 'INNERTUBE_CONTEXT': {
+ 'client': {
+ 'clientName': 'TVHTML5',
+ 'clientVersion': '7.20250312.16.00',
+ # See: https://github.com/youtube/cobalt/blob/main/cobalt/browser/user_agent/user_agent_platform_info.cc#L506
+ 'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/25.lts.30.1034943-gold (unlike Gecko), Unknown_TV_Unknown_0/Unknown (Unknown, Unknown)',
+ },
+ },
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 7,
+ }),
+ ('web', {
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB',
- 'clientVersion': '2.20250312.04.00',
+ 'clientVersion': '2.20250925.01.00',
+ 'userAgent': 'Mozilla/5.0',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
'REQUIRE_PO_TOKEN': True,
+ }),
+ ('web_embedded', {
+ 'INNERTUBE_CONTEXT': {
+ 'client': {
+ 'clientName': 'WEB_EMBEDDED_PLAYER',
+ 'clientVersion': '1.20250923.21.00',
+ 'embedUrl': 'https://www.youtube.com/', # Can be any valid URL
+ },
+ },
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 56,
'SUPPORTS_COOKIES': True,
- },
- }
+ }),
+ # Safari UA returns pre-merged video+audio 144p/240p/360p/720p/1080p HLS formats
+ ('web_safari', {
+ 'INNERTUBE_CONTEXT': {
+ 'client': {
+ 'clientName': 'WEB',
+ 'clientVersion': '2.20250925.01.00',
+ 'userAgent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.5 Safari/605.1.15,gzip(gfe)',
+ },
+ },
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
+ 'SUPPORTS_COOKIES': True,
+ 'REQUIRE_PO_TOKEN': True,
+ }),
+ # This client now requires sign-in for every video
+ ('web_creator', {
+ 'INNERTUBE_CONTEXT': {
+ 'client': {
+ 'clientName': 'WEB_CREATOR',
+ 'clientVersion': '1.20250922.03.00',
+ },
+ },
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
+ 'REQUIRE_AUTH': True,
+ 'SUPPORTS_COOKIES': True,
+ 'WITH_COOKIES': True,
+ }),
+ ))
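[Note on the change from a plain dict to an ordered mapping: client selection in `_real_extract` walks this table in declaration order and takes the first acceptable entry. Roughly, as a sketch with `clients` and `auth` standing in for `self._INNERTUBE_CLIENTS` and the SAPISIDHASH header:]

    def pick_default_client(clients, auth):
        for name, cfg in clients.items():  # insertion order == priority
            if cfg.get('REQUIRE_PO_TOKEN'):
                continue  # would need a PO token we cannot mint
            if bool(cfg.get('WITH_COOKIES', auth)) != bool(auth):
                continue  # cookie expectation does not match auth state
            return name   # e.g. 'android_sdkless' when unauthenticated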
def _login(self):
"""
@@ -342,14 +408,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
if not self._login():
return
- _DEFAULT_API_DATA = {
- 'context': {
- 'client': {
- 'clientName': 'WEB',
- 'clientVersion': '2.20201021.03.00',
- },
- },
- }
+ _DEFAULT_API_DATA = {'context': _INNERTUBE_CLIENTS['web']['INNERTUBE_CONTEXT']}
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
@@ -424,11 +483,22 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
('responseContext', 'visitorData')),
T(compat_str)))
+ # @functools.cached_property
+ @property
+ def is_authenticated(self, _cache={}):
+ if self not in _cache:
+ _cache[self] = bool(self._generate_sapisidhash_header())
+ return _cache[self]
+
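[The commented-out decorator hints at the intent: `functools.cached_property` is unavailable on the Pythons youtube-dl still supports, so a shared mutable default argument keyed on the instance stands in for it. A self-contained illustration of the idiom (toy class, not from the patch):]

    class Example(object):
        calls = 0

        @property
        def answer(self, _cache={}):  # one dict shared across all instances
            if self not in _cache:
                Example.calls += 1
                _cache[self] = 42     # stand-in for an expensive computation
            return _cache[self]

    e = Example()
    assert e.answer == e.answer == 42
    assert Example.calls == 1  # computed once; note entries are never evicted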
def _extract_ytcfg(self, video_id, webpage):
- return self._parse_json(
- self._search_regex(
- r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
- default='{}'), video_id, fatal=False) or {}
+ ytcfg = self._search_json(
+ r'ytcfg\.set\s*\(', webpage, 'ytcfg', video_id,
+ end_pattern=r'\)\s*;', default={})
+
+ traverse_obj(ytcfg, (
+ 'INNERTUBE_CONTEXT', 'client', 'configInfo',
+ T(lambda x: x.pop('appInstallData', None))))
+
+ return ytcfg
def _extract_video(self, renderer):
video_id = renderer['videoId']
@@ -464,9 +534,31 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
}
@staticmethod
+ def _get_text(data, *path_list, **kw_max_runs):
+ max_runs = kw_max_runs.get('max_runs')
+
+ for path in path_list or [None]:
+ if path is None:
+ obj = [data] # shortcut
+ else:
+ obj = traverse_obj(data, tuple(variadic(path)) + (all,))
+ for runs in traverse_obj(
+ obj, ('simpleText', {'text': T(compat_str)}, all, filter),
+ ('runs', lambda _, r: isinstance(r.get('text'), compat_str), all, filter),
+ (T(list), lambda _, r: isinstance(r.get('text'), compat_str)),
+ default=[]):
+ max_runs = int_or_none(max_runs, default=len(runs))
+ if max_runs < len(runs):
+ runs = runs[:max_runs]
+ text = ''.join(traverse_obj(runs, (Ellipsis, 'text')))
+ if text:
+ return text
+
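[For orientation, the two text shapes that `_get_text()` normalises, with hypothetical renderer data:]

    simple = {'simpleText': 'Foo'}
    runs = {'runs': [{'text': 'Foo'}, {'text': 'Bar'}]}
    # _get_text(simple)           -> 'Foo'
    # _get_text(runs)             -> 'FooBar'
    # _get_text(runs, max_runs=1) -> 'Foo'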
+ @staticmethod
def _extract_thumbnails(data, *path_list, **kw_final_key):
"""
Extract thumbnails from thumbnails dict
+
@param path_list: path list to level that contains 'thumbnails' key
"""
final_key = kw_final_key.get('final_key', 'thumbnails')
@@ -497,42 +589,38 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
data['params'] = params
for page_num in itertools.count(1):
search = self._download_json(
- 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
+ 'https://www.youtube.com/youtubei/v1/search',
video_id='query "%s"' % query,
note='Downloading page %s' % page_num,
errnote='Unable to download API page', fatal=False,
data=json.dumps(data).encode('utf8'),
+ query={
+ # 'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
+ 'prettyPrint': 'false',
+ },
headers={'content-type': 'application/json'})
if not search:
break
- slr_contents = try_get(
+ slr_contents = traverse_obj(
search,
- (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
- lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
- list)
+ ('contents', 'twoColumnSearchResultsRenderer', 'primaryContents',
+ 'sectionListRenderer', 'contents'),
+ ('onResponseReceivedCommands', 0, 'appendContinuationItemsAction',
+ 'continuationItems'),
+ expected_type=list)
if not slr_contents:
break
- for slr_content in slr_contents:
- isr_contents = try_get(
- slr_content,
- lambda x: x['itemSectionRenderer']['contents'],
- list)
- if not isr_contents:
- continue
- for content in isr_contents:
- if not isinstance(content, dict):
- continue
- video = content.get('videoRenderer')
- if not isinstance(video, dict):
- continue
- video_id = video.get('videoId')
- if not video_id:
- continue
- yield self._extract_video(video)
- token = try_get(
+ for video in traverse_obj(
+ slr_contents,
+ (Ellipsis, 'itemSectionRenderer', 'contents',
+ Ellipsis, 'videoRenderer',
+ T(lambda v: v if v.get('videoId') else None))):
+ yield self._extract_video(video)
+
+ token = traverse_obj(
slr_contents,
- lambda x: x[-1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
- compat_str)
+ (-1, 'continuationItemRenderer', 'continuationEndpoint',
+ 'continuationCommand', 'token', T(compat_str)))
if not token:
break
data['continuation'] = token
@@ -696,7 +784,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias(?:_tce)?\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]{6,})\b.*?\.js$',
)
- _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
+ _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'srt', 'vtt')
_GEO_BYPASS = False
@@ -1587,6 +1675,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
}
+ _PLAYER_JS_VARIANT_MAP = (
+ ('main', 'player_ias.vflset/en_US/base.js'),
+ ('tcc', 'player_ias_tcc.vflset/en_US/base.js'),
+ ('tce', 'player_ias_tce.vflset/en_US/base.js'),
+ ('es5', 'player_es5.vflset/en_US/base.js'),
+ ('es6', 'player_es6.vflset/en_US/base.js'),
+ ('tv', 'tv-player-ias.vflset/tv-player-ias.js'),
+ ('tv_es6', 'tv-player-es6.vflset/tv-player-es6.js'),
+ ('phone', 'player-plasma-ias-phone-en_US.vflset/base.js'),
+ ('tablet', 'player-plasma-ias-tablet-en_US.vflset/base.js'),
+ )
+
@classmethod
def suitable(cls, url):
if parse_qs(url).get('list', [None])[0]:
@@ -1598,6 +1698,20 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self._code_cache = {}
self._player_cache = {}
+ def _get_player_js_version(self):
+ player_js_version = self.get_param('youtube_player_js_version')
+ if player_js_version:
+ sts_hash = self._search_regex(
+ ('^actual$(^)?(^)?', r'^([0-9]{5,})@([0-9a-f]{8,})$'),
+ player_js_version, 'player_js_version', group=(1, 2), default=None)
+ if sts_hash:
+ return sts_hash
+ self.report_warning(
+ 'Invalid player JS version "{0}" specified. '
+ 'It should be "{1}" or in the format of {2}'.format(
+ player_js_version, 'actual', 'SignatureTimeStamp@Hash'), only_once=True)
+ return None, None
+
# *ytcfgs, webpage=None
def _extract_player_url(self, *ytcfgs, **kw_webpage):
if ytcfgs and not isinstance(ytcfgs[0], dict):
@@ -1608,9 +1722,34 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
webpage or '', 'player URL', fatal=False)
if player_url:
ytcfgs = ytcfgs + ({'PLAYER_JS_URL': player_url},)
- return traverse_obj(
+ player_url = traverse_obj(
ytcfgs, (Ellipsis, 'PLAYER_JS_URL'), (Ellipsis, 'WEB_PLAYER_CONTEXT_CONFIGS', Ellipsis, 'jsUrl'),
- get_all=False, expected_type=lambda u: urljoin('https://www.youtube.com', u))
+ get_all=False, expected_type=self._yt_urljoin)
+
+ requested_js_variant = self.get_param('youtube_player_js_variant')
+ variant_js = next(
+ (v for k, v in self._PLAYER_JS_VARIANT_MAP if k == requested_js_variant),
+ None)
+ if variant_js:
+ player_id_override = self._get_player_js_version()[1]
+ player_id = player_id_override or self._extract_player_info(player_url)
+ original_url = player_url
+ player_url = self._yt_urljoin(
+ '/s/player/{0}/{1}'.format(player_id, variant_js))
+ if original_url != player_url:
+ self.write_debug(
+ 'Forcing "{0}" player JS variant for player {1}\n'
+ ' original url = {2}'.format(
+ requested_js_variant, player_id, original_url),
+ only_once=True)
+ elif requested_js_variant != 'actual':
+ self.report_warning(
+ 'Invalid player JS variant name "{0}" requested. '
+ 'Valid choices are: {1}'.format(
+ requested_js_variant, ','.join(k for k, _ in self._PLAYER_JS_VARIANT_MAP)),
+ only_once=True)
+
+ return player_url
def _download_player_url(self, video_id, fatal=False):
res = self._download_webpage(
@@ -1618,9 +1757,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
player_version = self._search_regex(
r'player\\?/([0-9a-fA-F]{8})\\?/', res or '', 'player version', fatal=fatal,
- default=NO_DEFAULT if res else None)
- if player_version:
- return 'https://www.youtube.com/s/player/{0}/player_ias.vflset/en_US/base.js'.format(player_version)
+ default=NO_DEFAULT if res else None) or None
+ return player_version and 'https://www.youtube.com/s/player/{0}/player_ias.vflset/en_US/base.js'.format(player_version)
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
@@ -1634,36 +1772,89 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
raise ExtractorError(
'Cannot identify player %r' % (player_url,), cause=e)
- def _load_player(self, video_id, player_url, fatal=True, player_id=None):
- if not player_id:
+ def _player_js_cache_key(self, player_url, extra_id=None, _cache={}):
+ if player_url not in _cache:
player_id = self._extract_player_info(player_url)
- if player_id not in self._code_cache:
+ player_path = remove_start(
+ compat_urllib_parse.urlparse(player_url).path,
+ '/s/player/{0}/'.format(player_id))
+ variant = next((k for k, v in self._PLAYER_JS_VARIANT_MAP
+ if v == player_path), None)
+ if not variant:
+ variant = next(
+ (k for k, v in self._PLAYER_JS_VARIANT_MAP
+ if re.match(re.escape(v).replace('en_US', r'\w+') + '$', player_path)),
+ None)
+ if not variant:
+ self.write_debug(
+ 'Unable to determine player JS variant\n'
+ ' player = {0}'.format(player_url), only_once=True)
+ variant = re.sub(r'[^a-zA-Z0-9]', '_', remove_end(player_path, '.js'))
+ _cache[player_url] = join_nonempty(player_id, variant)
+
+ if extra_id:
+ extra_id = '-'.join((_cache[player_url], extra_id))
+ assert os.path.basename(extra_id) == extra_id
+ return extra_id
+ return _cache[player_url]
+
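[Examples of the derived cache keys; the player IDs are invented:]

    # /s/player/9bb09009/player_ias.vflset/en_US/base.js     -> '9bb09009-main'
    # /s/player/9bb09009/player_ias_tce.vflset/sv_SE/base.js -> '9bb09009-tce'
    #     (the second lookup relaxes en_US to any \w+ locale)
    # /s/player/9bb09009/unknown_layout/base.js              -> '9bb09009-unknown_layout_base'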
+ def _load_player(self, video_id, player_url, fatal=True):
+ player_js_key = self._player_js_cache_key(player_url)
+ if player_js_key not in self._code_cache:
code = self._download_webpage(
player_url, video_id, fatal=fatal,
- note='Downloading player ' + player_id,
- errnote='Download of %s failed' % player_url)
+ note='Downloading player {0}'.format(player_js_key),
+ errnote='Download of {0} failed'.format(player_url))
if code:
- self._code_cache[player_id] = code
- return self._code_cache[player_id] if fatal else self._code_cache.get(player_id)
+ self._code_cache[player_js_key] = code
+ return self._code_cache.get(player_js_key)
+
+ def _load_player_data_from_cache(self, name, player_url, extra_id=None):
+ cache_id = ('youtube-{0}'.format(name), self._player_js_cache_key(player_url, extra_id))
+ data = self._player_cache.get(cache_id)
+ if data:
+ return data
+
+ data = self.cache.load(*cache_id, min_ver='2025.04.07')
+ if data:
+ self._player_cache[cache_id] = data
+ return data
+
+ def _store_player_data_to_cache(self, name, player_url, data, extra_id=None):
+ cache_id = ('youtube-{0}'.format(name), self._player_js_cache_key(player_url, extra_id))
+
+ if cache_id not in self._player_cache:
+ self.cache.store(cache_id[0], cache_id[1], data)
+ self._player_cache[cache_id] = data
+
+ def _remove_player_data_from_cache(self, name, player_url, extra_id=None):
+ cache_id = ('youtube-{0}'.format(name), self._player_js_cache_key(player_url, extra_id))
+
+ if cache_id in self._player_cache:
+ self.cache.clear(*cache_id)
+ self._player_cache.pop(cache_id, None)
def _extract_signature_function(self, video_id, player_url, example_sig):
- player_id = self._extract_player_info(player_url)
+ # player_id = self._extract_player_info(player_url)
# Read from filesystem cache
- func_id = 'js_{0}_{1}'.format(
- player_id, self._signature_cache_id(example_sig))
- assert os.path.basename(func_id) == func_id
-
- self.write_debug('Extracting signature function {0}'.format(func_id))
- cache_spec, code = self.cache.load('youtube-sigfuncs', func_id), None
+ extra_id = self._signature_cache_id(example_sig)
+ self.write_debug('Extracting signature function {0}-{1}'.format(player_url, extra_id))
+ cache_spec, code = self._load_player_data_from_cache(
+ 'sigfuncs', player_url, extra_id=extra_id), None
if not cache_spec:
- code = self._load_player(video_id, player_url, player_id)
- if code:
- res = self._parse_sig_js(code)
- test_string = ''.join(map(compat_chr, range(len(example_sig))))
- cache_spec = [ord(c) for c in res(test_string)]
- self.cache.store('youtube-sigfuncs', func_id, cache_spec)
+ code = self._load_player(video_id, player_url)
+ if code:
+ res = self._parse_sig_js(code)
+ test_string = ''.join(map(compat_chr, range(len(example_sig))))
+ cache_spec = [ord(c) for c in res(test_string)]
+ self._store_player_data_to_cache(
+ 'sigfuncs', player_url, cache_spec, extra_id=extra_id)
+ else:
+ self.report_warning(
+ 'Failed to compute signature function {0}-{1}'.format(
+ player_url, extra_id))
return lambda s: ''.join(s[i] for i in cache_spec)
@@ -1816,6 +2007,24 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return ret
def _extract_n_function_name(self, jscode):
+ func_name, idx = None, None
+
+ def generic_n_function_search(func_name=None):
+ return self._search_regex(
+ r'''(?xs)
+ (?:(?<=[^\w$])|^) # instead of \b, which ignores $
+ (?P<name>%s)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\)
+ \s*\{(?:(?!};).)+?(?:
+ ["']enhanced_except_ |
+ return\s*(?P<q>"|')[a-zA-Z\d-]+_w8_(?P=q)\s*\+\s*[\w$]+
+ )
+ ''' % (func_name or r'(?!\d)[a-zA-Z\d_$]+',), jscode,
+ 'Initial JS player n function name', group='name',
+ default=None if func_name else NO_DEFAULT)
+
+ # these special cases are redundant and probably obsolete (2025-04):
+ # they make the tests run ~10% faster without fallback warnings
+ r"""
func_name, idx = self._search_regex(
# (y=NuD(),Mw(k),q=k.Z[y]||null)&&(q=narray[idx](q),k.set(y,q),k.V||NuD(''))}};
# (R="nn"[+J.Z],mW(J),N=J.K[R]||null)&&(N=narray[idx](N),J.set(R,N))}};
@@ -1842,45 +2051,59 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
\(\s*[\w$]+\s*\)
''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'),
default=(None, None))
+ """
+
+ if not func_name:
+ # nfunc=function(x){...}|function nfunc(x); ...
+ # ... var y=[nfunc]|y[idx]=nfunc);
+ # obvious REs hang, so use a two-stage tactic
+ for m in re.finditer(r'''(?x)
+ [\n;]var\s(?:(?:(?!,).)+,|\s)*?(?!\d)[\w$]+(?:\[(?P<idx>\d+)\])?\s*=\s*
+ (?(idx)|\[\s*)(?P<nfunc>(?!\d)[\w$]+)(?(idx)|\s*\])
+ \s*?[;\n]
+ ''', jscode):
+ fn = self._search_regex(
+ r'[;,]\s*(function\s+)?({0})(?(1)|\s*=\s*function)\s*\((?!\d)[\w$]+\)\s*\{1}(?!\s*return\s)'.format(
+ re.escape(m.group('nfunc')), '{'),
+ jscode, 'Initial JS player n function name (2)', group=2, default=None)
+ if fn:
+ func_name = fn
+ idx = m.group('idx')
+ if generic_n_function_search(func_name):
+ # don't look any further
+ break
+
# thx bashonly: yt-dlp/yt-dlp/pull/10611
if not func_name:
- self.report_warning('Falling back to generic n function search')
- return self._search_regex(
- r'''(?xs)
- (?:(?<=[^\w$])|^) # instead of \b, which ignores $
- (?P<name>(?!\d)[a-zA-Z\d_$]+)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\)
- \s*\{(?:(?!};).)+?(?:
- ["']enhanced_except_ |
- return\s*(?P<q>"|')[a-zA-Z\d-]+_w8_(?P=q)\s*\+\s*[\w$]+
- )
- ''', jscode, 'Initial JS player n function name', group='name')
+ self.report_warning('Falling back to generic n function search', only_once=True)
+ return generic_n_function_search()
+
if not idx:
return func_name
return self._search_json(
- r'var\s+{0}\s*='.format(re.escape(func_name)), jscode,
+ r'(?<![\w-])var\s(?:(?:(?!,).)+,|\s)*?{0}\s*='.format(re.escape(func_name)), jscode,
'Initial JS player n function list ({0}.{1})'.format(func_name, idx),
- func_name, contains_pattern=r'\[[\s\S]+\]', end_pattern='[,;]',
+ func_name, contains_pattern=r'\[.+\]', end_pattern='[,;]',
transform_source=js_to_json)[int(idx)]
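[To make the two-stage tactic concrete, a toy player-JS fragment it is meant to handle; the identifiers are invented:]

    # ;var p,q=[nfunc];         <- stage 1: finditer() captures the alias
    #                              (or q[0]=nfunc, captured with idx=0)
    # ;function nfunc(a){...};  <- stage 2: the inner _search_regex()
    #                              confirms the name is defined as a function
    # A single RE covering both steps backtracks pathologically on real players.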
def _extract_n_function_code(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
- func_code = self.cache.load('youtube-nsig', player_id)
+ func_code = self._load_player_data_from_cache('nsig', player_url)
jscode = func_code or self._load_player(video_id, player_url)
jsi = JSInterpreter(jscode)
if func_code:
return jsi, player_id, func_code
- return self._extract_n_function_code_jsi(video_id, jsi, player_id)
- def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):
+ return self._extract_n_function_code_jsi(video_id, jsi, player_id, player_url)
+ def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None, player_url=None):
func_name = self._extract_n_function_name(jsi.code)
func_code = self._extract_sig_fn(jsi, func_name)
-
- if player_id:
- self.cache.store('youtube-nsig', player_id, func_code)
+ if player_url:
+ self._store_player_data_to_cache('nsig', player_url, func_code)
return jsi, player_id, func_code
def _extract_n_function_from_code(self, jsi, func_code):
@@ -1906,14 +2129,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return self._cached(self._decrypt_nsig, 'nsig', n, player_url)
for fmt in formats:
- parsed_fmt_url = compat_urllib_parse.urlparse(fmt['url'])
- n_param = compat_parse_qs(parsed_fmt_url.query).get('n')
+ n_param = parse_qs(fmt['url']).get('n')
if not n_param:
continue
n_param = n_param[-1]
n_response = decrypt_nsig(n_param)(n_param, video_id, player_url)
if n_response is None:
- # give up if descrambling failed
+ # give up and forget cached data if descrambling failed
+ self._remove_player_data_from_cache('nsig', player_url)
break
fmt['url'] = update_url_query(fmt['url'], {'n': n_response})
@@ -1921,50 +2144,69 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
"""
Extract signatureTimestamp (sts)
+
Required to tell API what sig/player version is in use.
"""
- sts = traverse_obj(ytcfg, 'STS', expected_type=int)
- if not sts:
- # Attempt to extract from player
- if player_url is None:
- error_msg = 'Cannot extract signature timestamp without player_url.'
- if fatal:
- raise ExtractorError(error_msg)
- self.report_warning(error_msg)
- return
- code = self._load_player(video_id, player_url, fatal=fatal)
- sts = int_or_none(self._search_regex(
- r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '',
- 'JS player signature timestamp', group='sts', fatal=fatal))
+ sts = traverse_obj(
+ (self._get_player_js_version(), ytcfg),
+ (0, 0),
+ (1, 'STS'),
+ expected_type=int_or_none)
+
+ if sts:
+ return sts
+
+ if not player_url:
+ error_msg = 'Cannot extract signature timestamp without player url'
+ if fatal:
+ raise ExtractorError(error_msg)
+ self.report_warning(error_msg)
+ return None
+
+ sts = self._load_player_data_from_cache('sts', player_url)
+ if sts:
+ return sts
+
+ # Attempt to extract from player
+ code = self._load_player(video_id, player_url, fatal=fatal)
+ sts = int_or_none(self._search_regex(
+ r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '',
+ 'JS player signature timestamp', group='sts', fatal=fatal))
+ if sts:
+ self._store_player_data_to_cache('sts', player_url, sts)
+
return sts
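[The reworked lookup order for the signature timestamp is: an explicit --youtube-player-js-version override, ytcfg's STS, the per-player cache, then a regex over the freshly downloaded player JS, e.g.:]

    # base.js: ...,signatureTimestamp:20128,...  ->  sts == 20128
    # (then persisted with _store_player_data_to_cache('sts', player_url, sts))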
def _mark_watched(self, video_id, player_response):
- playback_url = url_or_none(try_get(
- player_response,
- lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
- if not playback_url:
- return
-
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = string.ascii_letters + string.digits + '-_'
cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16))
- # more consistent results setting it to right before the end
- qs = parse_qs(playback_url)
- video_length = '{0}'.format(float((qs.get('len') or ['1.5'])[0]) - 1)
+ for is_full, key in enumerate(('videostatsPlaybackUrl', 'videostatsWatchtimeUrl')):
+ label = 'fully ' if is_full > 0 else ''
- playback_url = update_url_query(
- playback_url, {
- 'ver': '2',
- 'cpn': cpn,
- 'cmt': video_length,
- 'el': 'detailpage', # otherwise defaults to "shorts"
- })
+ playback_url = traverse_obj(player_response, (
+ 'playbackTracking', key, 'baseUrl', T(url_or_none)))
+ if not playback_url:
+ self.report_warning('Unable to mark {0}watched'.format(label))
+ continue
+
+ # more consistent results setting it to right before the end
+ qs = parse_qs(playback_url)
+ video_length = '{0}'.format(float((qs.get('len') or ['1.5'])[0]) - 1)
+
+ playback_url = update_url_query(
+ playback_url, {
+ 'ver': '2',
+ 'cpn': cpn,
+ 'cmt': video_length,
+ 'el': 'detailpage', # otherwise defaults to "shorts"
+ })
- self._download_webpage(
- playback_url, video_id, 'Marking watched',
- 'Unable to mark watched', fatal=False)
+ self._download_webpage(
+ playback_url, video_id, 'Marking {0}watched'.format(label),
+ 'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
@@ -2010,7 +2252,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
raise ExtractorError('Invalid URL: %s' % url)
return mobj.group(2)
- def _extract_chapters_from_json(self, data, video_id, duration):
+ @staticmethod
+ def _extract_chapters_from_json(data, video_id, duration):
chapters_list = try_get(
data,
lambda x: x['playerOverlays']
@@ -2055,13 +2298,60 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
+ def _get_preroll_length(self, ad_slot_lists):
+
+ def parse_instream_ad_renderer(instream_renderer):
+ for skippable, path in (
+ ('', ('skipOffsetMilliseconds', T(int))),
+ ('non-', ('playerVars', T(compat_parse_qs),
+ 'length_seconds', -1, T(int_or_none(invscale=1000))))):
+ length_ms = traverse_obj(instream_renderer, path)
+ if length_ms is not None:
+ self.write_debug('Detected a %ds %sskippable ad' % (
+ length_ms // 1000, skippable))
+ break
+ return length_ms
+
+ for slot_renderer in traverse_obj(ad_slot_lists, ('adSlots', Ellipsis, 'adSlotRenderer', T(dict))):
+ if traverse_obj(slot_renderer, ('adSlotMetadata', 'triggerEvent')) != 'SLOT_TRIGGER_EVENT_BEFORE_CONTENT':
+ continue
+ rendering_content = traverse_obj(slot_renderer, (
+ 'fulfillmentContent', 'fulfilledLayout', 'playerBytesAdLayoutRenderer',
+ 'renderingContent', 'instreamVideoAdRenderer', T(dict)))
+ length_ms = parse_instream_ad_renderer(rendering_content)
+ if length_ms is not None:
+ return length_ms
+ times = traverse_obj(rendering_content, ((
+ ('playerBytesSequentialLayoutRenderer', 'sequentialLayouts'),
+ None), any, Ellipsis, 'playerBytesAdLayoutRenderer',
+ 'renderingContent', 'instreamVideoAdRenderer',
+ T(parse_instream_ad_renderer)))
+ if times:
+ return sum(times)
+ return 0
+
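[A worked example of the non-skippable branch with a hypothetical API payload: `playerVars` is a query string, so `length_seconds=15` becomes 15000 ms via the `invscale=1000` conversion:]

    ad_slot_lists = {'adSlots': [{'adSlotRenderer': {
        'adSlotMetadata': {'triggerEvent': 'SLOT_TRIGGER_EVENT_BEFORE_CONTENT'},
        'fulfillmentContent': {'fulfilledLayout': {'playerBytesAdLayoutRenderer': {
            'renderingContent': {'instreamVideoAdRenderer': {
                'playerVars': 'length_seconds=15'}}}}}}}]}
    # self._get_preroll_length(ad_slot_lists) -> 15000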
+ def _is_premium_subscriber(self, initial_data):
+ if not self.is_authenticated or not initial_data:
+ return False
+
+ tlr = traverse_obj(
+ initial_data, ('topbar', 'desktopTopbarRenderer', 'logo', 'topbarLogoRenderer'))
+ return (
+ traverse_obj(tlr, ('iconImage', 'iconType')) == 'YOUTUBE_PREMIUM_LOGO'
+ or 'premium' in (self._get_text(tlr, 'tooltipText') or '').lower()
+ )
+
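[The premium check only inspects the top-bar logo of an authenticated page, e.g. (trimmed, hypothetical):]

    initial_data = {'topbar': {'desktopTopbarRenderer': {'logo': {'topbarLogoRenderer': {
        'iconImage': {'iconType': 'YOUTUBE_PREMIUM_LOGO'}}}}}
    # self._is_premium_subscriber(initial_data) -> True (given self.is_authenticated)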
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
+ ua = traverse_obj(self._INNERTUBE_CLIENTS, (
+ 'web', 'INNERTUBE_CONTEXT', 'client', 'userAgent'))
+ headers = {'User-Agent': ua} if ua else None
webpage = self._download_webpage(
- webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
+ webpage_url + '&bpctr=9999999999&has_verified=1', video_id,
+ headers=headers, fatal=False)
player_response = None
player_url = None
@@ -2071,37 +2361,43 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
video_id, 'initial player response')
is_live = traverse_obj(player_response, ('videoDetails', 'isLive'))
+ fetched_timestamp = None
if False and not player_response:
player_response = self._call_api(
'player', {'videoId': video_id}, video_id)
if True or not player_response:
origin = 'https://www.youtube.com'
pb_context = {'html5Preference': 'HTML5_PREF_WANTS'}
-
player_url = self._extract_player_url(webpage)
ytcfg = self._extract_ytcfg(video_id, webpage or '')
sts = self._extract_signature_timestamp(video_id, player_url, ytcfg)
if sts:
pb_context['signatureTimestamp'] = sts
- client_names = traverse_obj(self._INNERTUBE_CLIENTS, (
- T(dict.items), lambda _, k_v: not k_v[1].get('REQUIRE_PO_TOKEN'),
- 0))[:1]
+ auth = self._generate_sapisidhash_header(origin)
+
+ client_names = []
+ if auth or self._is_premium_subscriber(player_response):
+ client_names = traverse_obj(self._INNERTUBE_CLIENTS, (
+ T(dict_items), lambda _, k_v: k_v[0] == 'web_safari', 0))[:1]
+ if not client_names:
+ client_names = traverse_obj(self._INNERTUBE_CLIENTS, (
+ T(dict_items), lambda _, k_v: not (
+ k_v[1].get('REQUIRE_PO_TOKEN')
+ or (bool(k_v[1].get('WITH_COOKIES', auth)) ^ bool(auth))
+ ), 0))[:1]
if 'web' not in client_names:
- # webpage links won't download: ignore links and playability
+ # only live HLS webpage links will download: ignore playability
player_response = filter_dict(
player_response or {},
- lambda k, _: k not in ('streamingData', 'playabilityStatus'))
-
- if is_live and 'ios' not in client_names:
- client_names.append('ios')
+ lambda k, _: k != 'playabilityStatus')
headers = {
'Sec-Fetch-Mode': 'navigate',
'Origin': origin,
'X-Goog-Visitor-Id': self._extract_visitor_data(ytcfg) or '',
}
- auth = self._generate_sapisidhash_header(origin)
+
if auth is not None:
headers['Authorization'] = auth
headers['X-Origin'] = origin
@@ -2131,7 +2427,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'INNERTUBE_CONTEXT', 'client', 'clientVersion'),
'User-Agent': (
'INNERTUBE_CONTEXT', 'client', 'userAgent'),
- }))
+ }) or {})
api_player_response = self._call_api(
'player', query, video_id, fatal=False, headers=api_headers,
@@ -2140,27 +2436,32 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'context', 'client', 'clientName')),
'API JSON', delim=' '))
- hls = traverse_obj(
+ # be sure to find HLS in case of is_live
+ hls = traverse_obj(player_response, (
+ 'streamingData', 'hlsManifestUrl', T(url_or_none)))
+ fetched_timestamp = int(time.time())
+ preroll_length_ms = (
+ self._get_preroll_length(api_player_response)
+ or self._get_preroll_length(player_response))
+ video_details = merge_dicts(*traverse_obj(
(player_response, api_player_response),
- (Ellipsis, 'streamingData', 'hlsManifestUrl', T(url_or_none)))
- if len(hls) == 2 and not hls[0] and hls[1]:
- player_response['streamingData']['hlsManifestUrl'] = hls[1]
- else:
- video_details = merge_dicts(*traverse_obj(
- (player_response, api_player_response),
- (Ellipsis, 'videoDetails', T(dict))))
- player_response.update(filter_dict(
- api_player_response or {}, cndn=lambda k, _: k != 'captions'))
- player_response['videoDetails'] = video_details
+ (Ellipsis, 'videoDetails', T(dict))))
+ player_response.update(filter_dict(
+ api_player_response or {}, cndn=lambda k, _: k != 'captions'))
+ player_response['videoDetails'] = video_details
+ if hls and not traverse_obj(player_response, (
+ 'streamingData', 'hlsManifestUrl', T(url_or_none))):
+ player_response['streamingData']['hlsManifestUrl'] = hls
def is_agegated(playability):
- if not isinstance(playability, dict):
- return
+ # playability: dict
+ if not playability:
+ return False
if playability.get('desktopLegacyAgeGateReason'):
return True
- reasons = filter(None, (playability.get(r) for r in ('status', 'reason')))
+ reasons = traverse_obj(playability, (('status', 'reason'),))
AGE_GATE_REASONS = (
'confirm your age', 'age-restricted', 'inappropriate', # reason
'age_verification_required', 'age_check_required', # status
@@ -2217,16 +2518,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
- def get_text(x):
- if not x:
- return
- text = x.get('simpleText')
- if text and isinstance(text, compat_str):
- return text
- runs = x.get('runs')
- if not isinstance(runs, list):
- return
- return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
+ get_text = lambda x: self._get_text(x) or ''
search_meta = (
(lambda x: self._html_search_meta(x, webpage, default=None))
@@ -2304,11 +2596,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return LazyList({
'url': update_url_query(f['url'], {
'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize'])),
- })
+ }),
} for range_start in range(0, f['filesize'], CHUNK_SIZE))
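[With stand-in numbers, the lazy chunking above produces the following `range` values; note the final bound is `filesize`, not `filesize - 1`, mirroring the code:]

    CHUNK_SIZE, filesize = 10, 25
    assert ['{0}-{1}'.format(s, min(s + CHUNK_SIZE - 1, filesize))
            for s in range(0, filesize, CHUNK_SIZE)] == ['0-9', '10-19', '20-25']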
lower = lambda s: s.lower()
+ if is_live:
+ fetched_timestamp = None
+ elif fetched_timestamp is not None:
+ # Handle preroll waiting period
+ preroll_sleep = self.get_param('youtube_preroll_sleep')
+ preroll_sleep = min(6, int_or_none(preroll_sleep, default=preroll_length_ms / 1000))
+ fetched_timestamp += preroll_sleep
+
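[So a format fetched at t0 with a detected 6-second preroll is stamped `available_at = t0 + 6`; the assumption here is that the downloader side, whose changes are outside this file, sleeps until that moment before the first media request:]

    # fetched_timestamp = 1700000000, preroll_length_ms = 6000
    # -> fmt['available_at'] == 1700000006
    # -> consumer waits max(0, available_at - time.time()) before downloading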
for fmt in streaming_formats:
if fmt.get('targetDurationSec'):
continue
@@ -2352,6 +2652,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self.write_debug(error_to_compat_str(e), only_once=True)
continue
+ if parse_qs(fmt_url).get('n'):
+ # this and (we assume) all the formats here are n-scrambled
+ break
+
language_preference = (
10 if audio_track.get('audioIsDefault')
else -10 if 'descriptive' in (traverse_obj(audio_track, ('displayName', T(lower))) or '')
@@ -2405,6 +2709,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'downloader_options': {'http_chunk_size': CHUNK_SIZE}, # No longer useful?
})
+ if fetched_timestamp:
+ dct['available_at'] = fetched_timestamp
+
formats.append(dct)
def process_manifest_format(f, proto, client_name, itag, all_formats=False):
@@ -2422,6 +2729,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if f.get('source_preference') is None:
f['source_preference'] = -1
+ # Deprioritize since its pre-merged m3u8 formats may have lower quality audio streams
+ if client_name == 'web_safari' and proto == 'hls' and not is_live:
+ f['source_preference'] -= 1
+
if itag in ('616', '235'):
f['format_note'] = join_nonempty(f.get('format_note'), 'Premium', delim=' ')
f['source_preference'] += 100
@@ -2438,15 +2749,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
hls_manifest_url = streaming_data.get('hlsManifestUrl')
if hls_manifest_url:
- for f in self._extract_m3u8_formats(
+ formats.extend(
+ f for f in self._extract_m3u8_formats(
hls_manifest_url, video_id, 'mp4',
- entry_protocol='m3u8_native', live=is_live, fatal=False):
+ entry_protocol='m3u8_native', live=is_live, fatal=False)
if process_manifest_format(
- f, 'hls', None, self._search_regex(
- r'/itag/(\d+)', f['url'], 'itag', default=None)):
- formats.append(f)
+ f, 'hls', None, self._search_regex(
+ r'/itag/(\d+)', f['url'], 'itag', default=None)))
- if self._downloader.params.get('youtube_include_dash_manifest', True):
+ if self.get_param('youtube_include_dash_manifest', True):
dash_manifest_url = streaming_data.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(
@@ -2473,11 +2784,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
playability_status,
lambda x: x['errorScreen']['playerErrorMessageRenderer'],
dict) or {}
- reason = get_text(pemr.get('reason')) or playability_status.get('reason')
+ reason = get_text(pemr.get('reason')) or playability_status.get('reason') or ''
subreason = pemr.get('subreason')
if subreason:
subreason = clean_html(get_text(subreason))
- if subreason == 'The uploader has not made this video available in your country.':
+ if subreason.startswith('The uploader has not made this video available in your country'):
countries = microformat.get('availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
@@ -2485,7 +2796,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self.raise_geo_restricted(
subreason, countries)
reason += '\n' + subreason
+
if reason:
+ if 'sign in' in reason.lower():
+ self.raise_login_required(remove_end(reason, 'This helps protect our community. Learn more'))
+ elif traverse_obj(playability_status, ('errorScreen', 'playerCaptchaViewModel', T(dict))):
+ reason += '. YouTube is requiring a captcha challenge before playback'
raise ExtractorError(reason, expected=True)
self._sort_formats(formats)
@@ -2588,6 +2904,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
+ # xosf=1 causes undesirable text position data for vtt, json3 & srv* subtitles
+ # See: https://github.com/yt-dlp/yt-dlp/issues/13654
+ 'xosf': [],
})
lang_subs.append({
'ext': fmt,
@@ -2629,7 +2948,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
d_k += '_time'
if d_k not in info and k in s_ks:
- info[d_k] = parse_duration(query[k][0])
+ info[d_k] = parse_duration(v[0])
if video_description:
# Youtube Music Auto-generated description
@@ -2658,28 +2977,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
initial_data = self._call_api(
'next', {'videoId': video_id}, video_id, fatal=False)
+ initial_sdcr = None
if initial_data:
chapters = self._extract_chapters_from_json(
initial_data, video_id, duration)
if not chapters:
- for engagment_pannel in (initial_data.get('engagementPanels') or []):
- contents = try_get(
- engagment_pannel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
- list)
- if not contents:
- continue
+ def chapter_time(mmlir):
+ return parse_duration(
+ get_text(mmlir.get('timeDescription')))
- def chapter_time(mmlir):
- return parse_duration(
- get_text(mmlir.get('timeDescription')))
+ for markers in traverse_obj(initial_data, (
+ 'engagementPanels', Ellipsis, 'engagementPanelSectionListRenderer',
+ 'content', 'macroMarkersListRenderer', 'contents', T(list))):
chapters = []
- for next_num, content in enumerate(contents, start=1):
+ for next_num, content in enumerate(markers, start=1):
mmlir = content.get('macroMarkersListItemRenderer') or {}
start_time = chapter_time(mmlir)
- end_time = chapter_time(try_get(
- contents, lambda x: x[next_num]['macroMarkersListItemRenderer'])) \
- if next_num < len(contents) else duration
+ end_time = (traverse_obj(markers, (
+ next_num, 'macroMarkersListItemRenderer', T(chapter_time)))
+ if next_num < len(markers) else duration)
if start_time is None or end_time is None:
continue
chapters.append({
@@ -2785,12 +3102,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
info['track'] = mrr_contents_text
# this is not extraction but spelunking!
- carousel_lockups = traverse_obj(
- initial_data,
- ('engagementPanels', Ellipsis, 'engagementPanelSectionListRenderer',
- 'content', 'structuredDescriptionContentRenderer', 'items', Ellipsis,
- 'videoDescriptionMusicSectionRenderer', 'carouselLockups', Ellipsis),
- expected_type=dict) or []
+ initial_sdcr = traverse_obj(initial_data, (
+ 'engagementPanels', Ellipsis, 'engagementPanelSectionListRenderer',
+ 'content', 'structuredDescriptionContentRenderer', T(dict)),
+ get_all=False)
+ carousel_lockups = traverse_obj(initial_sdcr, (
+ 'items', Ellipsis, 'videoDescriptionMusicSectionRenderer',
+ 'carouselLockups', Ellipsis, T(dict))) or []
# try to reproduce logic from metadataRowContainerRenderer above (if it still is)
fields = (('ALBUM', 'album'), ('ARTIST', 'artist'), ('SONG', 'track'), ('LICENSES', 'license'))
# multiple_songs ?
@@ -2815,6 +3133,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self.mark_watched(video_id, player_response)
+ # Fallbacks for missing metadata
+ if initial_sdcr:
+ if info.get('description') is None:
+ info['description'] = traverse_obj(initial_sdcr, (
+ 'items', Ellipsis, 'expandableVideoDescriptionBodyRenderer',
+ 'attributedDescriptionBodyText', 'content', T(compat_str)),
+ get_all=False)
+ # videoDescriptionHeaderRenderer also has publishDate/channel/handle/ucid, but not needed
+ if info.get('title') is None:
+ info['title'] = traverse_obj(
+ (initial_sdcr, initial_data),
+ (0, 'items', Ellipsis, 'videoDescriptionHeaderRenderer', T(dict)),
+ (1, 'playerOverlays', 'playerOverlayRenderer', 'videoDetails',
+ 'playerOverlayVideoDetailsRenderer', T(dict)),
+ expected_type=lambda x: self._get_text(x, 'title'),
+ get_all=False)
+
return merge_dicts(
info, {
'uploader_id': self._extract_uploader_id(owner_profile_url),
@@ -3216,19 +3551,9 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
@staticmethod
def _extract_grid_item_renderer(item):
- assert isinstance(item, dict)
- for key, renderer in item.items():
- if not key.startswith('grid') or not key.endswith('Renderer'):
- continue
- if not isinstance(renderer, dict):
- continue
- return renderer
-
- @staticmethod
- def _get_text(r, k):
- return traverse_obj(
- r, (k, 'runs', 0, 'text'), (k, 'simpleText'),
- expected_type=txt_or_none)
+ return traverse_obj(item, (
+ T(dict.items), lambda _, k_v: k_v[0].startswith('grid') and k_v[0].endswith('Renderer'),
+ 1, T(dict)), get_all=False)
def _grid_entries(self, grid_renderer):
for item in traverse_obj(grid_renderer, ('items', Ellipsis, T(dict))):
@@ -3305,8 +3630,8 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
yield self.url_result(shelf_url, video_title=title)
# Shelf may not contain shelf URL, fallback to extraction from content
- for entry in self._shelf_entries_from_content(shelf_renderer):
- yield entry
+ for from_ in self._shelf_entries_from_content(shelf_renderer):
+ yield from_
def _playlist_entries(self, video_list_renderer):
for content in video_list_renderer['contents']:
@@ -3325,24 +3650,56 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
if not content_id:
return
content_type = view_model.get('contentType')
- if content_type not in ('LOCKUP_CONTENT_TYPE_PLAYLIST', 'LOCKUP_CONTENT_TYPE_PODCAST'):
+ if content_type == 'LOCKUP_CONTENT_TYPE_VIDEO':
+ ie = YoutubeIE
+ url = update_url_query(
+ 'https://www.youtube.com/watch', {'v': content_id})
+ thumb_keys = (None,)
+ elif content_type in ('LOCKUP_CONTENT_TYPE_PLAYLIST', 'LOCKUP_CONTENT_TYPE_PODCAST'):
+ ie = YoutubeTabIE
+ url = update_url_query(
+ 'https://www.youtube.com/playlist', {'list': content_id})
+ thumb_keys = ('collectionThumbnailViewModel', 'primaryThumbnail')
+ else:
self.report_warning(
- 'Unsupported lockup view model content type "{0}"{1}'.format(content_type, bug_reports_message()), only_once=True)
+ 'Unsupported lockup view model content type "{0}"{1}'.format(content_type, bug_reports_message()),
+ only_once=True)
return
+
+ thumb_keys = ('contentImage',) + thumb_keys + ('thumbnailViewModel', 'image')
+
return merge_dicts(self.url_result(
- update_url_query('https://www.youtube.com/playlist', {'list': content_id}),
- ie=YoutubeTabIE.ie_key(), video_id=content_id), {
+ url, ie=ie.ie_key(), video_id=content_id),
+ traverse_obj(view_model, {
+ 'title': ('metadata', 'lockupMetadataViewModel', 'title',
+ 'content', T(compat_str)),
+ 'thumbnails': T(lambda vm: self._extract_thumbnails(
+ vm, thumb_keys, final_key='sources')),
+ 'duration': (
+ 'contentImage', 'thumbnailViewModel', 'overlays',
+ Ellipsis, (
+ ('thumbnailBottomOverlayViewModel', 'badges'),
+ ('thumbnailOverlayBadgeViewModel', 'thumbnailBadges')
+ ), Ellipsis, 'thumbnailBadgeViewModel', 'text',
+ T(parse_duration), any),
+ })
+ )
+
+ def _extract_shorts_lockup_view_model(self, view_model):
+ content_id = traverse_obj(view_model, (
+ 'onTap', 'innertubeCommand', 'reelWatchEndpoint', 'videoId',
+ T(lambda v: v if YoutubeIE.suitable(v) else None)))
+ return merge_dicts(self.url_result(
+ content_id, ie=YoutubeIE.ie_key(), video_id=content_id), {
'title': traverse_obj(view_model, (
- 'metadata', 'lockupMetadataViewModel', 'title', 'content', T(compat_str))),
- 'thumbnails': self._extract_thumbnails(view_model, (
- 'contentImage', 'collectionThumbnailViewModel', 'primaryThumbnail',
- 'thumbnailViewModel', 'image'), final_key='sources'),
- })
+ 'overlayMetadata', 'primaryText', 'content', T(compat_str))),
+ 'thumbnails': self._extract_thumbnails(
+ view_model, 'thumbnail', final_key='sources'),
+ }) if content_id else None
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
- if video_id:
- return self._extract_video(video_renderer)
+ return self._extract_video(video_renderer) if video_id else None
def _post_thread_entries(self, post_thread_renderer):
post_renderer = try_get(
@@ -3374,21 +3731,15 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
- contents = post_thread_continuation.get('contents')
- if not isinstance(contents, list):
- return
- for content in contents:
- renderer = content.get('backstagePostThreadRenderer')
- if not isinstance(renderer, dict):
- continue
- for entry in self._post_thread_entries(renderer):
- yield entry
+ for renderer in traverse_obj(post_thread_continuation, (
+ 'contents', Ellipsis, 'backstagePostThreadRenderer', T(dict))):
+ for from_ in self._post_thread_entries(renderer):
+ yield from_
def _rich_grid_entries(self, contents):
- for content in contents:
- content = traverse_obj(
- content, ('richItemRenderer', 'content'),
- expected_type=dict) or {}
+ for content in traverse_obj(
+ contents, (Ellipsis, 'richItemRenderer', 'content'),
+ expected_type=dict):
video_renderer = traverse_obj(
content, 'videoRenderer', 'reelItemRenderer',
expected_type=dict)
@@ -3396,6 +3747,12 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
entry = self._video_entry(video_renderer)
if entry:
yield entry
+ # shorts item
+ shorts_lockup_view_model = content.get('shortsLockupViewModel')
+ if shorts_lockup_view_model:
+ entry = self._extract_shorts_lockup_view_model(shorts_lockup_view_model)
+ if entry:
+ yield entry
# playlist
renderer = traverse_obj(
content, 'playlistRenderer', expected_type=dict) or {}
@@ -3434,23 +3791,15 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
next_continuation = cls._extract_next_continuation_data(renderer)
if next_continuation:
return next_continuation
- contents = []
- for key in ('contents', 'items'):
- contents.extend(try_get(renderer, lambda x: x[key], list) or [])
- for content in contents:
- if not isinstance(content, dict):
- continue
- continuation_ep = try_get(
- content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
- dict)
- if not continuation_ep:
- continue
- continuation = try_get(
- continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
+ for command in traverse_obj(renderer, (
+ ('contents', 'items', 'rows'), Ellipsis, 'continuationItemRenderer',
+ ('continuationEndpoint', ('button', 'buttonRenderer', 'command')),
+ (('commandExecutorCommand', 'commands', Ellipsis), None), T(dict))):
+ continuation = traverse_obj(command, ('continuationCommand', 'token', T(compat_str)))
if not continuation:
continue
- ctp = continuation_ep.get('clickTrackingParams')
- return YoutubeTabIE._build_continuation_query(continuation, ctp)
+ ctp = command.get('clickTrackingParams')
+ return cls._build_continuation_query(continuation, ctp)
def _entries(self, tab, item_id, webpage):
tab_content = try_get(tab, lambda x: x['content'], dict)
@@ -3460,17 +3809,10 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
if slr_renderer:
is_channels_tab = tab.get('title') == 'Channels'
continuation = None
- slr_contents = try_get(slr_renderer, lambda x: x['contents'], list) or []
- for slr_content in slr_contents:
- if not isinstance(slr_content, dict):
- continue
- is_renderer = try_get(slr_content, lambda x: x['itemSectionRenderer'], dict)
- if not is_renderer:
- continue
- isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
- for isr_content in isr_contents:
- if not isinstance(isr_content, dict):
- continue
+ for is_renderer in traverse_obj(slr_renderer, (
+ 'contents', Ellipsis, 'itemSectionRenderer', T(dict))):
+ for isr_content in traverse_obj(is_renderer, (
+ 'contents', Ellipsis, T(dict))):
renderer = isr_content.get('playlistVideoListRenderer')
if renderer:
for entry in self._playlist_entries(renderer):
@@ -3499,6 +3841,13 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
entry = self._video_entry(renderer)
if entry:
yield entry
+ renderer = isr_content.get('richGridRenderer')
+ if renderer:
+ for from_ in self._rich_grid_entries(
+ traverse_obj(renderer, ('contents', Ellipsis, T(dict)))):
+ yield from_
+ continuation = self._extract_continuation(renderer)
+ continue
if not continuation:
continuation = self._extract_continuation(is_renderer)
@@ -3508,8 +3857,9 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
rich_grid_renderer = tab_content.get('richGridRenderer')
if not rich_grid_renderer:
return
- for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []):
- yield entry
+ for from_ in self._rich_grid_entries(
+ traverse_obj(rich_grid_renderer, ('contents', Ellipsis, T(dict)))):
+ yield from_
continuation = self._extract_continuation(rich_grid_renderer)
@@ -3555,8 +3905,12 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
# Downloading page may result in intermittent 5xx HTTP error
# that is usually worked around with a retry
response = self._download_json(
- 'https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
+ 'https://www.youtube.com/youtubei/v1/browse',
None, 'Downloading page %d%s' % (page_num, ' (retry #%d)' % count if count else ''),
+ query={
+ # 'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
+ 'prettyPrint': 'false',
+ },
headers=headers, data=json.dumps(data).encode('utf8'))
break
except ExtractorError as e:
@@ -3653,18 +4007,34 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
uploader['channel'] = uploader['uploader']
return uploader
- @classmethod
- def _extract_alert(cls, data):
- alerts = []
- for alert in traverse_obj(data, ('alerts', Ellipsis), expected_type=dict):
- alert_text = traverse_obj(
- alert, (None, lambda x: x['alertRenderer']['text']), get_all=False)
- if not alert_text:
- continue
- text = cls._get_text(alert_text, 'text')
- if text:
- alerts.append(text)
- return '\n'.join(alerts)
+ def _extract_and_report_alerts(self, data, expected=True, fatal=True, only_once=False):
+
+ def alerts():
+ for alert in traverse_obj(data, ('alerts', Ellipsis), expected_type=dict):
+ alert_dict = traverse_obj(
+ alert, 'alertRenderer', None, expected_type=dict, get_all=False)
+ alert_type = traverse_obj(alert_dict, 'type')
+ if not alert_type:
+ continue
+ message = self._get_text(alert_dict, 'text')
+ if message:
+ yield alert_type, message
+
+ errors, warnings = [], []
+ _IGNORED_WARNINGS = T('Unavailable videos will be hidden during playback')
+ for alert_type, alert_message in alerts():
+ if alert_type.lower() == 'error' and fatal:
+ errors.append([alert_type, alert_message])
+ elif alert_message not in _IGNORED_WARNINGS:
+ warnings.append([alert_type, alert_message])
+
+ for alert_type, alert_message in itertools.chain(warnings, errors[:-1]):
+ self.report_warning(
+ 'YouTube said: %s - %s' % (alert_type, alert_message),
+ only_once=only_once)
+ if errors:
+ raise ExtractorError(
+ 'YouTube said: %s' % (errors[-1][1],), expected=expected)
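[Behaviour sketch with hypothetical alert payloads: every warning, and every error but the last, is reported; the final error raises:]

    data = {'alerts': [
        {'alertRenderer': {'type': 'WARNING',
                           'text': {'simpleText': 'Some videos are unavailable.'}}},
        {'alertRenderer': {'type': 'ERROR',
                           'text': {'simpleText': 'This playlist is private.'}}},
    ]}
    # self._extract_and_report_alerts(data)
    # -> report_warning('YouTube said: WARNING - Some videos are unavailable.')
    # -> raises ExtractorError('YouTube said: This playlist is private.')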
def _extract_from_tabs(self, item_id, webpage, data, tabs):
selected_tab = self._extract_selected_tab(tabs)
@@ -3764,10 +4134,10 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
compat_str) or video_id
if video_id:
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+
# Capture and output alerts
- alert = self._extract_alert(data)
- if alert:
- raise ExtractorError(alert, expected=True)
+ self._extract_and_report_alerts(data)
+
# Failed to recognize
raise ExtractorError('Unable to recognize tab page')
@@ -3921,7 +4291,7 @@ class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
'only_matching': True,
}]
- def _real_extract(self, url):
+ def _real_extract(self, _):
return self.url_result(
'https://www.youtube.com/playlist?list=LL',
ie=YoutubeTabIE.ie_key())
@@ -3993,6 +4363,7 @@ class YoutubeFeedsInfoExtractor(YoutubeTabIE):
Subclasses must define the _FEED_NAME property.
"""
+
_LOGIN_REQUIRED = True
@property
@@ -4002,7 +4373,7 @@ class YoutubeFeedsInfoExtractor(YoutubeTabIE):
def _real_initialize(self):
self._login()
- def _real_extract(self, url):
+ def _real_extract(self, _):
return self.url_result(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
ie=YoutubeTabIE.ie_key())
@@ -4017,7 +4388,7 @@ class YoutubeWatchLaterIE(InfoExtractor):
'only_matching': True,
}]
- def _real_extract(self, url):
+ def _real_extract(self, _):
return self.url_result(
'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
@@ -4097,7 +4468,7 @@ class YoutubeTruncatedURLIE(InfoExtractor):
'only_matching': True,
}]
- def _real_extract(self, url):
+ def _real_extract(self, _):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
diff --git a/youtube_dl/jsinterp.py b/youtube_dl/jsinterp.py
index f0c4fa928..7630e2099 100644
--- a/youtube_dl/jsinterp.py
+++ b/youtube_dl/jsinterp.py
@@ -240,7 +240,7 @@ def _js_ternary(cndn, if_true=True, if_false=False):
def _js_unary_op(op):
@wraps_op(op)
- def wrapped(_, a):
+ def wrapped(a, _):
return op(a)
return wrapped
@@ -283,17 +283,6 @@ _OPERATORS = (
('**', _js_exp),
)
-_COMP_OPERATORS = (
- ('===', _js_id_op(operator.is_)),
- ('!==', _js_id_op(operator.is_not)),
- ('==', _js_eq),
- ('!=', _js_neq),
- ('<=', _js_comp_op(operator.le)),
- ('>=', _js_comp_op(operator.ge)),
- ('<', _js_comp_op(operator.lt)),
- ('>', _js_comp_op(operator.gt)),
-)
-
_LOG_OPERATORS = (
('|', _js_bit_op(operator.or_)),
('^', _js_bit_op(operator.xor)),
@@ -310,13 +299,27 @@ _SC_OPERATORS = (
_UNARY_OPERATORS_X = (
('void', _js_unary_op(lambda _: JS_Undefined)),
('typeof', _js_unary_op(_js_typeof)),
+ # avoid functools.partial here since Py2 update_wrapper(partial) -> no __module__
+ ('!', _js_unary_op(lambda x: _js_ternary(x, if_true=False, if_false=True))),
)
-_OPERATOR_RE = '|'.join(map(lambda x: re.escape(x[0]), _OPERATORS + _LOG_OPERATORS))
+_COMP_OPERATORS = (
+ ('===', _js_id_op(operator.is_)),
+ ('!==', _js_id_op(operator.is_not)),
+ ('==', _js_eq),
+ ('!=', _js_neq),
+ ('<=', _js_comp_op(operator.le)),
+ ('>=', _js_comp_op(operator.ge)),
+ ('<', _js_comp_op(operator.lt)),
+ ('>', _js_comp_op(operator.gt)),
+)
+
+_OPERATOR_RE = '|'.join(map(lambda x: re.escape(x[0]), _OPERATORS + _LOG_OPERATORS + _SC_OPERATORS))
_NAME_RE = r'[a-zA-Z_$][\w$]*'
_MATCHING_PARENS = dict(zip(*zip('()', '{}', '[]')))
_QUOTES = '\'"/'
+_NESTED_BRACKETS = r'[^[\]]+(?:\[[^[\]]+(?:\[[^\]]+\])?\])?'
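[`_NESTED_BRACKETS` accepts an index expression nested at most two brackets deep; a quick self-check of the pattern:]

    import re
    _NB = r'[^[\]]+(?:\[[^[\]]+(?:\[[^\]]+\])?\])?'
    for s in ('a', 'a[b]', 'a[b[c]]'):
        assert re.match('(?:%s)$' % _NB, s)
    # deeper nesting such as 'a[b[c[d]]]' is intentionally not matched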
class JS_Break(ExtractorError):
@@ -353,7 +356,7 @@ class LocalNameSpace(ChainMap):
raise NotImplementedError('Deleting is not supported')
def __repr__(self):
- return 'LocalNameSpace%s' % (self.maps, )
+ return 'LocalNameSpace({0!r})'.format(self.maps)
class Debugger(object):
@@ -374,6 +377,9 @@ class Debugger(object):
@classmethod
def wrap_interpreter(cls, f):
+ if not cls.ENABLED:
+ return f
+
@wraps(f)
def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs):
if cls.ENABLED and stmt.strip():
@@ -414,7 +420,17 @@ class JSInterpreter(object):
msg = '{0} in: {1!r:.100}'.format(msg.rstrip(), expr)
super(JSInterpreter.Exception, self).__init__(msg, *args, **kwargs)
- class JS_RegExp(object):
+ class JS_Object(object):
+ def __getitem__(self, key):
+ if hasattr(self, key):
+ return getattr(self, key)
+ raise KeyError(key)
+
+ def dump(self):
+ """Serialise the instance"""
+ raise NotImplementedError
+
+ class JS_RegExp(JS_Object):
RE_FLAGS = {
# special knowledge: Python's re flags are bitmask values, current max 128
# invent new bitmask values well above that for literal parsing
@@ -435,16 +451,24 @@ class JSInterpreter(object):
def __init__(self, pattern_txt, flags=0):
if isinstance(flags, compat_str):
flags, _ = self.regex_flags(flags)
- # First, avoid https://github.com/python/cpython/issues/74534
self.__self = None
pattern_txt = str_or_none(pattern_txt) or '(?:)'
- self.__pattern_txt = pattern_txt.replace('[[', r'[\[')
+ # escape unintended embedded flags
+ pattern_txt = re.sub(
+ r'(\(\?)([aiLmsux]*)(-[imsx]+:|(?<!\?)\))',
+ lambda m: ''.join(
+ (re.escape(m.group(1)), m.group(2), re.escape(m.group(3)))
+ if m.group(3) == ')'
+ else ('(?:', m.group(2), m.group(3))),
+ pattern_txt)
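+            # e.g. a literal '(?i)' becomes r'\(\?i\)' so Py re won't read it as an inline flag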
+ # Avoid https://github.com/python/cpython/issues/74534
+ self.source = pattern_txt.replace('[[', r'[\[')
self.__flags = flags
def __instantiate(self):
if self.__self:
return
- self.__self = re.compile(self.__pattern_txt, self.__flags)
+ self.__self = re.compile(self.source, self.__flags)
# Thx: https://stackoverflow.com/questions/44773522/setattr-on-python2-sre-sre-pattern
for name in dir(self.__self):
# Only these? Obviously __class__, __init__.
@@ -452,16 +476,15 @@ class JSInterpreter(object):
# that can't be setattr'd but also can't need to be copied.
if name in ('__class__', '__init__', '__weakref__'):
continue
- setattr(self, name, getattr(self.__self, name))
+ if name == 'flags':
+ setattr(self, name, getattr(self.__self, name, self.__flags))
+ else:
+ setattr(self, name, getattr(self.__self, name))
def __getattr__(self, name):
self.__instantiate()
- # make Py 2.6 conform to its lying documentation
- if name == 'flags':
- self.flags = self.__flags
- return self.flags
- elif name == 'pattern':
- self.pattern = self.__pattern_txt
+ if name == 'pattern':
+ self.pattern = self.source
return self.pattern
elif hasattr(self.__self, name):
v = getattr(self.__self, name)
@@ -469,6 +492,26 @@ class JSInterpreter(object):
return v
elif name in ('groupindex', 'groups'):
return 0 if name == 'groupindex' else {}
+ else:
+ flag_attrs = ( # order by 2nd elt
+ ('hasIndices', 'd'),
+ ('global', 'g'),
+ ('ignoreCase', 'i'),
+ ('multiline', 'm'),
+ ('dotAll', 's'),
+ ('unicode', 'u'),
+ ('unicodeSets', 'v'),
+ ('sticky', 'y'),
+ )
+ for k, c in flag_attrs:
+ if name == k:
+ return bool(self.RE_FLAGS[c] & self.__flags)
+ else:
+ if name == 'flags':
+ return ''.join(
+ (c if self.RE_FLAGS[c] & self.__flags else '')
+ for _, c in flag_attrs)
+
raise AttributeError('{0} has no attribute named {1}'.format(self, name))
@classmethod
@@ -482,7 +525,16 @@ class JSInterpreter(object):
flags |= cls.RE_FLAGS[ch]
return flags, expr[idx + 1:]
- class JS_Date(object):
+ def dump(self):
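+            # serialise back to a JS regex literal, escaping unescaped '/'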
+ return '(/{0}/{1})'.format(
+ re.sub(r'(?<!\\)/', r'\/', self.source),
+ self.flags)
+
+ @staticmethod
+ def escape(string_):
+ return re.escape(string_)
+
+ class JS_Date(JS_Object):
_t = None
@staticmethod
@@ -549,6 +601,9 @@ class JSInterpreter(object):
def valueOf(self):
return _NaN if self._t is None else self._t
+ def dump(self):
+ return '(new Date({0}))'.format(self.toString())
+
@classmethod
def __op_chars(cls):
op_chars = set(';,[')
@@ -652,6 +707,68 @@ class JSInterpreter(object):
_SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS, _UNARY_OPERATORS_X))
return _cached
+ def _separate_at_op(self, expr, max_split=None):
+
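+        # Try each operator, least-binding first; on the first successful split,
+        # return (op, left_parts, right_expr), or None if no operator applies.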
+ for op, _ in self._all_operators():
+ # hackety: </> have higher priority than <</>>, but don't confuse them
+ skip_delim = (op + op) if op in '<>*?' else None
+ if op == '?':
+ skip_delim = (skip_delim, '?.')
+ separated = list(self._separate(expr, op, skip_delims=skip_delim))
+ if len(separated) < 2:
+ continue
+
+ right_expr = separated.pop()
+ # handle operators that are both unary and binary, minimal BODMAS
+ if op in ('+', '-'):
+ # simplify/adjust consecutive instances of these operators
+ undone = 0
+ separated = [s.strip() for s in separated]
+ while len(separated) > 1 and not separated[-1]:
+ undone += 1
+ separated.pop()
+ if op == '-' and undone % 2 != 0:
+ right_expr = op + right_expr
+ elif op == '+':
+ while len(separated) > 1 and set(separated[-1]) <= self.OP_CHARS:
+ right_expr = separated.pop() + right_expr
+ if separated[-1][-1:] in self.OP_CHARS:
+ right_expr = separated.pop() + right_expr
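+                # below: rejoin terms that this +/- split broke apart inside a
+                # higher-precedence *,%,/,** expression (e.g. '2*-3')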
+                # hanging op at end of left => unary + (strip) or - (push right)
+                separated.append(right_expr)
+                dm_ops = ('*', '%', '/', '**')
+                dm_chars = set(''.join(dm_ops))
+
+                def yield_terms(s):
+                    skip = False
+                    for i, term in enumerate(s[:-1]):
+                        if skip:
+                            skip = False
+                            continue
+                        if not (dm_chars & set(term)):
+                            yield term
+                            continue
+                        for dm_op in dm_ops:
+                            bodmas = list(self._separate(term, dm_op, skip_delims=skip_delim))
+                            if len(bodmas) > 1 and not bodmas[-1].strip():
+                                bodmas[-1] = (op if op == '-' else '') + s[i + 1]
+                                yield dm_op.join(bodmas)
+                                skip = True
+                                break
+                        else:
+                            if term:
+                                yield term
+
+                    if not skip and s[-1]:
+                        yield s[-1]
+
+                separated = list(yield_terms(separated))
+                right_expr = separated.pop() if len(separated) > 1 else None
+                expr = op.join(separated)
+ if right_expr is None:
+ continue
+ return op, separated, right_expr
+
def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion):
if op in ('||', '&&'):
if (op == '&&') ^ _js_ternary(left_val):
@@ -662,7 +779,7 @@ class JSInterpreter(object):
elif op == '?':
right_expr = _js_ternary(left_val, *self._separate(right_expr, ':', 1))
- right_val = self.interpret_expression(right_expr, local_vars, allow_recursion)
+ right_val = self.interpret_expression(right_expr, local_vars, allow_recursion) if right_expr else left_val
opfunc = op and next((v for k, v in self._all_operators() if k == op), None)
if not opfunc:
return right_val
@@ -686,6 +803,8 @@ class JSInterpreter(object):
raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
def _dump(self, obj, namespace):
+ if obj is JS_Undefined:
+ return 'undefined'
try:
return json.dumps(obj)
except TypeError:
@@ -705,51 +824,9 @@ class JSInterpreter(object):
_FINALLY_RE = re.compile(r'finally\s*\{')
_SWITCH_RE = re.compile(r'switch\s*\(')
- def handle_operators(self, expr, local_vars, allow_recursion):
-
- for op, _ in self._all_operators():
- # hackety: </> have higher priority than <</>>, but don't confuse them
- skip_delim = (op + op) if op in '<>*?' else None
- if op == '?':
- skip_delim = (skip_delim, '?.')
- separated = list(self._separate(expr, op, skip_delims=skip_delim))
- if len(separated) < 2:
- continue
-
- right_expr = separated.pop()
- # handle operators that are both unary and binary, minimal BODMAS
- if op in ('+', '-'):
- # simplify/adjust consecutive instances of these operators
- undone = 0
- separated = [s.strip() for s in separated]
- while len(separated) > 1 and not separated[-1]:
- undone += 1
- separated.pop()
- if op == '-' and undone % 2 != 0:
- right_expr = op + right_expr
- elif op == '+':
- while len(separated) > 1 and set(separated[-1]) <= self.OP_CHARS:
- right_expr = separated.pop() + right_expr
- if separated[-1][-1:] in self.OP_CHARS:
- right_expr = separated.pop() + right_expr
- # hanging op at end of left => unary + (strip) or - (push right)
- left_val = separated[-1] if separated else ''
- for dm_op in ('*', '%', '/', '**'):
- bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim))
- if len(bodmas) > 1 and not bodmas[-1].strip():
- expr = op.join(separated) + op + right_expr
- if len(separated) > 1:
- separated.pop()
- right_expr = op.join((left_val, right_expr))
- else:
- separated = [op.join((left_val, right_expr))]
- right_expr = None
- break
- if right_expr is None:
- continue
-
- left_val = self.interpret_expression(op.join(separated), local_vars, allow_recursion)
- return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion), True
+ def _eval_operator(self, op, left_expr, right_expr, expr, local_vars, allow_recursion):
+ left_val = self.interpret_expression(left_expr, local_vars, allow_recursion)
+ return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion)
@Debugger.wrap_interpreter
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
@@ -805,15 +882,19 @@ class JSInterpreter(object):
else:
raise self.Exception('Unsupported object {obj:.100}'.format(**locals()), expr=expr)
+        # apply unary operators (from the extended _UNARY_OPERATORS_X above)
for op, _ in _UNARY_OPERATORS_X:
if not expr.startswith(op):
continue
operand = expr[len(op):]
- if not operand or operand[0] != ' ':
+ if not operand or (op.isalpha() and operand[0] != ' '):
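+                # word ops ('void', 'typeof') need a following space; '!' does not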
continue
- op_result = self.handle_operators(expr, local_vars, allow_recursion)
- if op_result:
- return op_result[0], should_return
+ separated = self._separate_at_op(operand, max_split=1)
+ if separated:
+ next_op, separated, right_expr = separated
+ separated.append(right_expr)
+ operand = next_op.join(separated)
+ return self._eval_operator(op, operand, '', expr, local_vars, allow_recursion), should_return
if expr.startswith('{'):
inner, outer = self._separate_at_paren(expr)
@@ -1008,15 +1089,18 @@ class JSInterpreter(object):
m = re.match(r'''(?x)
(?P<assign>
- (?P<out>{_NAME_RE})(?:\[(?P<out_idx>(?:.+?\]\s*\[)*.+?)\])?\s*
+ (?P<out>{_NAME_RE})(?P<out_idx>(?:\[{_NESTED_BRACKETS}\])+)?\s*
(?P<op>{_OPERATOR_RE})?
=(?!=)(?P<expr>.*)$
)|(?P<return>
(?!if|return|true|false|null|undefined|NaN|Infinity)(?P<name>{_NAME_RE})$
- )|(?P<indexing>
- (?P<in>{_NAME_RE})\[(?P<in_idx>(?:.+?\]\s*\[)*.+?)\]$
)|(?P<attribute>
- (?P<var>{_NAME_RE})(?:(?P<nullish>\?)?\.(?P<member>[^(]+)|\[(?P<member2>[^\]]+)\])\s*
+ (?P<var>{_NAME_RE})(?:
+ (?P<nullish>\?)?\.(?P<member>[^(]+)|
+ \[(?P<member2>{_NESTED_BRACKETS})\]
+ )\s*
+ )|(?P<indexing>
+ (?P<in>{_NAME_RE})(?P<in_idx>\[.+\])$
)|(?P<function>
(?P<fname>{_NAME_RE})\((?P<args>.*)\)$
)'''.format(**globals()), expr)
@@ -1031,10 +1115,11 @@ class JSInterpreter(object):
elif left_val in (None, JS_Undefined):
raise self.Exception('Cannot index undefined variable ' + m.group('out'), expr=expr)
- indexes = re.split(r'\]\s*\[', m.group('out_idx'))
- for i, idx in enumerate(indexes, 1):
+ indexes = md['out_idx']
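+                # peel off one [...] per pass; descend into all but the last index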
+ while indexes:
+ idx, indexes = self._separate_at_paren(indexes)
idx = self.interpret_expression(idx, local_vars, allow_recursion)
- if i < len(indexes):
+ if indexes:
left_val = self._index(left_val, idx)
if isinstance(idx, float):
idx = int(idx)
@@ -1079,14 +1164,17 @@ class JSInterpreter(object):
if md.get('indexing'):
val = local_vars[m.group('in')]
- for idx in re.split(r'\]\s*\[', m.group('in_idx')):
+ indexes = m.group('in_idx')
+ while indexes:
+ idx, indexes = self._separate_at_paren(indexes)
idx = self.interpret_expression(idx, local_vars, allow_recursion)
val = self._index(val, idx)
return val, should_return
- op_result = self.handle_operators(expr, local_vars, allow_recursion)
- if op_result:
- return op_result[0], should_return
+ separated = self._separate_at_op(expr)
+ if separated:
+ op, separated, right_expr = separated
+ return self._eval_operator(op, op.join(separated), right_expr, expr, local_vars, allow_recursion), should_return
if md.get('attribute'):
variable, member, nullish = m.group('var', 'member', 'nullish')
@@ -1107,13 +1195,15 @@ class JSInterpreter(object):
def eval_method(variable, member):
if (variable, member) == ('console', 'debug'):
if Debugger.ENABLED:
- Debugger.write(self.interpret_expression('[{}]'.format(arg_str), local_vars, allow_recursion))
+ Debugger.write(self.interpret_expression('[{0}]'.format(arg_str), local_vars, allow_recursion))
return
types = {
'String': compat_str,
'Math': float,
'Array': list,
'Date': self.JS_Date,
+ 'RegExp': self.JS_RegExp,
+ # 'Error': self.Exception, # has no std static methods
}
obj = local_vars.get(variable)
if obj in (JS_Undefined, None):
@@ -1121,7 +1211,7 @@ class JSInterpreter(object):
if obj is JS_Undefined:
try:
if variable not in self._objects:
- self._objects[variable] = self.extract_object(variable)
+ self._objects[variable] = self.extract_object(variable, local_vars)
obj = self._objects[variable]
except self.Exception:
if not nullish:
@@ -1132,7 +1222,7 @@ class JSInterpreter(object):
# Member access
if arg_str is None:
- return self._index(obj, member)
+ return self._index(obj, member, nullish)
# Function call
argvals = [
@@ -1275,7 +1365,8 @@ class JSInterpreter(object):
assertion(len(argvals) == 2, 'takes exactly two arguments')
# TODO: argvals[1] callable, other Py vs JS edge cases
if isinstance(argvals[0], self.JS_RegExp):
- count = 0 if argvals[0].flags & self.JS_RegExp.RE_FLAGS['g'] else 1
+ # access JS member with Py reserved name
+ count = 0 if self._index(argvals[0], 'global') else 1
assertion(member != 'replaceAll' or count == 0,
'replaceAll must be called with a global RegExp')
return argvals[0].sub(argvals[1], obj, count=count)
@@ -1316,7 +1407,7 @@ class JSInterpreter(object):
for v in self._separate(list_txt):
yield self.interpret_expression(v, local_vars, allow_recursion)
- def extract_object(self, objname):
+ def extract_object(self, objname, *global_stack):
_FUNC_NAME_RE = r'''(?:{n}|"{n}"|'{n}')'''.format(n=_NAME_RE)
obj = {}
fields = next(filter(None, (
@@ -1337,7 +1428,8 @@ class JSInterpreter(object):
fields):
argnames = self.build_arglist(f.group('args'))
name = remove_quotes(f.group('key'))
- obj[name] = function_with_repr(self.build_function(argnames, f.group('code')), 'F<{0}>'.format(name))
+ obj[name] = function_with_repr(
+ self.build_function(argnames, f.group('code'), *global_stack), 'F<{0}>'.format(name))
return obj
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 61705d1f0..9b0d77a23 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -405,6 +405,10 @@ def parseOpts(overrideArguments=None):
action='store_true', dest='listformats',
help='List all available formats of requested videos')
video_format.add_option(
+ '--no-list-formats',
+ action='store_false', dest='listformats',
+ help='Do not list available formats of requested videos (default)')
+ video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
help=optparse.SUPPRESS_HELP)
@@ -413,6 +417,17 @@ def parseOpts(overrideArguments=None):
action='store_false', dest='youtube_include_dash_manifest',
help='Do not download the DASH manifests and related data on YouTube videos')
video_format.add_option(
+ '--youtube-player-js-variant',
+ action='store', dest='youtube_player_js_variant',
+ help='For YouTube, the player javascript variant to use for n/sig deciphering; `actual` to follow the site; default `%default`.',
+ choices=('actual', 'main', 'tcc', 'tce', 'es5', 'es6', 'tv', 'tv_es6', 'phone', 'tablet'),
+ default='actual', metavar='VARIANT')
+ video_format.add_option(
+ '--youtube-player-js-version',
+ action='store', dest='youtube_player_js_version',
+    help='For YouTube, the player javascript version to use for n/sig deciphering, specified as `signature_timestamp@hash`, or `actual` to follow the site; default `%default`.',
+ default='actual', metavar='STS@HASH')
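+    # Illustrative invocation (a sketch: `19900@abcdef01` below is a made-up
+    # STS@HASH value, not a real player version):
+    #   youtube-dl --youtube-player-js-variant tv \
+    #       --youtube-player-js-version 19900@abcdef01 'https://www.youtube.com/watch?v=...'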
+ video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,
help=(
diff --git a/youtube_dl/traversal.py b/youtube_dl/traversal.py
index 834cfef7f..1de48b145 100644
--- a/youtube_dl/traversal.py
+++ b/youtube_dl/traversal.py
@@ -5,6 +5,10 @@
from .utils import (
dict_get,
get_first,
+ require,
+ subs_list_to_dict,
T,
traverse_obj,
+ unpack,
+ value,
)
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index ac1e78002..02a49ff49 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -53,6 +53,8 @@ from .compat import (
compat_etree_fromstring,
compat_etree_iterfind,
compat_expanduser,
+ compat_filter as filter,
+ compat_filter_fns,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
@@ -1859,6 +1861,39 @@ def write_json_file(obj, fn):
raise
+class partial_application(object):
+ """Allow a function to use pre-set argument values"""
+
+ # see _try_bind_args()
+ try:
+ inspect.signature
+
+ @staticmethod
+ def required_args(fn):
+ return [
+ param.name for param in inspect.signature(fn).parameters.values()
+ if (param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
+ and param.default is inspect.Parameter.empty)]
+
+ except AttributeError:
+
+ # Py < 3.3
+ @staticmethod
+ def required_args(fn):
+ fn_args = inspect.getargspec(fn)
+ n_defaults = len(fn_args.defaults or [])
+ return (fn_args.args or [])[:-n_defaults if n_defaults > 0 else None]
+
+ def __new__(cls, func):
+ @functools.wraps(func)
+ def wrapped(*args, **kwargs):
+ if set(cls.required_args(func)[len(args):]).difference(kwargs):
+ return functools.partial(func, *args, **kwargs)
+ return func(*args, **kwargs)
+
+        return wrapped
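+    # e.g. on int_or_none (decorated below): int_or_none(scale=1000) returns a
+    # partial awaiting `v`, while int_or_none('42') still calls straight through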
+
+
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
@@ -3152,6 +3187,7 @@ def extract_timezone(date_str):
return timezone, date_str
+@partial_application
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
@@ -3229,6 +3265,7 @@ def unified_timestamp(date_str, day_first=True):
return calendar.timegm(timetuple) + pm_delta * 3600 - compat_datetime_timedelta_total_seconds(timezone)
+@partial_application
def determine_ext(url, default_ext='unknown_video'):
if url is None or '.' not in url:
return default_ext
@@ -3807,6 +3844,7 @@ def base_url(url):
return re.match(r'https?://[^?#&]+/', url).group()
+@partial_application
def urljoin(base, path):
path = _decode_compat_str(path, encoding='utf-8', or_none=True)
if not path:
@@ -3831,6 +3869,7 @@ class PUTRequest(compat_urllib_request.Request):
return 'PUT'
+@partial_application
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1, base=None):
if get_attr:
if v is not None:
@@ -3857,6 +3896,7 @@ def str_to_int(int_str):
return int_or_none(int_str)
+@partial_application
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
@@ -3891,38 +3931,46 @@ def parse_duration(s):
return None
s = s.strip()
+ if not s:
+ return None
days, hours, mins, secs, ms = [None] * 5
- m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
+ m = re.match(r'''(?x)
+ (?P<before_secs>
+ (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?
+ (?P<mins>[0-9]+):)?
+ (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
+ (?:[.:](?P<ms>[0-9]+))?Z?$
+ ''', s)
if m:
- days, hours, mins, secs, ms = m.groups()
+ days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
else:
m = re.match(
r'''(?ix)(?:P?
(?:
- [0-9]+\s*y(?:ears?)?\s*
+ [0-9]+\s*y(?:ears?)?,?\s*
)?
(?:
- [0-9]+\s*m(?:onths?)?\s*
+ [0-9]+\s*m(?:onths?)?,?\s*
)?
(?:
- [0-9]+\s*w(?:eeks?)?\s*
+ [0-9]+\s*w(?:eeks?)?,?\s*
)?
(?:
- (?P<days>[0-9]+)\s*d(?:ays?)?\s*
+ (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
)?
T)?
(?:
- (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
+ (?P<hours>[0-9]+)\s*h(?:(?:ou)?rs?)?,?\s*
)?
(?:
- (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
+ (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
)?
(?:
- (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
+ (?P<secs>[0-9]+)(?:\.(?P<ms>[0-9]+))?\s*s(?:ec(?:ond)?s?)?\s*
)?Z?$''', s)
if m:
- days, hours, mins, secs, ms = m.groups()
+ days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
if m:
@@ -3930,17 +3978,13 @@ def parse_duration(s):
else:
return None
- duration = 0
- if secs:
- duration += float(secs)
- if mins:
- duration += float(mins) * 60
- if hours:
- duration += float(hours) * 60 * 60
- if days:
- duration += float(days) * 24 * 60 * 60
- if ms:
- duration += float(ms)
+ duration = (
+ ((((float(days) * 24) if days else 0)
+ + (float(hours) if hours else 0)) * 60
+ + (float(mins) if mins else 0)) * 60
+ + (float(secs) if secs else 0)
+ + (float(ms) / 10 ** len(ms) if ms else 0))
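+    # NB ms is bare digits scaled by its length: '1:02.05' -> 62.05, '3.5s' -> 3.5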
+
return duration
@@ -4204,12 +4248,16 @@ def lowercase_escape(s):
s)
-def escape_rfc3986(s):
+def escape_rfc3986(s, safe=None):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0):
s = _encode_compat_str(s, 'utf-8')
+ if safe is not None:
+ safe = _encode_compat_str(safe, 'utf-8')
+ if safe is None:
+ safe = b"%/;:@&=+$,!~*'()?#[]"
# ensure unicode: after quoting, it can always be converted
- return compat_str(compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]"))
+ return compat_str(compat_urllib_parse.quote(s, safe))
def escape_url(url):
@@ -4247,6 +4295,7 @@ def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
+@partial_application
def update_url(url, **kwargs):
"""Replace URL components specified by kwargs
url: compat_str or parsed URL tuple
@@ -4268,6 +4317,7 @@ def update_url(url, **kwargs):
return compat_urllib_parse.urlunparse(url._replace(**kwargs))
+@partial_application
def update_url_query(url, query):
return update_url(url, query_update=query)
@@ -4694,30 +4744,45 @@ def parse_codecs(codecs_str):
if not codecs_str:
return {}
split_codecs = list(filter(None, map(
- lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
- vcodec, acodec = None, None
+ lambda s: s.strip(), codecs_str.strip().split(','))))
+ vcodec, acodec, hdr = None, None, None
for full_codec in split_codecs:
- codec = full_codec.split('.')[0]
- if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora'):
- if not vcodec:
- vcodec = full_codec
- elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
+ codec, rest = full_codec.partition('.')[::2]
+ codec = codec.lower()
+ full_codec = '.'.join((codec, rest)) if rest else codec
+ codec = re.sub(r'0+(?=\d)', '', codec)
+ if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
+ 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
+ if vcodec:
+ continue
+ vcodec = full_codec
+ if codec in ('dvh1', 'dvhe'):
+ hdr = 'DV'
+ elif codec in ('av1', 'vp9'):
+ n, m = {
+ 'av1': (2, '10'),
+ 'vp9': (0, '2'),
+ }[codec]
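+                # HDR10 if dot-field n of the codec tail (leading zeros stripped)
+                # equals m: av1 bit depth '10', vp9 profile '2'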
+ if (rest.split('.', n + 1)[n:] or [''])[0].lstrip('0') == m:
+ hdr = 'HDR10'
+ elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
+ 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
if not acodec:
acodec = full_codec
else:
- write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
- if not vcodec and not acodec:
- if len(split_codecs) == 2:
- return {
- 'vcodec': split_codecs[0],
- 'acodec': split_codecs[1],
- }
- else:
- return {
+ write_string('WARNING: Unknown codec %s\n' % (full_codec,), sys.stderr)
+
+ return (
+ filter_dict({
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
- }
- return {}
+ 'dynamic_range': hdr,
+ }) if vcodec or acodec
+ else {
+ 'vcodec': split_codecs[0],
+ 'acodec': split_codecs[1],
+ } if len(split_codecs) == 2
+ else {})
def urlhandle_detect_ext(url_handle):
@@ -6279,6 +6344,7 @@ def traverse_obj(obj, *paths, **kwargs):
Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
- `any`-builtin: Take the first matching object and return it, resetting branching.
- `all`-builtin: Take all matching objects and return them as a list, resetting branching.
+ - `filter`-builtin: Return the value if it is truthy, `None` otherwise.
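+          E.g. traverse_obj({'a': [0, 1, None]}, ('a', Ellipsis, filter)) -> [1]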
`tuple`, `list`, and `dict` all support nested paths and branches.
@@ -6320,6 +6386,11 @@ def traverse_obj(obj, *paths, **kwargs):
# instant compat
str = compat_str
+ from .compat import (
+ compat_builtins_dict as dict_, # the basic dict type
+        compat_dict as dict, # dict preserving insertion order
+ )
+
casefold = lambda k: compat_casefold(k) if isinstance(k, str) else k
if isinstance(expected_type, type):
@@ -6402,7 +6473,7 @@ def traverse_obj(obj, *paths, **kwargs):
if not branching: # string traversal
result = ''.join(result)
- elif isinstance(key, dict):
+ elif isinstance(key, dict_):
iter_obj = ((k, _traverse_obj(obj, v, False, is_last)) for k, v in key.items())
result = dict((k, v if v is not None else default) for k, v in iter_obj
if v is not None or default is not NO_DEFAULT) or None
@@ -6480,7 +6551,7 @@ def traverse_obj(obj, *paths, **kwargs):
has_branched = False
key = None
- for last, key in lazy_last(variadic(path, (str, bytes, dict, set))):
+ for last, key in lazy_last(variadic(path, (str, bytes, dict_, set))):
if not casesense and isinstance(key, str):
key = compat_casefold(key)
@@ -6493,6 +6564,11 @@ def traverse_obj(obj, *paths, **kwargs):
objs = (list(filtered_objs),)
continue
+ # filter might be from __builtin__, future_builtins, or itertools.ifilter
+ if key in compat_filter_fns:
+ objs = filter(None, objs)
+ continue
+
if __debug__ and callable(key):
# Verify function signature
_try_bind_args(key, None, None)
@@ -6505,10 +6581,10 @@ def traverse_obj(obj, *paths, **kwargs):
objs = from_iterable(new_objs)
- if test_type and not isinstance(key, (dict, list, tuple)):
+ if test_type and not isinstance(key, (dict_, list, tuple)):
objs = map(type_test, objs)
- return objs, has_branched, isinstance(key, dict)
+ return objs, has_branched, isinstance(key, dict_)
def _traverse_obj(obj, path, allow_empty, test_type):
results, has_branched, is_dict = apply_path(obj, path, test_type)
@@ -6531,6 +6607,76 @@ def traverse_obj(obj, *paths, **kwargs):
return None if default is NO_DEFAULT else default
+def value(value):
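+    """Make a function that ignores its argument and returns `value`"""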
+ return lambda _: value
+
+
+class require(ExtractorError):
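+    """Raise 'Unable to extract <name>' if the checked value is None"""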
+ def __init__(self, name, expected=False):
+ super(require, self).__init__(
+ 'Unable to extract {0}'.format(name), expected=expected)
+
+ def __call__(self, value):
+ if value is None:
+ raise self
+
+ return value
+
+
+@partial_application
+# typing: (subs: list[dict], /, *, lang='und', ext=None) -> dict[str, list[dict]]
+def subs_list_to_dict(subs, lang='und', ext=None):
+ """
+ Convert subtitles from a traversal into a subtitle dict.
+ The path should have an `all` immediately before this function.
+
+ Arguments:
+ `lang` The default language tag for subtitle dicts with no
+                   `id` (`und`: undefined)
+ `ext` The default value for `ext` in the subtitle dicts
+
+ In the dict you can set the following additional items:
+ `id` The language tag to which the subtitle dict should be added
+ `quality` The sort order for each subtitle dict
+ """
+
+ result = collections.defaultdict(list)
+
+ for sub in subs:
+ tn_url = url_or_none(sub.pop('url', None))
+ if tn_url:
+ sub['url'] = tn_url
+ elif not sub.get('data'):
+ continue
+ sub_lang = sub.pop('id', None)
+ if not isinstance(sub_lang, compat_str):
+ if not lang:
+ continue
+ sub_lang = lang
+ sub_ext = sub.get('ext')
+ if not isinstance(sub_ext, compat_str):
+ if not ext:
+ sub.pop('ext', None)
+ else:
+ sub['ext'] = ext
+ result[sub_lang].append(sub)
+ result = dict(result)
+
+ for subs in result.values():
+ subs.sort(key=lambda x: x.pop('quality', 0) or 0)
+
+ return result
+
+
+def unpack(func, **kwargs):
+ """Make a function that applies `partial(func, **kwargs)` to its argument as *args"""
+ @functools.wraps(func)
+ def inner(items):
+ return func(*items, **kwargs)
+
+ return inner
+
+
def T(*x):
""" For use in yt-dl instead of {type, ...} or set((type, ...)) """
return set(x)
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index b82fbc702..c70d9d2af 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
from __future__ import unicode_literals
-__version__ = '2021.12.17'
+__version__ = '2025.04.07'