314 files changed, 13875 insertions, 5393 deletions
diff --git a/.travis.yml b/.travis.yml
index 511bee64c..cc21fae8f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,9 +5,8 @@ python:
   - "3.2"
   - "3.3"
   - "3.4"
-before_install:
-  - sudo apt-get update -qq
-  - sudo apt-get install -yqq rtmpdump
+  - "3.5"
+sudo: false
 script: nosetests test --verbose
 notifications:
   email:
@@ -135,3 +135,17 @@ Bernhard Minks
 sceext
 Zach Bruggeman
 Tjark Saul
+slangangular
+Behrouz Abbasi
+ngld
+nyuszika7h
+Shaun Walbridge
+Lee Jenkins
+Anssi Hannula
+Lukáš Lalinský
+Qijiang Fan
+Rémy Léone
+Marco Ferragina
+reiv
+Muratcan Simsek
+Evan Lu
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 588b15bde..f3fe0d432 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,20 @@
-**Please include the full output of youtube-dl when run with `-v`**.
-
-The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+**Please include the full output of youtube-dl when run with `-v`**, i.e. add the `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
+```
+$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj
+[debug] System config: []
+[debug] User config: []
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
+[debug] youtube-dl version 2015.12.06
+[debug] Git HEAD: 135392e
+[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
+[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
+[debug] Proxy map: {}
+...
+```
+**Do not post screenshots of the verbose log; only plain text is acceptable.**
+
+The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
 
 Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
 
@@ -16,19 +30,19 @@ So please elaborate on what feature you are requesting, or what bug you want to
 
 If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
 
-For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
 
-If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
+If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
 
-**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL.
 
 ### Are you using the latest version?
 
-Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
 
 ### Is the issue already documented?
 
-Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/rg3/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
 
 ### Why are existing options not enough?
 
@@ -114,18 +128,19 @@ If you want to add support for a new site, you can follow this quick list (assum
         webpage = self._download_webpage(url, video_id)
 
         # TODO more code goes here, for example ...
-        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+        title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
 
         return {
             'id': video_id,
             'title': title,
             'description': self._og_search_description(webpage),
+            'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
             # TODO more properties (see youtube_dl/extractor/common.py)
         }
     ```
 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
-7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
 8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
 9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
@@ -61,34 +61,34 @@ youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
 	chmod a+x youtube-dl
 
 README.md: youtube_dl/*.py youtube_dl/*/*.py
-	COLUMNS=80 python youtube_dl/__main__.py --help | python devscripts/make_readme.py
+	COLUMNS=80 $(PYTHON) youtube_dl/__main__.py --help | $(PYTHON) devscripts/make_readme.py
 
 CONTRIBUTING.md: README.md
-	python devscripts/make_contributing.py README.md CONTRIBUTING.md
+	$(PYTHON) devscripts/make_contributing.py README.md CONTRIBUTING.md
 
 supportedsites:
-	python devscripts/make_supportedsites.py docs/supportedsites.md
+	$(PYTHON) devscripts/make_supportedsites.py docs/supportedsites.md
 
 README.txt: README.md
 	pandoc -f markdown -t plain README.md -o README.txt
 
 youtube-dl.1: README.md
-	python devscripts/prepare_manpage.py >youtube-dl.1.temp.md
+	$(PYTHON) devscripts/prepare_manpage.py >youtube-dl.1.temp.md
 	pandoc -s -f markdown -t man youtube-dl.1.temp.md -o youtube-dl.1
 	rm -f youtube-dl.1.temp.md
 
 youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
-	python devscripts/bash-completion.py
+	$(PYTHON) devscripts/bash-completion.py
 
 bash-completion: youtube-dl.bash-completion
 
 youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
-	python devscripts/zsh-completion.py
+	$(PYTHON) devscripts/zsh-completion.py
 
 zsh-completion: youtube-dl.zsh
 
 youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
-	python devscripts/fish-completion.py
+	$(PYTHON) devscripts/fish-completion.py
 
 fish-completion: youtube-dl.fish
@@ -9,6 +9,7 @@ youtube-dl - download videos from youtube.com or other video platforms
 - [VIDEO SELECTION](#video-selection)
 - [FAQ](#faq)
 - [DEVELOPER INSTRUCTIONS](#developer-instructions)
+- [EMBEDDING YOUTUBE-DL](#embedding-youtube-dl)
 - [BUGS](#bugs)
 - [COPYRIGHT](#copyright)
 
@@ -34,7 +35,7 @@ You can also use pip:
 
     sudo pip install youtube-dl
 
-Alternatively, refer to the developer instructions below for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
+Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see the [youtube-dl Download Page](https://rg3.github.io/youtube-dl/download.html).
 
 # DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
@@ -48,110 +49,220 @@ which means you can modify it, redistribute it or use it however you like.
 # OPTIONS
     -h, --help                       Print this help text and exit
     --version                        Print program version and exit
-    -U, --update                     Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)
-    -i, --ignore-errors              Continue on download errors, for example to skip unavailable videos in a playlist
-    --abort-on-error                 Abort downloading of further videos (in the playlist or the command line) if an error occurs
+    -U, --update                     Update this program to latest version. Make
+                                     sure that you have sufficient permissions
+                                     (run with sudo if needed)
+    -i, --ignore-errors              Continue on download errors, for example to
+                                     skip unavailable videos in a playlist
+    --abort-on-error                 Abort downloading of further videos (in the
+                                     playlist or the command line) if an error
+                                     occurs
     --dump-user-agent                Display the current browser identification
     --list-extractors                List all supported extractors
-    --extractor-descriptions         Output descriptions of all supported extractors
-    --force-generic-extractor        Force extraction to use the generic extractor
-    --default-search PREFIX          Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple".
-                                     Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The
-                                     default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.
-    --ignore-config                  Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: Do not read the user configuration
-                                     in ~/.config/youtube-dl/config (%APPDATA%/youtube-dl/config.txt on Windows)
-    --flat-playlist                  Do not extract the videos of a playlist, only list them.
+    --extractor-descriptions         Output descriptions of all supported
+                                     extractors
+    --force-generic-extractor        Force extraction to use the generic
+                                     extractor
+    --default-search PREFIX          Use this prefix for unqualified URLs. For
+                                     example "gvsearch2:" downloads two videos
+                                     from google videos for youtube-dl "large
+                                     apple". Use the value "auto" to let
+                                     youtube-dl guess ("auto_warning" to emit a
+                                     warning when guessing). "error" just throws
+                                     an error. The default value "fixup_error"
+                                     repairs broken URLs, but emits an error if
+                                     this is not possible instead of searching.
+    --ignore-config                  Do not read configuration files. When given
+                                     in the global configuration file /etc
+                                     /youtube-dl.conf: Do not read the user
+                                     configuration in ~/.config/youtube-
+                                     dl/config (%APPDATA%/youtube-dl/config.txt
+                                     on Windows)
+    --flat-playlist                  Do not extract the videos of a playlist,
+                                     only list them.
     --no-color                       Do not emit color codes in output
 
 ## Network Options:
-    --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection
+    --proxy URL                      Use the specified HTTP/HTTPS proxy. Pass in
+                                     an empty string (--proxy "") for direct
+                                     connection
    --socket-timeout SECONDS         Time to wait before giving up, in seconds
-    --source-address IP              Client-side IP address to bind to (experimental)
-    -4, --force-ipv4                 Make all connections via IPv4 (experimental)
-    -6, --force-ipv6                 Make all connections via IPv6 (experimental)
-    --cn-verification-proxy URL      Use this proxy to verify the IP address for some Chinese sites. The default proxy specified by --proxy (or none, if the options is
-                                     not present) is used for the actual downloading. (experimental)
+    --source-address IP              Client-side IP address to bind to
+                                     (experimental)
+    -4, --force-ipv4                 Make all connections via IPv4
+                                     (experimental)
+    -6, --force-ipv6                 Make all connections via IPv6
+                                     (experimental)
+    --cn-verification-proxy URL      Use this proxy to verify the IP address for
+                                     some Chinese sites. The default proxy
+                                     specified by --proxy (or none, if the
+                                     options is not present) is used for the
+                                     actual downloading. (experimental)
 
 ## Video Selection:
     --playlist-start NUMBER          Playlist video to start at (default is 1)
     --playlist-end NUMBER            Playlist video to end at (default is last)
-    --playlist-items ITEM_SPEC       Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8"
-                                     if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will
-                                     download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.
-    --match-title REGEX              Download only matching titles (regex or caseless sub-string)
-    --reject-title REGEX             Skip download for matching titles (regex or caseless sub-string)
+    --playlist-items ITEM_SPEC       Playlist video items to download. Specify
+                                     indices of the videos in the playlist
+                                     separated by commas like: "--playlist-items
+                                     1,2,5,8" if you want to download videos
+                                     indexed 1, 2, 5, 8 in the playlist. You can
+                                     specify range: "--playlist-items
+                                     1-3,7,10-13", it will download the videos
+                                     at index 1, 2, 3, 7, 10, 11, 12 and 13.
+    --match-title REGEX              Download only matching titles (regex or
+                                     caseless sub-string)
+    --reject-title REGEX             Skip download for matching titles (regex or
+                                     caseless sub-string)
     --max-downloads NUMBER           Abort after downloading NUMBER files
-    --min-filesize SIZE              Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)
-    --max-filesize SIZE              Do not download any videos larger than SIZE (e.g. 50k or 44.6m)
+    --min-filesize SIZE              Do not download any videos smaller than
+                                     SIZE (e.g. 50k or 44.6m)
+    --max-filesize SIZE              Do not download any videos larger than SIZE
+                                     (e.g. 50k or 44.6m)
     --date DATE                      Download only videos uploaded in this date
-    --datebefore DATE                Download only videos uploaded on or before this date (i.e. inclusive)
-    --dateafter DATE                 Download only videos uploaded on or after this date (i.e. inclusive)
-    --min-views COUNT                Do not download any videos with less than COUNT views
-    --max-views COUNT                Do not download any videos with more than COUNT views
-    --match-filter FILTER            Generic video filter (experimental). Specify any key (see help for -o for a list of available keys) to match if the key is present,
-                                     !key to check if the key is not present,key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare against
-                                     a number, and & to require multiple matches. Values which are not known are excluded unless you put a question mark (?) after the
-                                     operator.For example, to only match videos that have been liked more than 100 times and disliked less than 50 times (or the dislike
-                                     functionality is not available at the given service), but who also have a description, use --match-filter "like_count > 100 &
+    --datebefore DATE                Download only videos uploaded on or before
+                                     this date (i.e. inclusive)
+    --dateafter DATE                 Download only videos uploaded on or after
+                                     this date (i.e. inclusive)
+    --min-views COUNT                Do not download any videos with less than
+                                     COUNT views
+    --max-views COUNT                Do not download any videos with more than
+                                     COUNT views
+    --match-filter FILTER            Generic video filter (experimental).
+                                     Specify any key (see help for -o for a list
+                                     of available keys) to match if the key is
+                                     present, !key to check if the key is not
+                                     present,key > NUMBER (like "comment_count >
+                                     12", also works with >=, <, <=, !=, =) to
+                                     compare against a number, and & to require
+                                     multiple matches. Values which are not
+                                     known are excluded unless you put a
+                                     question mark (?) after the operator. For
+                                     example, to only match videos that have
+                                     been liked more than 100 times and disliked
+                                     less than 50 times (or the dislike
+                                     functionality is not available at the given
+                                     service), but who also have a description,
+                                     use --match-filter "like_count > 100 & dislike_count <? 50 & description" .
-    --no-playlist                    Download only the video, if the URL refers to a video and a playlist.
-    --yes-playlist                   Download the playlist, if the URL refers to a video and a playlist.
-    --age-limit YEARS                Download only videos suitable for the given age
-    --download-archive FILE          Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.
-    --include-ads                    Download advertisements as well (experimental)
+    --no-playlist                    Download only the video, if the URL refers
+                                     to a video and a playlist.
+    --yes-playlist                   Download the playlist, if the URL refers to
+                                     a video and a playlist.
+    --age-limit YEARS                Download only videos suitable for the given
+                                     age
+    --download-archive FILE          Download only videos not listed in the
+                                     archive file. Record the IDs of all
+                                     downloaded videos in it.
+    --include-ads                    Download advertisements as well
+                                     (experimental)
 
 ## Download Options:
-    -r, --rate-limit LIMIT           Maximum download rate in bytes per second (e.g. 50K or 4.2M)
-    -R, --retries RETRIES            Number of retries (default is 10), or "infinite".
-    --buffer-size SIZE               Size of download buffer (e.g. 1024 or 16K) (default is 1024)
-    --no-resize-buffer               Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.
+    -r, --rate-limit LIMIT           Maximum download rate in bytes per second
+                                     (e.g. 50K or 4.2M)
+    -R, --retries RETRIES            Number of retries (default is 10), or
+                                     "infinite".
+    --buffer-size SIZE               Size of download buffer (e.g. 1024 or 16K)
+                                     (default is 1024)
+    --no-resize-buffer               Do not automatically adjust the buffer
+                                     size. By default, the buffer size is
+                                     automatically resized from an initial value
+                                     of SIZE.
     --playlist-reverse               Download playlist videos in reverse order
-    --xattr-set-filesize             Set file xattribute ytdl.filesize with expected filesize (experimental)
-    --hls-prefer-native              Use the native HLS downloader instead of ffmpeg (experimental)
-    --external-downloader COMMAND    Use the specified external downloader. Currently supports aria2c,curl,httpie,wget
-    --external-downloader-args ARGS  Give these arguments to the external downloader
+    --xattr-set-filesize             Set file xattribute ytdl.filesize with
+                                     expected filesize (experimental)
+    --hls-prefer-native              Use the native HLS downloader instead of
+                                     ffmpeg (experimental)
+    --external-downloader COMMAND    Use the specified external downloader.
+                                     Currently supports
+                                     aria2c,axel,curl,httpie,wget
+    --external-downloader-args ARGS  Give these arguments to the external
+                                     downloader
 
 ## Filesystem Options:
-    -a, --batch-file FILE            File containing URLs to download ('-' for stdin)
+    -a, --batch-file FILE            File containing URLs to download ('-' for
+                                     stdin)
     --id                             Use only video ID in file name
-    -o, --output TEMPLATE            Output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader
-                                     nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(format)s for
-                                     the format description (like "22 - 1280x720" or "HD"), %(format_id)s for the unique id of the format (like YouTube's itags: "137"),
-                                     %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id,
-                                     %(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in,
-                                     %(playlist_index)s for the position in the playlist. %(height)s and %(width)s for the width and height of the video format.
-                                     %(resolution)s for a textual description of the resolution of the video format. %% for a literal percent. Use - to output to stdout.
-                                     Can also be used to download to a different directory, for example with -o '/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
-    --autonumber-size NUMBER         Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given
-    --restrict-filenames             Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames
-    -A, --auto-number                [deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000
-    -t, --title                      [deprecated] Use title in file name (default)
+    -o, --output TEMPLATE            Output filename template. Use %(title)s to
+                                     get the title, %(uploader)s for the
+                                     uploader name, %(uploader_id)s for the
+                                     uploader nickname if different,
+                                     %(autonumber)s to get an automatically
+                                     incremented number, %(ext)s for the
+                                     filename extension, %(format)s for the
+                                     format description (like "22 - 1280x720" or
+                                     "HD"), %(format_id)s for the unique id of
+                                     the format (like YouTube's itags: "137"),
+                                     %(upload_date)s for the upload date
+                                     (YYYYMMDD), %(extractor)s for the provider
+                                     (youtube, metacafe, etc), %(id)s for the
+                                     video id, %(playlist_title)s,
+                                     %(playlist_id)s, or %(playlist)s (=title if
+                                     present, ID otherwise) for the playlist the
+                                     video is in, %(playlist_index)s for the
+                                     position in the playlist. %(height)s and
+                                     %(width)s for the width and height of the
+                                     video format. %(resolution)s for a textual
+                                     description of the resolution of the video
+                                     format. %% for a literal percent. Use - to
+                                     output to stdout. Can also be used to
+                                     download to a different directory, for
+                                     example with -o '/my/downloads/%(uploader)s
+                                     /%(title)s-%(id)s.%(ext)s' .
+    --autonumber-size NUMBER         Specify the number of digits in
+                                     %(autonumber)s when it is present in output
+                                     filename template or --auto-number option
+                                     is given
+    --restrict-filenames             Restrict filenames to only ASCII
+                                     characters, and avoid "&" and spaces in
+                                     filenames
+    -A, --auto-number                [deprecated; use -o
+                                     "%(autonumber)s-%(title)s.%(ext)s" ] Number
+                                     downloaded files starting from 00000
+    -t, --title                      [deprecated] Use title in file name
+                                     (default)
     -l, --literal                    [deprecated] Alias of --title
     -w, --no-overwrites              Do not overwrite files
-    -c, --continue                   Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.
-    --no-continue                    Do not resume partially downloaded files (restart from beginning)
-    --no-part                        Do not use .part files - write directly into output file
-    --no-mtime                       Do not use the Last-modified header to set the file modification time
-    --write-description              Write video description to a .description file
+    -c, --continue                   Force resume of partially downloaded files.
+                                     By default, youtube-dl will resume
+                                     downloads if possible.
+    --no-continue                    Do not resume partially downloaded files
+                                     (restart from beginning)
+    --no-part                        Do not use .part files - write directly
+                                     into output file
+    --no-mtime                       Do not use the Last-modified header to set
+                                     the file modification time
+    --write-description              Write video description to a .description
+                                     file
     --write-info-json                Write video metadata to a .info.json file
-    --write-annotations              Write video annotations to a .annotations.xml file
-    --load-info FILE                 JSON file containing the video information (created with the "--write-info-json" option)
-    --cookies FILE                   File to read cookies from and dump cookie jar in
-    --cache-dir DIR                  Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl
-                                     or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may
-                                     change.
+    --write-annotations              Write video annotations to a
+                                     .annotations.xml file
+    --load-info FILE                 JSON file containing the video information
+                                     (created with the "--write-info-json"
+                                     option)
+    --cookies FILE                   File to read cookies from and dump cookie
+                                     jar in
+    --cache-dir DIR                  Location in the filesystem where youtube-dl
+                                     can store some downloaded information
+                                     permanently. By default $XDG_CACHE_HOME
+                                     /youtube-dl or ~/.cache/youtube-dl . At the
+                                     moment, only YouTube player files (for
+                                     videos with obfuscated signatures) are
+                                     cached, but that may change.
     --no-cache-dir                   Disable filesystem caching
     --rm-cache-dir                   Delete all filesystem cache files
 
 ## Thumbnail images:
     --write-thumbnail                Write thumbnail image to disk
     --write-all-thumbnails           Write all thumbnail image formats to disk
-    --list-thumbnails                Simulate and list all available thumbnail formats
+    --list-thumbnails                Simulate and list all available thumbnail
+                                     formats
 
 ## Verbosity / Simulation Options:
     -q, --quiet                      Activate quiet mode
     --no-warnings                    Ignore warnings
-    -s, --simulate                   Do not download the video and do not write anything to disk
+    -s, --simulate                   Do not download the video and do not write
+                                     anything to disk
     --skip-download                  Do not download the video
     -g, --get-url                    Simulate, quiet but print URL
     -e, --get-title                  Simulate, quiet but print title
@@ -161,86 +272,151 @@ which means you can modify it, redistribute it or use it however you like.
     --get-duration                   Simulate, quiet but print video length
     --get-filename                   Simulate, quiet but print output filename
     --get-format                     Simulate, quiet but print output format
-    -j, --dump-json                  Simulate, quiet but print JSON information. See --output for a description of available keys.
-    -J, --dump-single-json           Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist
-                                     information in a single line.
-    --print-json                     Be quiet and print the video information as JSON (video is still being downloaded).
+    -j, --dump-json                  Simulate, quiet but print JSON information.
+                                     See --output for a description of available
+                                     keys.
+    -J, --dump-single-json           Simulate, quiet but print JSON information
+                                     for each command-line argument. If the URL
If the URL + refers to a playlist, dump the whole + playlist information in a single line. + --print-json Be quiet and print the video information as + JSON (video is still being downloaded). --newline Output progress bar as new lines --no-progress Do not print progress bar --console-title Display progress in console titlebar -v, --verbose Print various debugging information - --dump-pages Print downloaded pages encoded using base64 to debug problems (very verbose) - --write-pages Write downloaded intermediary pages to files in the current directory to debug problems + --dump-pages Print downloaded pages encoded using base64 + to debug problems (very verbose) + --write-pages Write downloaded intermediary pages to + files in the current directory to debug + problems --print-traffic Display sent and read HTTP traffic -C, --call-home Contact the youtube-dl server for debugging - --no-call-home Do NOT contact the youtube-dl server for debugging + --no-call-home Do NOT contact the youtube-dl server for + debugging ## Workarounds: --encoding ENCODING Force the specified encoding (experimental) --no-check-certificate Suppress HTTPS certificate validation - --prefer-insecure Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube) + --prefer-insecure Use an unencrypted connection to retrieve + information about the video. (Currently + supported only for YouTube) --user-agent UA Specify a custom user agent - --referer URL Specify a custom referer, use if the video access is restricted to one domain - --add-header FIELD:VALUE Specify a custom HTTP header and its value, separated by a colon ':'. You can use this option multiple times - --bidi-workaround Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH - --sleep-interval SECONDS Number of seconds to sleep before each download. + --referer URL Specify a custom referer, use if the video + access is restricted to one domain + --add-header FIELD:VALUE Specify a custom HTTP header and its value, + separated by a colon ':'. You can use this + option multiple times + --bidi-workaround Work around terminals that lack + bidirectional text support. Requires bidiv + or fribidi executable in PATH + --sleep-interval SECONDS Number of seconds to sleep before each + download. ## Video Format Options: - -f, --format FORMAT Video format code, see the "FORMAT SELECTION" for all the info + -f, --format FORMAT Video format code, see the "FORMAT + SELECTION" for all the info --all-formats Download all available video formats - --prefer-free-formats Prefer free video formats unless a specific one is requested - -F, --list-formats List all available formats - --youtube-skip-dash-manifest Do not download the DASH manifests and related data on YouTube videos - --merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no - merge is required + --prefer-free-formats Prefer free video formats unless a specific + one is requested + -F, --list-formats List all available formats of requested + videos + --youtube-skip-dash-manifest Do not download the DASH manifests and + related data on YouTube videos + --merge-output-format FORMAT If a merge is required (e.g. + bestvideo+bestaudio), output to given + container format. One of mkv, mp4, ogg, + webm, flv. 
+                                     webm, flv. Ignored if no merge is required
 
 ## Subtitle Options:
     --write-sub                      Write subtitle file
-    --write-auto-sub                 Write automatic subtitle file (YouTube only)
-    --all-subs                       Download all the available subtitles of the video
+    --write-auto-sub                 Write automatically generated subtitle file
+                                     (YouTube only)
+    --all-subs                       Download all the available subtitles of the
+                                     video
     --list-subs                      List all available subtitles for the video
-    --sub-format FORMAT              Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"
-    --sub-lang LANGS                 Languages of the subtitles to download (optional) separated by commas, use IETF language tags like 'en,pt'
+    --sub-format FORMAT              Subtitle format, accepts formats
+                                     preference, for example: "srt" or
+                                     "ass/srt/best"
+    --sub-lang LANGS                 Languages of the subtitles to download
+                                     (optional) separated by commas, use IETF
+                                     language tags like 'en,pt'
 
 ## Authentication Options:
     -u, --username USERNAME          Login with this account ID
-    -p, --password PASSWORD          Account password. If this option is left out, youtube-dl will ask interactively.
+    -p, --password PASSWORD          Account password. If this option is left
+                                     out, youtube-dl will ask interactively.
     -2, --twofactor TWOFACTOR        Two-factor auth code
     -n, --netrc                      Use .netrc authentication data
-    --video-password PASSWORD        Video password (vimeo, smotri)
+    --video-password PASSWORD        Video password (vimeo, smotri, youku)
 
 ## Post-processing Options:
-    -x, --extract-audio              Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)
-    --audio-format FORMAT            Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "best" by default
-    --audio-quality QUALITY          Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default
-                                     5)
-    --recode-video FORMAT            Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)
+    -x, --extract-audio              Convert video files to audio-only files
+                                     (requires ffmpeg or avconv and ffprobe or
+                                     avprobe)
+    --audio-format FORMAT            Specify audio format: "best", "aac",
+                                     "vorbis", "mp3", "m4a", "opus", or "wav";
+                                     "best" by default
+    --audio-quality QUALITY          Specify ffmpeg/avconv audio quality, insert
+                                     a value between 0 (better) and 9 (worse)
+                                     for VBR or a specific bitrate like 128K
+                                     (default 5)
+    --recode-video FORMAT            Encode the video to another format if
+                                     necessary (currently supported:
+                                     mp4|flv|ogg|webm|mkv|avi)
     --postprocessor-args ARGS        Give these arguments to the postprocessor
-    -k, --keep-video                 Keep the video file on disk after the post-processing; the video is erased by default
-    --no-post-overwrites             Do not overwrite post-processed files; the post-processed files are overwritten by default
-    --embed-subs                     Embed subtitles in the video (only for mkv and mp4 videos)
+    -k, --keep-video                 Keep the video file on disk after the post-
+                                     processing; the video is erased by default
+    --no-post-overwrites             Do not overwrite post-processed files; the
+                                     post-processed files are overwritten by
+                                     default
+    --embed-subs                     Embed subtitles in the video (only for mkv
+                                     and mp4 videos)
     --embed-thumbnail                Embed thumbnail in the audio as cover art
     --add-metadata                   Write metadata to the video file
-    --metadata-from-title FORMAT     Parse additional metadata like song title / artist from the video title. The format syntax is the same as --output, the parsed
-                                     parameters replace existing values. Additional templates: %(album)s, %(artist)s. Example: --metadata-from-title "%(artist)s -
-                                     %(title)s" matches a title like "Coldplay - Paradise"
-    --xattrs                         Write metadata to the video file's xattrs (using dublin core and xdg standards)
-    --fixup POLICY                   Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn (the default;
-                                     fix file if we can, warn otherwise)
-    --prefer-avconv                  Prefer avconv over ffmpeg for running the postprocessors (default)
-    --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the postprocessors
-    --ffmpeg-location PATH           Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.
-    --exec CMD                       Execute a command on the file after downloading, similar to find's -exec syntax. Example: --exec 'adb push {} /sdcard/Music/ && rm
-                                     {}'
-    --convert-subtitles FORMAT       Convert the subtitles to other format (currently supported: srt|ass|vtt)
+    --metadata-from-title FORMAT     Parse additional metadata like song title /
+                                     artist from the video title. The format
+                                     syntax is the same as --output, the parsed
+                                     parameters replace existing values.
+                                     Additional templates: %(album)s,
+                                     %(artist)s. Example: --metadata-from-title
+                                     "%(artist)s - %(title)s" matches a title
+                                     like "Coldplay - Paradise"
+    --xattrs                         Write metadata to the video file's xattrs
+                                     (using dublin core and xdg standards)
+    --fixup POLICY                   Automatically correct known faults of the
+                                     file. One of never (do nothing), warn (only
+                                     emit a warning), detect_or_warn (the
+                                     default; fix file if we can, warn
+                                     otherwise)
+    --prefer-avconv                  Prefer avconv over ffmpeg for running the
+                                     postprocessors (default)
+    --prefer-ffmpeg                  Prefer ffmpeg over avconv for running the
+                                     postprocessors
+    --ffmpeg-location PATH           Location of the ffmpeg/avconv binary;
+                                     either the path to the binary or its
+                                     containing directory.
+    --exec CMD                       Execute a command on the file after
+                                     downloading, similar to find's -exec
+                                     syntax. Example: --exec 'adb push {}
+                                     /sdcard/Music/ && rm {}'
+    --convert-subtitles FORMAT       Convert the subtitles to other format
+                                     (currently supported: srt|ass|vtt)
 
 # CONFIGURATION
 
-You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<user name>\youtube-dl.conf`.
+You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, the system wide configuration file is located at `/etc/youtube-dl.conf` and the user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`. For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime and use a proxy:
+```
+--extract-audio
+--no-mtime
+--proxy 127.0.0.1:3128
+```
+
+You can use `--ignore-config` if you want to disable the configuration file for a particular youtube-dl run.
 
-### Authentication with `.netrc` file ###
+### Authentication with `.netrc` file
 
-You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in shell command history. You can achieve this using [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on per extractor basis. For that you will need to create `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
+You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on a per-extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
 ```
 touch $HOME/.netrc
 chmod a-rwx,u+rw $HOME/.netrc
 ```
@@ -254,13 +430,13 @@ For example:
 machine youtube login myaccount@gmail.com password my_youtube_password
 machine twitch login my_twitch_account_name password my_twitch_password
 ```
-To activate authentication with `.netrc` file you should pass `--netrc` to youtube-dl or to place it in [configuration file](#configuration).
+To activate authentication with the `.netrc` file you should pass `--netrc` to youtube-dl or place it in the [configuration file](#configuration).
 
-On Windows you may also need to setup `%HOME%` environment variable manually.
+On Windows you may also need to set up the `%HOME%` environment variable manually.
 
 # OUTPUT TEMPLATE
 
-The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parenthesis, followed by a lowercase S. Allowed names are:
+The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
 
  - `id`: The sequence will be replaced by the video identifier.
 - `url`: The sequence will be replaced by the video URL.
@@ -270,8 +446,10 @@ The `-o` option allows users to indicate a template for the output file names. T
 - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
 - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
 - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
- - `playlist`: The name or the id of the playlist that contains the video.
- - `playlist_index`: The index of the video in the playlist, a five-digit number.
+ - `playlist`: The sequence will be replaced by the name or the id of the playlist that contains the video.
+ - `playlist_index`: The sequence will be replaced by the index of the video in the playlist padded with leading zeros according to the total length of the playlist.
+ - `format_id`: The sequence will be replaced by the format code specified by `--format`.
+ - `duration`: The sequence will be replaced by the length of the video in seconds.
 
 The current default template is `%(title)s-%(id)s.%(ext)s`.
 
@@ -286,18 +464,18 @@ youtube-dl_test_video_.mp4 # A simple file name
 
 # FORMAT SELECTION
 
-By default youtube-dl tries to download the best quality, but sometimes you may want to download other format.
+By default youtube-dl tries to download the best quality, but sometimes you may want to download in a different format.
 
 The simplest case is requesting a specific format, for example `-f 22`. You can get the list of available formats using `--list-formats`, you can also use a file extension (currently it supports aac, m4a, mp3, mp4, ogg, wav, webm) or the special names `best`, `bestvideo`, `bestaudio` and `worst`.
 
-If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`.
+If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`. Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
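(Editorial aside: the selector strings above can also be passed through youtube-dl's Python API as the `format` option; see the EMBEDDING YOUTUBE-DL section added in this diff. A minimal sketch, assuming a youtube-dl release new enough to support the grouped-selector syntax introduced here:)

```python
from __future__ import unicode_literals

import youtube_dl

# 'format' takes the same selector syntax as the -f command-line flag.
# This is the grouped example from the text above, with a '/best'
# fallback appended for sites that offer neither mp4 nor webm formats.
ydl_opts = {'format': '(mp4,webm)[height<480]/best'}

with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```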
 
-Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
+Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
 
-If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
+If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
 
 # VIDEO SELECTION
 
-Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`, they accept dates in two formats:
+Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`. They accept dates in two formats:
 
  - Absolute dates: Dates in the format `YYYYMMDD`.
 - Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
 
@@ -311,7 +489,7 @@ $ youtube-dl --dateafter now-6months
 # Download only the videos uploaded on January 1, 1970
 $ youtube-dl --date 19700101
 
-$ # will only download the videos uploaded in the 200x decade
+$ # Download only the videos uploaded in the 200x decade
 $ youtube-dl --dateafter 20000101 --datebefore 20091231
 ```
 
@@ -323,7 +501,7 @@ If you've followed [our manual installation instructions](http://rg3.github.io/y
 
 If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
 
-If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distributions serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
+If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
 
 As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
 
@@ -349,7 +527,7 @@ If you have installed youtube-dl with a package manager, pip, setup.py or a tarb
 
 By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, the only option out of `-citw` that is regularly useful is `-i`.
 
-### Can you please put the -b option back?
+### Can you please put the `-b` option back?
 
 Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
 
@@ -357,17 +535,23 @@ Most people asking this question are not aware that youtube-dl now defaults to d
 
 Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We're [considering to provide a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a webbrowser to the youtube URL, solving the CAPTCHA, and restart youtube-dl.
 
+### Do I need any other programs?
+
+youtube-dl works fine on its own on most sites. However, if you want to convert video/audio, you'll need [avconv](https://libav.org/) or [ffmpeg](https://www.ffmpeg.org/). On some sites - most notably YouTube - videos can be retrieved in a higher quality format without sound. youtube-dl will detect whether avconv/ffmpeg is present and automatically pick the best option.
+
+Videos or video formats streamed via RTMP protocol can only be downloaded when [rtmpdump](https://rtmpdump.mplayerhq.hu/) is installed. Downloading MMS and RTSP videos requires either [mplayer](http://mplayerhq.hu/) or [mpv](https://mpv.io/) to be installed.
+
 ### I have downloaded a video but how can I play it?
 
 Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/).
 
-### I extracted a video URL with -g, but it does not play on another machine / in my webbrowser.
+### I extracted a video URL with `-g`, but it does not play on another machine / in my webbrowser.
 
 It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl. It may be beneficial to use IPv6; in some cases, the restrictions are only applied to IPv4. Some services (sometimes only for a subset of videos) do not restrict the video URL by IP address, cookie, or user-agent, but these are the exception rather than the rule.
 
-Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using -g, your own downloader must support these as well.
+Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using `-g`, your own downloader must support these as well.
 
 If you want to play the video on a machine that is not running youtube-dl, you can relay the video content from the machine that runs youtube-dl. You can use `-o -` to let youtube-dl stream a video to stdout, or simply allow the player to download the files written by youtube-dl in turn.
 
@@ -375,13 +559,13 @@ If you want to play the video on a machine that is not running youtube-dl, you c
 
 YouTube has switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
 
-### ERROR: unable to download video ###
+### ERROR: unable to download video
 
 YouTube requires an additional signature since September 2012 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
 
-### Video URL contains an ampersand and I'm getting some strange output `[1] 2839` or `'v' is not recognized as an internal or external command` ###
+### Video URL contains an ampersand and I'm getting some strange output `[1] 2839` or `'v' is not recognized as an internal or external command`
 
-That's actually the output from your shell. Since ampersand is one of the special shell characters it's interpreted by shell preventing you from passing the whole URL to youtube-dl. To disable your shell from interpreting the ampersands (or any other special characters) you have to either put the whole URL in quotes or escape them with a backslash (which approach will work depends on your shell).
+That's actually the output from your shell. Since ampersand is one of the special shell characters it's interpreted by the shell preventing you from passing the whole URL to youtube-dl. To disable your shell from interpreting the ampersands (or any other special characters) you have to either put the whole URL in quotes or escape them with a backslash (which approach will work depends on your shell).
 
 For example if your URL is https://www.youtube.com/watch?t=4&v=BaW_jenozKc you should end up with the following command:
 
@@ -403,7 +587,7 @@ In February 2015, the new YouTube player contained a character sequence in a str
 
 These two error codes indicate that the service is blocking your IP address because of overuse. Contact the service and ask them to unblock your IP address, or - if you have acquired a whitelisted IP address already - use the [`--proxy` or `--source-address` options](#network-options) to select another IP address.
 
-### SyntaxError: Non-ASCII character ###
+### SyntaxError: Non-ASCII character
 
 The error
 
@@ -432,13 +616,19 @@ From then on, after restarting your shell, you will be able to access both youtu
 
 Use the `-o` to specify an [output template](#output-template), for example `-o "/home/user/videos/%(title)s-%(id)s.%(ext)s"`. If you want this for all of your downloads, put the option into your [configuration file](#configuration).
 
-### How do I download a video starting with a `-` ?
+### How do I download a video starting with a `-`?
 
 Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the options with `--`:
 
     youtube-dl -- -wNyEUrxzFU
    youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU"
 
+### How do I pass cookies to youtube-dl?
+
+Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`. Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows, `LF` (`\n`) for Linux and `CR` (`\r`) for Mac OS. `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.
+
+Passing cookies to youtube-dl is a good way to work around login when a particular extractor does not implement it explicitly.
+
 ### Can you add support for this anime video site, or site which shows current movies for free?
 
 As a matter of policy (as well as legality), youtube-dl does not include support for services that specialize in infringing copyright. As a rule of thumb, if you cannot easily find a video that the service is quite obviously allowed to distribute (i.e. that has been uploaded by the creator, the creator's distributor, or is published under a free license), the service is probably unfit for inclusion to youtube-dl.
 
@@ -527,18 +717,19 @@ If you want to add support for a new site, you can follow this quick list (assum
         webpage = self._download_webpage(url, video_id)
 
         # TODO more code goes here, for example ...
-        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+        title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
 
         return {
             'id': video_id,
             'title': title,
             'description': self._og_search_description(webpage),
+            'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
             # TODO more properties (see youtube_dl/extractor/common.py)
         }
     ```
Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py). -6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. -7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want. +6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. +7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want. 8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8). 9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this: @@ -566,7 +757,7 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl: ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc']) ``` -Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object. +Most likely, you'll want to use various options. For a list of what can be done, have a look at [`youtube_dl/YoutubeDL.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L121-L269). For a start, if you want to intercept youtube-dl's output, set a `logger` object. Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file: @@ -607,11 +798,25 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl: # BUGS -Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the irc channel #youtube-dl on freenode. +Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email.
For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)). -**Please include the full output of youtube-dl when run with `-v`**. +**Please include the full output of youtube-dl when run with `-v`**, i.e. add the `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this: +``` +$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj +[debug] System config: [] +[debug] User config: [] +[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj'] +[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251 +[debug] youtube-dl version 2015.12.06 +[debug] Git HEAD: 135392e +[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2 +[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4 +[debug] Proxy map: {} +... +``` +**Do not post screenshots of the verbose log; only plain text is acceptable.** -The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever. +The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever. Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist): @@ -627,19 +832,19 @@ So please elaborate on what feature you are requesting, or what bug you want to If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over. -For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information. +For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information. -If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/). +If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics.
If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/). -**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL. +**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL. ### Are you using the latest version? -Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well. +Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well. ### Is the issue already documented? -Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity. +Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/rg3/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity. ### Why are existing options not enough? @@ -669,4 +874,4 @@ It may sound strange, but some bug reports we receive are completely unrelated t youtube-dl is released into the public domain by the copyright holders. -This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain. +This README file was originally written by [Daniel Bolton](https://github.com/dbbolton) and is likewise released into the public domain. 
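For illustration, the `--dump-pages` diagnostic run described in the bug-report instructions above might look like this (a sketch reusing the usual test URL; `>log.txt 2>&1` redirects both stdout and stderr into `log.txt`):

    youtube-dl -v --dump-pages 'http://www.youtube.com/watch?v=BaW_jenozKc' >log.txt 2>&1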
diff --git a/devscripts/bash-completion.py b/devscripts/bash-completion.py index cd26cc089..ce68f26f9 100755 --- a/devscripts/bash-completion.py +++ b/devscripts/bash-completion.py @@ -5,7 +5,7 @@ import os from os.path import dirname as dirn import sys -sys.path.append(dirn(dirn((os.path.abspath(__file__))))) +sys.path.insert(0, dirn(dirn((os.path.abspath(__file__))))) import youtube_dl BASH_COMPLETION_FILE = "youtube-dl.bash-completion" diff --git a/devscripts/fish-completion.py b/devscripts/fish-completion.py index c2f238798..41629d87d 100755 --- a/devscripts/fish-completion.py +++ b/devscripts/fish-completion.py @@ -6,7 +6,7 @@ import os from os.path import dirname as dirn import sys -sys.path.append(dirn(dirn((os.path.abspath(__file__))))) +sys.path.insert(0, dirn(dirn((os.path.abspath(__file__))))) import youtube_dl from youtube_dl.utils import shell_quote diff --git a/devscripts/gh-pages/update-sites.py b/devscripts/gh-pages/update-sites.py index d3ef5f0b5..503c1372f 100755 --- a/devscripts/gh-pages/update-sites.py +++ b/devscripts/gh-pages/update-sites.py @@ -6,7 +6,7 @@ import os import textwrap # We must be able to import youtube_dl -sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) import youtube_dl diff --git a/devscripts/make_supportedsites.py b/devscripts/make_supportedsites.py index 3df4385a6..8cb4a4638 100644 --- a/devscripts/make_supportedsites.py +++ b/devscripts/make_supportedsites.py @@ -9,7 +9,7 @@ import sys # Import youtube_dl ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') -sys.path.append(ROOT_DIR) +sys.path.insert(0, ROOT_DIR) import youtube_dl diff --git a/devscripts/prepare_manpage.py b/devscripts/prepare_manpage.py index 7ece37754..776e6556e 100644 --- a/devscripts/prepare_manpage.py +++ b/devscripts/prepare_manpage.py @@ -8,6 +8,35 @@ import re ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) README_FILE = os.path.join(ROOT_DIR, 'README.md') + +def filter_options(readme): + ret = '' + in_options = False + for line in readme.split('\n'): + if line.startswith('# '): + if line[2:].startswith('OPTIONS'): + in_options = True + else: + in_options = False + + if in_options: + if line.lstrip().startswith('-'): + option, description = re.split(r'\s{2,}', line.lstrip()) + split_option = option.split(' ') + + if not split_option[-1].startswith('-'): # metavar + option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]]) + + # Pandoc's definition_lists. See http://pandoc.org/README.html + # for more information. 
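+ # For example, the OPTIONS line "-h, --help    Print this help text and exit" + # is emitted as "\n-h, --help\n: Print this help text and exit\n", which Pandoc + # renders as a definition list entry (the option as the term, its description as the definition).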
+ ret += '\n%s\n: %s\n' % (option, description) + else: + ret += line.lstrip() + '\n' + else: + ret += line + '\n' + + return ret + with io.open(README_FILE, encoding='utf-8') as f: readme = f.read() @@ -26,6 +55,8 @@ readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme) readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme) readme = PREFIX + readme +readme = filter_options(readme) + if sys.version_info < (3, 0): print(readme.encode('utf-8')) else: diff --git a/devscripts/zsh-completion.py b/devscripts/zsh-completion.py index f200f2c80..04728e8e2 100755 --- a/devscripts/zsh-completion.py +++ b/devscripts/zsh-completion.py @@ -5,7 +5,7 @@ import os from os.path import dirname as dirn import sys -sys.path.append(dirn(dirn((os.path.abspath(__file__))))) +sys.path.insert(0, dirn(dirn((os.path.abspath(__file__))))) import youtube_dl ZSH_COMPLETION_FILE = "youtube-dl.zsh" diff --git a/docs/supportedsites.md b/docs/supportedsites.md index 73445137f..8253335e3 100644 --- a/docs/supportedsites.md +++ b/docs/supportedsites.md @@ -15,8 +15,12 @@ - **abc.net.au** - **Abc7News** - **AcademicEarth:Course** + - **acast** + - **acast:channel** - **AddAnime** - **AdobeTV** + - **AdobeTVChannel** + - **AdobeTVShow** - **AdobeTVVideo** - **AdultSwim** - **Aftenposten** @@ -43,6 +47,7 @@ - **arte.tv:future** - **AtresPlayer** - **ATTTechChannel** + - **AudiMedia** - **audiomack** - **audiomack:album** - **Azubu** @@ -51,7 +56,9 @@ - **bambuser:channel** - **Bandcamp** - **Bandcamp:album** + - **bbc**: BBC - **bbc.co.uk**: BBC iPlayer + - **bbc.co.uk:article**: BBC articles - **BeatportPro** - **Beeg** - **BehindKink** @@ -65,7 +72,8 @@ - **Bpb**: Bundeszentrale für politische Bildung - **BR**: Bayerischer Rundfunk Mediathek - **Break** - - **Brightcove** + - **brightcove:legacy** + - **brightcove:new** - **bt:article**: Bergens Tidende Articles - **bt:vestlendingen**: Bergens Tidende - Vestlendingen - **BuzzFeed** @@ -80,16 +88,19 @@ - **CBSSports** - **CeskaTelevize** - **channel9**: Channel 9 + - **Chaturbate** - **Chilloutzone** - **chirbit** - **chirbit:profile** - **Cinchcast** - **Cinemassacre** - - **clipfish** + - **Clipfish** - **cliphunter** - **Clipsyndicate** + - **cloudtime**: CloudTime - **Cloudy** - **Clubic** + - **Clyp** - **cmt.com** - **CNET** - **CNN** @@ -100,7 +111,7 @@ - **ComCarCoff** - **ComedyCentral** - **ComedyCentralShows**: The Daily Show / The Colbert Report - - **CondeNast**: Condé Nast media group: Condé Nast, GQ, Glamour, Vanity Fair, Vogue, W Magazine, WIRED + - **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED - **Cracked** - **Criterion** - **CrooksAndLiars** @@ -115,14 +126,16 @@ - **DailymotionCloud** - **daum.net** - **DBTV** + - **DCN** - **DctpTv** - **DeezerPlaylist** - **defense.gouv.fr** + - **democracynow** - **DHM**: Filmarchiv - Deutsches Historisches Museum - **Discovery** - - **divxstage**: DivxStage - **Dotsub** - **DouyuTV**: 斗鱼 + - **DPlay** - **dramafever** - **dramafever:series** - **DRBonanza** @@ -148,6 +161,8 @@ - **EroProfile** - **Escapist** - **ESPN** (Currently broken) + - **EsriVideo** + - **Europa** - **EveryonesMixtape** - **exfm**: ex.fm - **ExpoTV** @@ -155,15 +170,15 @@ - **facebook** - **faz.net** - **fc2** + - **Fczenit** - **fernsehkritik.tv** - - **fernsehkritik.tv:postecke** - **Firstpost** - **FiveTV** - 
**Flickr** - **Folketinget**: Folketinget (ft.dk; Danish parliament) - **FootyRoom** - **Foxgay** - - **FoxNews** + - **FoxNews**: Fox News and Fox Business Video - **FoxSports** - **france2.fr:generation-quoi** - **FranceCulture** @@ -173,7 +188,9 @@ - **Freesound** - **freespeech.org** - **FreeVideo** + - **Funimation** - **FunnyOrDie** + - **GameInformer** - **Gamekings** - **GameOne** - **gameone:playlist** @@ -189,10 +206,10 @@ - **Giga** - **Glide**: Glide mobile video messages (glide.me) - **Globo** + - **GloboArticle** - **GodTube** - **GoldenMoustache** - **Golem** - - **GorillaVid**: GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net - **Goshgay** - **Groupon** - **Hark** @@ -206,7 +223,6 @@ - **hitbox** - **hitbox:live** - **HornBunny** - - **HostingBulk** - **HotNewHipHop** - **Howcast** - **HowStuffWorks** @@ -217,13 +233,17 @@ - **imdb**: Internet Movie Database trailers - **imdb:list**: Internet Movie Database lists - **Imgur** + - **ImgurAlbum** - **Ina** + - **Indavideo** + - **IndavideoEmbed** - **InfoQ** - **Instagram** - **instagram:user**: Instagram user profile - **InternetVideoArchive** - **IPrima** - **iqiyi**: 爱奇艺 + - **Ir90Tv** - **ivi**: ivi.ru - **ivi:compilation**: ivi.ru compilations - **Izlesene** @@ -252,12 +272,16 @@ - **kuwo:song**: 酷我音乐 - **la7.tv** - **Laola1Tv** + - **Lecture2Go** - **Letv**: 乐视网 - **LetvPlaylist** - **LetvTv** - **Libsyn** - **life:embed** - **lifenews**: LIFE | NEWS + - **limelight** + - **limelight:channel** + - **limelight:channel_list** - **LiveLeak** - **livestream** - **livestream:original** @@ -269,16 +293,15 @@ - **macgamestore**: MacGameStore trailers - **mailru**: Видео@Mail.Ru - **Malemotion** - - **MDR** + - **MDR**: MDR.DE and KiKA - **media.ccc.de** - - **MegaVideoz** - **metacafe** - **Metacritic** - **Mgoon** - **Minhateca** - **MinistryGrid** - **miomio.tv** - - **mitele.es** + - **MiTele**: mitele.es - **mixcloud** - **MLB** - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net @@ -292,22 +315,22 @@ - **MovieClips** - **MovieFap** - **Moviezine** - - **movshare**: MovShare - **MPORA** + - **MSNBC** - **MTV** + - **mtv.de** - **mtviggy.com** - **mtvservices:embedded** - **MuenchenTV**: münchen.tv - **MusicPlayOn** - - **MusicVault** - **muzu.tv** + - **Mwave** - **MySpace** - **MySpace:album** - **MySpass** - **Myvi** - **myvideo** - **MyVidster** - - **N-JOY** - **n-tv.de** - **NationalGeographic** - **Naver** @@ -316,7 +339,9 @@ - **NBCNews** - **NBCSports** - **NBCSportsVPlayer** - - **ndr**: NDR.de - Mediathek + - **ndr**: NDR.de - Norddeutscher Rundfunk + - **ndr:embed** + - **ndr:embed:base** - **NDTV** - **NerdCubedFeed** - **Nerdist** @@ -339,16 +364,20 @@ - **nhl.com:videocenter**: NHL videocenter category - **niconico**: ニコニコ動画 - **NiconicoPlaylist** + - **njoy**: N-JOY + - **njoy:embed** - **Noco** - **Normalboots** - **NosVideo** - **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz - **novamov**: NovaMov - - **Nowness** + - **nowness** + - **nowness:playlist** + - **nowness:series** - **NowTV** + - **NowTVList** - **nowvideo**: NowVideo - **npo**: npo.nl and ntr.nl - - **npo**: npo.nl and ntr.nl - **npo.nl:live** - **npo.nl:radio** - **npo.nl:radio:fragment** @@ -366,14 +395,14 @@ - **OnionStudios** - **Ooyala** - **OoyalaExternal** - - **OpenFilm** - **orf:fm4**: radio FM4 - **orf:iptv**: iptv.ORF.at - **orf:oe1**: Radio Österreich 1 - **orf:tvthek**: ORF TVthek - **parliamentlive.tv**: UK parliament videos - **Patreon** - - 
**PBS** + - **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC) + - **Periscope**: Periscope - **PhilharmonieDeParis**: Philharmonie de Paris - **Phoenix** - **Photobucket** @@ -382,8 +411,11 @@ - **PlanetaPlay** - **play.fm** - **played.to** + - **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz - **Playvid** - **Playwire** + - **pluralsight** + - **pluralsight:course** - **plus.google**: Google Plus - 
**pluzz.francetv.fr** - **podomatic** @@ -427,6 +459,7 @@ - **rtve.es:alacarta**: RTVE a la carta - **rtve.es:infantil**: RTVE infantil - **rtve.es:live**: RTVE.es live streams + - **RTVNH** - **RUHD** - **rutube**: Rutube videos - **rutube:channel**: Rutube channels @@ -450,9 +483,12 @@ - **ServingSys** - **Sexu** - **SexyKarma**: Sexy Karma and Watch Indian Porn - - **Shared** + - **Shahid** + - **Shared**: shared.sx and vivo.sx - **ShareSix** - **Sina** + - **skynewsarabia:video** + - **skynewsarabia:video** - **Slideshare** - **Slutload** - **smotri**: Smotri.com @@ -467,6 +503,7 @@ - **soompi:show** - **soundcloud** - **soundcloud:playlist** + - **soundcloud:search**: Soundcloud search - **soundcloud:set** - **soundcloud:user** - **soundgasm** @@ -493,6 +530,7 @@ - **SSA** - **stanfordoc**: Stanford Open ClassRoom - **Steam** + - **Stitcher** - **streamcloud.eu** - **StreamCZ** - **StreetVoice** @@ -514,7 +552,8 @@ - **techtv.mit.edu** - **ted** - **TeleBruxelles** - - **telecinco.es** + - **Telecinco**: telecinco.es, cuatro.com and mediaset.es + - **Telegraaf** - **TeleMB** - **TeleTask** - **TenPlay** @@ -522,6 +561,7 @@ - **TF1** - **TheOnion** - **ThePlatform** + - **ThePlatformFeed** - **TheSixtyOne** - **ThisAmericanLife** - **ThisAV** @@ -564,7 +604,8 @@ - **twitch:stream** - **twitch:video** - **twitch:vod** - - **TwitterCard** + - **twitter** + - **twitter:card** - **Ubu** - **udemy** - **udemy:course** @@ -587,10 +628,8 @@ - **Viddler** - **video.google:search**: Google Video search - **video.mit.edu** - - **VideoBam** - **VideoDetective** - **videofy.me** - - **videolectures.net** - **VideoMega** - **VideoPremium** - **VideoTt**: video.tt - Your True Tube @@ -600,6 +639,7 @@ - **vier** - **vier:videos** - **Viewster** + - **Viidea** - **viki** - **viki:channel** - **vimeo** @@ -615,9 +655,11 @@ - **vine:user** - **vk**: VK - **vk:uservideos**: VK - User's Videos + - **vlive** - **Vodlocker** - **VoiceRepublic** - **Vporn** + - **vpro**: npo.nl and ntr.nl - **VRT** - **vube**: Vube.com - **VuClip** @@ -632,6 +674,7 @@ - **WebOfStories** - **WebOfStoriesPlaylist** - **Weibo** + - **wholecloud**: WholeCloud - **Wimp** - **Wistia** - **WNL** @@ -640,6 +683,7 @@ - **WSJ**: Wall Street Journal - **XBef** - **XboxClips** + - **XFileShare**: XFileShare based sites: GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net, filehoot.com and vidto.me - **XHamster** - **XHamsterEmbed** - **XMinus** @@ -674,6 +718,7 @@ - **youtube:show**: YouTube.com (multi-season) shows - **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication) - **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword) + - **youtube:user:playlists**: YouTube.com user playlists - **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication) - **Zapiks** - **ZDF** @@ -28,7 +28,7 @@ py2exe_options = { "compressed": 1, "optimize": 2, "dist_dir": '.', - "dll_excludes": ['w9xpopen.exe'], + "dll_excludes": ['w9xpopen.exe', 'crypt32.dll'], } py2exe_console = [{ diff --git a/test/helper.py b/test/helper.py index e1129e58f..bdd7acca4 100644 --- a/test/helper.py +++ b/test/helper.py @@ -89,66 +89,81 @@ def gettestcases(include_onlymatching=False): md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() -def expect_info_dict(self, got_dict, expected_dict): +def expect_value(self, got, expected, field): + if isinstance(expected, compat_str) and expected.startswith('re:'): + match_str = expected[len('re:'):] + match_rex = 
re.compile(match_str) + + self.assertTrue( + isinstance(got, compat_str), + 'Expected a %s object, but got %s for field %s' % ( + compat_str.__name__, type(got).__name__, field)) + self.assertTrue( + match_rex.match(got), + 'field %s (value: %r) should match %r' % (field, got, match_str)) + elif isinstance(expected, compat_str) and expected.startswith('startswith:'): + start_str = expected[len('startswith:'):] + self.assertTrue( + isinstance(got, compat_str), + 'Expected a %s object, but got %s for field %s' % ( + compat_str.__name__, type(got).__name__, field)) + self.assertTrue( + got.startswith(start_str), + 'field %s (value: %r) should start with %r' % (field, got, start_str)) + elif isinstance(expected, compat_str) and expected.startswith('contains:'): + contains_str = expected[len('contains:'):] + self.assertTrue( + isinstance(got, compat_str), + 'Expected a %s object, but got %s for field %s' % ( + compat_str.__name__, type(got).__name__, field)) + self.assertTrue( + contains_str in got, + 'field %s (value: %r) should contain %r' % (field, got, contains_str)) + elif isinstance(expected, type): + self.assertTrue( + isinstance(got, expected), + 'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got))) + elif isinstance(expected, dict) and isinstance(got, dict): + expect_dict(self, got, expected) + elif isinstance(expected, list) and isinstance(got, list): + self.assertEqual( + len(expected), len(got), + 'Expect a list of length %d, but got a list of length %d for field %s' % ( + len(expected), len(got), field)) + for index, (item_got, item_expected) in enumerate(zip(got, expected)): + type_got = type(item_got) + type_expected = type(item_expected) + self.assertEqual( + type_expected, type_got, + 'Type mismatch for list item at index %d for field %s, expected %r, got %r' % ( + index, field, type_expected, type_got)) + expect_value(self, item_got, item_expected, field) + else: + if isinstance(expected, compat_str) and expected.startswith('md5:'): + got = 'md5:' + md5(got) + elif isinstance(expected, compat_str) and expected.startswith('mincount:'): + self.assertTrue( + isinstance(got, (list, dict)), + 'Expected field %s to be a list or a dict, but it is of type %s' % ( + field, type(got).__name__)) + expected_num = int(expected.partition(':')[2]) + assertGreaterEqual( + self, len(got), expected_num, + 'Expected %d items in field %s, but only got %d' % (expected_num, field, len(got))) + return + self.assertEqual( + expected, got, + 'Invalid value for field %s, expected %r, got %r' % (field, expected, got)) + + +def expect_dict(self, got_dict, expected_dict): for info_field, expected in expected_dict.items(): - if isinstance(expected, compat_str) and expected.startswith('re:'): - got = got_dict.get(info_field) - match_str = expected[len('re:'):] - match_rex = re.compile(match_str) + got = got_dict.get(info_field) + expect_value(self, got, expected, info_field) - self.assertTrue( - isinstance(got, compat_str), - 'Expected a %s object, but got %s for field %s' % ( - compat_str.__name__, type(got).__name__, info_field)) - self.assertTrue( - match_rex.match(got), - 'field %s (value: %r) should match %r' % (info_field, got, match_str)) - elif isinstance(expected, compat_str) and expected.startswith('startswith:'): - got = got_dict.get(info_field) - start_str = expected[len('startswith:'):] - self.assertTrue( - isinstance(got, compat_str), - 'Expected a %s object, but got %s for field %s' % ( - compat_str.__name__, type(got).__name__, info_field)) - 
self.assertTrue( - got.startswith(start_str), - 'field %s (value: %r) should start with %r' % (info_field, got, start_str)) - elif isinstance(expected, compat_str) and expected.startswith('contains:'): - got = got_dict.get(info_field) - contains_str = expected[len('contains:'):] - self.assertTrue( - isinstance(got, compat_str), - 'Expected a %s object, but got %s for field %s' % ( - compat_str.__name__, type(got).__name__, info_field)) - self.assertTrue( - contains_str in got, - 'field %s (value: %r) should contain %r' % (info_field, got, contains_str)) - elif isinstance(expected, type): - got = got_dict.get(info_field) - self.assertTrue(isinstance(got, expected), - 'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got))) - else: - if isinstance(expected, compat_str) and expected.startswith('md5:'): - got = 'md5:' + md5(got_dict.get(info_field)) - elif isinstance(expected, compat_str) and expected.startswith('mincount:'): - got = got_dict.get(info_field) - self.assertTrue( - isinstance(got, list), - 'Expected field %s to be a list, but it is of type %s' % ( - info_field, type(got).__name__)) - expected_num = int(expected.partition(':')[2]) - assertGreaterEqual( - self, len(got), expected_num, - 'Expected %d items in field %s, but only got %d' % ( - expected_num, info_field, len(got) - ) - ) - continue - else: - got = got_dict.get(info_field) - self.assertEqual(expected, got, - 'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) +def expect_info_dict(self, got_dict, expected_dict): + expect_dict(self, got_dict, expected_dict) # Check for the presence of mandatory fields if got_dict.get('_type') not in ('playlist', 'multi_video'): for key in ('id', 'url', 'title', 'ext'): @@ -160,7 +175,7 @@ def expect_info_dict(self, got_dict, expected_dict): # Are checkable fields missing from the test case definition? 
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value)) for key, value in got_dict.items() - if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location')) + if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit')) missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys()) if missing_keys: def _repr(v): diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py index be8d12997..938466a80 100644 --- a/test/test_InfoExtractor.py +++ b/test/test_InfoExtractor.py @@ -35,10 +35,18 @@ class TestInfoExtractor(unittest.TestCase): <meta name="og:title" content='Foo'/> <meta content="Some video's description " name="og:description"/> <meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/> + <meta content='application/x-shockwave-flash' property='og:video:type'> + <meta content='Foo' property=og:foobar> + <meta name="og:test1" content='foo > < bar'/> + <meta name="og:test2" content="foo >//< bar"/> ''' self.assertEqual(ie._og_search_title(html), 'Foo') self.assertEqual(ie._og_search_description(html), 'Some video\'s description ') self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2') + self.assertEqual(ie._og_search_video_url(html, default=None), None) + self.assertEqual(ie._og_search_property('foobar', html), 'Foo') + self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar') + self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar') def test_html_search_meta(self): ie = self.ie diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index a13c09ef4..0388c0bf3 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -15,7 +15,7 @@ from youtube_dl import YoutubeDL from youtube_dl.compat import compat_str from youtube_dl.extractor import YoutubeIE from youtube_dl.postprocessor.common import PostProcessor -from youtube_dl.utils import match_filter_func +from youtube_dl.utils import ExtractorError, match_filter_func TEST_URL = 'http://localhost/sample.mp4' @@ -105,6 +105,7 @@ class TestFormatSelection(unittest.TestCase): def test_format_selection(self): formats = [ {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL}, + {'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL}, {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL}, {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL}, {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL}, @@ -136,6 +137,11 @@ class TestFormatSelection(unittest.TestCase): downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], '35') + ydl = YDL({'format': 'example-with-dashes'}) + ydl.process_ie_result(info_dict.copy()) + downloaded = ydl.downloaded_info_dicts[0] + self.assertEqual(downloaded['format_id'], 'example-with-dashes') + def test_format_selection_audio(self): formats = [ {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL}, @@ -229,21 +235,70 @@ class TestFormatSelection(unittest.TestCase): '141', '172', '140', '171', '139', ] - for f1id, f2id in zip(order, order[1:]): - f1 = YoutubeIE._formats[f1id].copy() - f1['format_id'] = f1id - f1['url'] = 'url:' + f1id - f2 = YoutubeIE._formats[f2id].copy() - f2['format_id'] = f2id - f2['url'] = 'url:' + f2id + def format_info(f_id): + info = 
YoutubeIE._formats[f_id].copy() + info['format_id'] = f_id + info['url'] = 'url:' + f_id + return info + formats_order = [format_info(f_id) for f_id in order] + + info_dict = _make_result(list(formats_order), extractor='youtube') + ydl = YDL({'format': 'bestvideo+bestaudio'}) + yie = YoutubeIE(ydl) + yie._sort_formats(info_dict['formats']) + ydl.process_ie_result(info_dict) + downloaded = ydl.downloaded_info_dicts[0] + self.assertEqual(downloaded['format_id'], '137+141') + self.assertEqual(downloaded['ext'], 'mp4') + + info_dict = _make_result(list(formats_order), extractor='youtube') + ydl = YDL({'format': 'bestvideo[height>=999999]+bestaudio/best'}) + yie = YoutubeIE(ydl) + yie._sort_formats(info_dict['formats']) + ydl.process_ie_result(info_dict) + downloaded = ydl.downloaded_info_dicts[0] + self.assertEqual(downloaded['format_id'], '38') + + info_dict = _make_result(list(formats_order), extractor='youtube') + ydl = YDL({'format': 'bestvideo/best,bestaudio'}) + yie = YoutubeIE(ydl) + yie._sort_formats(info_dict['formats']) + ydl.process_ie_result(info_dict) + downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] + self.assertEqual(downloaded_ids, ['137', '141']) + + info_dict = _make_result(list(formats_order), extractor='youtube') + ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])+bestaudio'}) + yie = YoutubeIE(ydl) + yie._sort_formats(info_dict['formats']) + ydl.process_ie_result(info_dict) + downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] + self.assertEqual(downloaded_ids, ['137+141', '248+141']) + + info_dict = _make_result(list(formats_order), extractor='youtube') + ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])[height<=720]+bestaudio'}) + yie = YoutubeIE(ydl) + yie._sort_formats(info_dict['formats']) + ydl.process_ie_result(info_dict) + downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] + self.assertEqual(downloaded_ids, ['136+141', '247+141']) + + info_dict = _make_result(list(formats_order), extractor='youtube') + ydl = YDL({'format': '(bestvideo[ext=none]/bestvideo[ext=webm])+bestaudio'}) + yie = YoutubeIE(ydl) + yie._sort_formats(info_dict['formats']) + ydl.process_ie_result(info_dict) + downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] + self.assertEqual(downloaded_ids, ['248+141']) + for f1, f2 in zip(formats_order, formats_order[1:]): info_dict = _make_result([f1, f2], extractor='youtube') ydl = YDL({'format': 'best/bestvideo'}) yie = YoutubeIE(ydl) yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] - self.assertEqual(downloaded['format_id'], f1id) + self.assertEqual(downloaded['format_id'], f1['format_id']) info_dict = _make_result([f2, f1], extractor='youtube') ydl = YDL({'format': 'best/bestvideo'}) @@ -251,7 +306,18 @@ class TestFormatSelection(unittest.TestCase): yie._sort_formats(info_dict['formats']) ydl.process_ie_result(info_dict) downloaded = ydl.downloaded_info_dicts[0] - self.assertEqual(downloaded['format_id'], f1id) + self.assertEqual(downloaded['format_id'], f1['format_id']) + + def test_invalid_format_specs(self): + def assert_syntax_error(format_spec): + ydl = YDL({'format': format_spec}) + info_dict = _make_result([{'format_id': 'foo', 'url': TEST_URL}]) + self.assertRaises(SyntaxError, ydl.process_ie_result, info_dict) + + assert_syntax_error('bestvideo,,best') + assert_syntax_error('+bestaudio') + assert_syntax_error('bestvideo+') + assert_syntax_error('/') def 
test_format_filtering(self): formats = [ @@ -308,6 +374,18 @@ class TestFormatSelection(unittest.TestCase): downloaded = ydl.downloaded_info_dicts[0] self.assertEqual(downloaded['format_id'], 'G') + ydl = YDL({'format': 'all[width>=400][width<=600]'}) + ydl.process_ie_result(info_dict) + downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts] + self.assertEqual(downloaded_ids, ['B', 'C', 'D']) + + ydl = YDL({'format': 'best[height<40]'}) + try: + ydl.process_ie_result(info_dict) + except ExtractorError: + pass + self.assertEqual(ydl.downloaded_info_dicts, []) + class TestYoutubeDL(unittest.TestCase): def test_subtitles(self): diff --git a/test/test_all_urls.py b/test/test_all_urls.py index a9db42b30..a0c11e6c1 100644 --- a/test/test_all_urls.py +++ b/test/test_all_urls.py @@ -121,8 +121,8 @@ class TestAllURLsMatching(unittest.TestCase): def test_pbs(self): # https://github.com/rg3/youtube-dl/issues/2350 - self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS']) - self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS']) + self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['pbs']) + self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['pbs']) def test_yahoo_https(self): # https://github.com/rg3/youtube-dl/issues/2701 diff --git a/test/test_compat.py b/test/test_compat.py index c3ba8ad2e..b6bfad05e 100644 --- a/test/test_compat.py +++ b/test/test_compat.py @@ -13,7 +13,10 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from youtube_dl.utils import get_filesystem_encoding from youtube_dl.compat import ( compat_getenv, + compat_etree_fromstring, compat_expanduser, + compat_shlex_split, + compat_str, compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, ) @@ -67,5 +70,23 @@ class TestCompat(unittest.TestCase): self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def') self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def') + def test_compat_shlex_split(self): + self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two']) + + def test_compat_etree_fromstring(self): + xml = ''' + <root foo="bar" spam="中文"> + <normal>foo</normal> + <chinese>中文</chinese> + <foo><bar>spam</bar></foo> + </root> + ''' + doc = compat_etree_fromstring(xml.encode('utf-8')) + self.assertTrue(isinstance(doc.attrib['foo'], compat_str)) + self.assertTrue(isinstance(doc.attrib['spam'], compat_str)) + self.assertTrue(isinstance(doc.find('normal').text, compat_str)) + self.assertTrue(isinstance(doc.find('chinese').text, compat_str)) + self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str)) + if __name__ == '__main__': unittest.main() diff --git a/test/test_download.py b/test/test_download.py index 1110357a7..a3f1c0644 100644 --- a/test/test_download.py +++ b/test/test_download.py @@ -102,7 +102,7 @@ def generator(test_case): params = get_params(test_case.get('params', {})) if is_playlist and 'playlist' not in test_case: - params.setdefault('extract_flat', True) + params.setdefault('extract_flat', 'in_playlist') params.setdefault('skip_download', True) ydl = YoutubeDL(params, auto_init=False) @@ -136,7 +136,9 @@ def generator(test_case): # We're not using .download here since that is just a shim # for outside error handling, and returns the exit code # instead of the result dict.
- res_dict = ydl.extract_info(test_case['url']) + res_dict = ydl.extract_info( + test_case['url'], + force_generic_extractor=params.get('force_generic_extractor', False)) except (DownloadError, ExtractorError) as err: # Check if the exception is not a network related one if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503): diff --git a/test/test_jsinterp.py b/test/test_jsinterp.py index fc73e5dc2..63c350b8f 100644 --- a/test/test_jsinterp.py +++ b/test/test_jsinterp.py @@ -19,6 +19,9 @@ class TestJSInterpreter(unittest.TestCase): jsi = JSInterpreter('function x3(){return 42;}') self.assertEqual(jsi.call_function('x3'), 42) + jsi = JSInterpreter('var x5 = function(){return 42;}') + self.assertEqual(jsi.call_function('x5'), 42) + def test_calc(self): jsi = JSInterpreter('function x4(a){return 2*a+1;}') self.assertEqual(jsi.call_function('x4', 3), 7) diff --git a/test/test_subtitles.py b/test/test_subtitles.py index c4e3adb67..75f0ea75f 100644 --- a/test/test_subtitles.py +++ b/test/test_subtitles.py @@ -25,8 +25,10 @@ from youtube_dl.extractor import ( RaiIE, VikiIE, ThePlatformIE, + ThePlatformFeedIE, RTVEALaCartaIE, FunnyOrDieIE, + DemocracynowIE, ) @@ -307,6 +309,18 @@ class TestThePlatformSubtitles(BaseTestSubtitles): self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b') +class TestThePlatformFeedSubtitles(BaseTestSubtitles): + url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207' + IE = ThePlatformFeedIE + + def test_allsubtitles(self): + self.DL.params['writesubtitles'] = True + self.DL.params['allsubtitles'] = True + subtitles = self.getSubtitles() + self.assertEqual(set(subtitles.keys()), set(['en'])) + self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade') + + class TestRtveSubtitles(BaseTestSubtitles): url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/' IE = RTVEALaCartaIE @@ -333,5 +347,25 @@ class TestFunnyOrDieSubtitles(BaseTestSubtitles): self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4') +class TestDemocracynowSubtitles(BaseTestSubtitles): + url = 'http://www.democracynow.org/shows/2015/7/3' + IE = DemocracynowIE + + def test_allsubtitles(self): + self.DL.params['writesubtitles'] = True + self.DL.params['allsubtitles'] = True + subtitles = self.getSubtitles() + self.assertEqual(set(subtitles.keys()), set(['en'])) + self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c') + + def test_subtitles_in_page(self): + self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree' + self.DL.params['writesubtitles'] = True + self.DL.params['allsubtitles'] = True + subtitles = self.getSubtitles() + self.assertEqual(set(subtitles.keys()), set(['en'])) + self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c') + + if __name__ == '__main__': unittest.main() diff --git a/test/test_utils.py b/test/test_utils.py index 65692a9fb..1c3290d9b 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -21,6 +21,8 @@ from youtube_dl.utils import ( clean_html, DateRange, detect_exe_version, + determine_ext, + encode_compat_str, encodeFilename, escape_rfc3986, escape_url, @@ -42,6 +44,7 @@ from youtube_dl.utils import ( sanitize_path, prepend_extension, replace_extension, + 
remove_quotes, shell_quote, smuggle_url, str_to_int, @@ -57,11 +60,19 @@ from youtube_dl.utils import ( urlencode_postdata, version_tuple, xpath_with_ns, + xpath_element, xpath_text, + xpath_attr, render_table, match_str, parse_dfxp_time_expr, dfxp2srt, + cli_option, + cli_valueless_option, + cli_bool_option, +) +from youtube_dl.compat import ( + compat_etree_fromstring, ) @@ -191,6 +202,15 @@ class TestUtil(unittest.TestCase): self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp') self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp') + def test_remove_quotes(self): + self.assertEqual(remove_quotes(None), None) + self.assertEqual(remove_quotes('"'), '"') + self.assertEqual(remove_quotes("'"), "'") + self.assertEqual(remove_quotes(';'), ';') + self.assertEqual(remove_quotes('";'), '";') + self.assertEqual(remove_quotes('""'), '') + self.assertEqual(remove_quotes('";"'), ';') + def test_ordered_set(self): self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) self.assertEqual(orderedSet([]), []) @@ -202,8 +222,8 @@ class TestUtil(unittest.TestCase): self.assertEqual(unescapeHTML('%20;'), '%20;') self.assertEqual(unescapeHTML('/'), '/') self.assertEqual(unescapeHTML('/'), '/') - self.assertEqual( - unescapeHTML('é'), 'é') + self.assertEqual(unescapeHTML('é'), 'é') + self.assertEqual(unescapeHTML('�'), '�') def test_daterange(self): _20century = DateRange("19000101", "20000101") @@ -228,6 +248,14 @@ class TestUtil(unittest.TestCase): unified_strdate('2/2/2015 6:47:40 PM', day_first=False), '20150202') self.assertEqual(unified_strdate('25-09-2014'), '20140925') + self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None) + + def test_determine_ext(self): + self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4') + self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None) + self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None) + self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None) + self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8') def test_find_xpath_attr(self): testxml = '''<root> @@ -235,12 +263,21 @@ class TestUtil(unittest.TestCase): <node x="a"/> <node x="a" y="c" /> <node x="b" y="d" /> + <node x="" /> </root>''' - doc = xml.etree.ElementTree.fromstring(testxml) + doc = compat_etree_fromstring(testxml) + self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None) self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None) + self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None) + self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None) + self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1]) self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1]) + self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3]) + self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2]) self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2]) + self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3]) + self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4]) def test_xpath_with_ns(self): testxml = '''<root xmlns:media="http://example.com/"> @@ -249,23 +286,56 @@ class TestUtil(unittest.TestCase): <url>http://server.com/download.mp3</url> </media:song> </root>''' - doc = xml.etree.ElementTree.fromstring(testxml) + doc = compat_etree_fromstring(testxml) find = lambda p: doc.find(xpath_with_ns(p, 
{'media': 'http://example.com/'})) self.assertTrue(find('media:song') is not None) self.assertEqual(find('media:song/media:author').text, 'The Author') self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3') + def test_xpath_element(self): + doc = xml.etree.ElementTree.Element('root') + div = xml.etree.ElementTree.SubElement(doc, 'div') + p = xml.etree.ElementTree.SubElement(div, 'p') + p.text = 'Foo' + self.assertEqual(xpath_element(doc, 'div/p'), p) + self.assertEqual(xpath_element(doc, ['div/p']), p) + self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p) + self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default') + self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default') + self.assertTrue(xpath_element(doc, 'div/bar') is None) + self.assertTrue(xpath_element(doc, ['div/bar']) is None) + self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None) + self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True) + self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True) + self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True) + def test_xpath_text(self): testxml = '''<root> <div> <p>Foo</p> </div> </root>''' - doc = xml.etree.ElementTree.fromstring(testxml) + doc = compat_etree_fromstring(testxml) self.assertEqual(xpath_text(doc, 'div/p'), 'Foo') + self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default') self.assertTrue(xpath_text(doc, 'div/bar') is None) self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True) + def test_xpath_attr(self): + testxml = '''<root> + <div> + <p x="a">Foo</p> + </div> + </root>''' + doc = compat_etree_fromstring(testxml) + self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a') + self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None) + self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None) + self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default') + self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default') + self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True) + self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True) + def test_smuggle_url(self): data = {"ö": "ö", "abc": [3]} url = 'https://foo.bar/baz?x=y#a' @@ -380,11 +450,17 @@ class TestUtil(unittest.TestCase): data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'}) self.assertTrue(isinstance(data, bytes)) + def test_encode_compat_str(self): + self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест') + self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест') + def test_parse_iso8601(self): self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266) + self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251) + self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None) def test_strip_jsonp(self): stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);') @@ -455,6 +531,9 @@ class TestUtil(unittest.TestCase): "playlist":[{"controls":{"all":null}}] }''') + inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"''' + self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''') + inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom 
Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"' json_code = js_to_json(inp) self.assertEqual(json.loads(json_code), json.loads(inp)) @@ -587,12 +666,13 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4') {'like_count': 190, 'dislike_count': 10})) def test_parse_dfxp_time_expr(self): - self.assertEqual(parse_dfxp_time_expr(None), 0.0) - self.assertEqual(parse_dfxp_time_expr(''), 0.0) + self.assertEqual(parse_dfxp_time_expr(None), None) + self.assertEqual(parse_dfxp_time_expr(''), None) self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1) self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1) self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0) self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1) + self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1) def test_dfxp2srt(self): dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?> @@ -602,6 +682,9 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4') <p begin="0" end="1">The following line contains Chinese characters and special symbols</p> <p begin="1" end="2">第二行<br/>♪♪</p> <p begin="2" dur="1"><span>Third<br/>Line</span></p> + <p begin="3" end="-1">Lines with invalid timestamps are ignored</p> + <p begin="-1" end="-1">Ignore, two</p> + <p begin="3" dur="-1">Ignored, three</p> </div> </body> </tt>''' @@ -637,6 +720,51 @@ The first line ''' self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data) + def test_cli_option(self): + self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128']) + self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), []) + self.assertEqual(cli_option({}, '--proxy', 'proxy'), []) + + def test_cli_valueless_option(self): + self.assertEqual(cli_valueless_option( + {'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader']) + self.assertEqual(cli_valueless_option( + {'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), []) + self.assertEqual(cli_valueless_option( + {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate']) + self.assertEqual(cli_valueless_option( + {'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), []) + self.assertEqual(cli_valueless_option( + {'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), []) + self.assertEqual(cli_valueless_option( + {'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate']) + + def test_cli_bool_option(self): + self.assertEqual( + cli_bool_option( + {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), + ['--no-check-certificate', 'true']) + self.assertEqual( + cli_bool_option( + {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='), + ['--no-check-certificate=true']) + self.assertEqual( + cli_bool_option( + {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'), + ['--check-certificate', 'false']) + self.assertEqual( + cli_bool_option( + {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='), + ['--check-certificate=false']) + self.assertEqual( + cli_bool_option( + {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'), + ['--check-certificate', 'true']) + self.assertEqual( + cli_bool_option( + {'nocheckcertificate': 
False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='), + ['--check-certificate=true']) + if __name__ == '__main__': unittest.main() diff --git a/test/test_write_annotations.py b/test/test_write_annotations.py index 780636c77..84b8f39e0 100644 --- a/test/test_write_annotations.py +++ b/test/test_write_annotations.py @@ -33,7 +33,7 @@ params = get_params({ TEST_ID = 'gr51aVj-mLg' -ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml' +ANNOTATIONS_FILE = TEST_ID + '.annotations.xml' EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label'] diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py index c889b6f15..26aadb34f 100644 --- a/test/test_youtube_lists.py +++ b/test/test_youtube_lists.py @@ -57,5 +57,14 @@ class TestYoutubeLists(unittest.TestCase): entries = result['entries'] self.assertEqual(len(entries), 100) + def test_youtube_flat_playlist_titles(self): + dl = FakeYDL() + dl.params['extract_flat'] = True + ie = YoutubePlaylistIE(dl) + result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') + self.assertIsPlaylist(result) + for entry in result['entries']: + self.assertTrue(entry.get('title')) + if __name__ == '__main__': unittest.main() @@ -1,5 +1,5 @@ [tox] -envlist = py26,py27,py33,py34 +envlist = py26,py27,py33,py34,py35 [testenv] deps = nose diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 702a6ad50..50425b8d7 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -21,6 +21,7 @@ import subprocess import socket import sys import time +import tokenize import traceback if os.name == 'nt': @@ -34,22 +35,24 @@ from .compat import ( compat_http_client, compat_kwargs, compat_str, + compat_tokenize_tokenize, compat_urllib_error, compat_urllib_request, + compat_urllib_request_DataHandler, ) from .utils import ( - escape_url, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, DownloadError, + encode_compat_str, encodeFilename, + error_to_compat_str, ExtractorError, format_bytes, formatSeconds, - HEADRequest, locked_file, make_HTTPS_handler, MaxDownloadsReached, @@ -63,6 +66,7 @@ from .utils import ( SameFileError, sanitize_filename, sanitize_path, + sanitized_Request, std_headers, subtitles_filename, UnavailableVideoError, @@ -70,6 +74,7 @@ from .utils import ( version_tuple, write_json_file, write_string, + YoutubeDLCookieProcessor, YoutubeDLHandler, prepend_extension, replace_extension, @@ -155,7 +160,7 @@ class YoutubeDL(object): writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file - writeautomaticsub: Write the automatic subtitles to a file + writeautomaticsub: Write the automatically generated subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video @@ -285,7 +290,11 @@ class YoutubeDL(object): self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr - self.params = params + self.params = { + # Default parameters + 'nocheckcertificate': False, + } + self.params.update(params) self.cache = Cache(self) if params.get('bidi_workaround', False): @@ -488,7 +497,7 @@ class YoutubeDL(object): tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += 
''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) - tb += compat_str(traceback.format_exc()) + tb += encode_compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) @@ -567,7 +576,7 @@ class YoutubeDL(object): if v is not None) template_dict = collections.defaultdict(lambda: 'NA', template_dict) - outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL)) + outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) tmpl = compat_expanduser(outtmpl) filename = tmpl % template_dict # Temporary fix for #4787 @@ -575,7 +584,7 @@ class YoutubeDL(object): # to workaround encoding issues with subprocess on python2 @ Windows if sys.version_info < (3, 0) and sys.platform == 'win32': filename = encodeFilename(filename, True).decode(preferredencoding()) - return filename + return sanitize_path(filename) except ValueError as err: self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')') return None @@ -667,14 +676,14 @@ class YoutubeDL(object): return self.process_ie_result(ie_result, download, extra_info) else: return ie_result - except ExtractorError as de: # An error we somewhat expected - self.report_error(compat_str(de), de.format_traceback()) + except ExtractorError as e: # An error we somewhat expected + self.report_error(compat_str(e), e.format_traceback()) break except MaxDownloadsReached: raise except Exception as e: if self.params.get('ignoreerrors', False): - self.report_error(compat_str(e), tb=compat_str(traceback.format_exc())) + self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc())) break else: raise @@ -828,6 +837,7 @@ class YoutubeDL(object): extra_info=extra) playlist_results.append(entry_result) ie_result['entries'] = playlist_results + self.to_screen('[download] Finished downloading playlist: %s' % playlist) return ie_result elif result_type == 'compat_list': self.report_warning( @@ -853,8 +863,8 @@ class YoutubeDL(object): else: raise Exception('Invalid result type: %s' % result_type) - def _apply_format_filter(self, format_spec, available_formats): - " Returns a tuple of the remaining format_spec and filtered formats " + def _build_format_filter(self, filter_spec): + " Returns a function to filter the formats according to the filter_spec " OPERATORS = { '<': operator.lt, @@ -864,13 +874,13 @@ class YoutubeDL(object): '=': operator.eq, '!=': operator.ne, } - operator_rex = re.compile(r'''(?x)\s*\[ + operator_rex = re.compile(r'''(?x)\s* (?P<key>width|height|tbr|abr|vbr|asr|filesize|fps) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?) - \]$ + $ ''' % '|'.join(map(re.escape, OPERATORS.keys()))) - m = operator_rex.search(format_spec) + m = operator_rex.search(filter_spec) if m: try: comparison_value = int(m.group('value')) @@ -881,7 +891,7 @@ class YoutubeDL(object): if comparison_value is None: raise ValueError( 'Invalid value %r in format specification %r' % ( - m.group('value'), format_spec)) + m.group('value'), filter_spec)) op = OPERATORS[m.group('op')] if not m: @@ -889,85 +899,289 @@ class YoutubeDL(object): '=': operator.eq, '!=': operator.ne, } - str_operator_rex = re.compile(r'''(?x)\s*\[ + str_operator_rex = re.compile(r'''(?x) \s*(?P<key>ext|acodec|vcodec|container|protocol) \s*(?P<op>%s)(?P<none_inclusive>\s*\?)? 
\s*(?P<value>[a-zA-Z0-9_-]+) - \s*\]$ + \s*$ ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) - m = str_operator_rex.search(format_spec) + m = str_operator_rex.search(filter_spec) if m: comparison_value = m.group('value') op = STR_OPERATORS[m.group('op')] if not m: - raise ValueError('Invalid format specification %r' % format_spec) + raise ValueError('Invalid filter specification %r' % filter_spec) def _filter(f): actual_value = f.get(m.group('key')) if actual_value is None: return m.group('none_inclusive') return op(actual_value, comparison_value) - new_formats = [f for f in available_formats if _filter(f)] + return _filter + + def build_format_selector(self, format_spec): + def syntax_error(note, start): + message = ( + 'Invalid format specification: ' + '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) + return SyntaxError(message) + + PICKFIRST = 'PICKFIRST' + MERGE = 'MERGE' + SINGLE = 'SINGLE' + GROUP = 'GROUP' + FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters']) + + def _parse_filter(tokens): + filter_parts = [] + for type, string, start, _, _ in tokens: + if type == tokenize.OP and string == ']': + return ''.join(filter_parts) + else: + filter_parts.append(string) + + def _remove_unused_ops(tokens): + # Remove operators that we don't use and join them with the surrounding strings + # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9' + ALLOWED_OPS = ('/', '+', ',', '(', ')') + last_string, last_start, last_end, last_line = None, None, None, None + for type, string, start, end, line in tokens: + if type == tokenize.OP and string == '[': + if last_string: + yield tokenize.NAME, last_string, last_start, last_end, last_line + last_string = None + yield type, string, start, end, line + # everything inside brackets will be handled by _parse_filter + for type, string, start, end, line in tokens: + yield type, string, start, end, line + if type == tokenize.OP and string == ']': + break + elif type == tokenize.OP and string in ALLOWED_OPS: + if last_string: + yield tokenize.NAME, last_string, last_start, last_end, last_line + last_string = None + yield type, string, start, end, line + elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]: + if not last_string: + last_string = string + last_start = start + last_end = end + else: + last_string += string + if last_string: + yield tokenize.NAME, last_string, last_start, last_end, last_line + + def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False): + selectors = [] + current_selector = None + for type, string, start, _, _ in tokens: + # ENCODING is only defined in python 3.x + if type == getattr(tokenize, 'ENCODING', None): + continue + elif type in [tokenize.NAME, tokenize.NUMBER]: + current_selector = FormatSelector(SINGLE, string, []) + elif type == tokenize.OP: + if string == ')': + if not inside_group: + # ')' will be handled by the parentheses group + tokens.restore_last_token() + break + elif inside_merge and string in ['/', ',']: + tokens.restore_last_token() + break + elif inside_choice and string == ',': + tokens.restore_last_token() + break + elif string == ',': + if not current_selector: + raise syntax_error('"," must follow a format selector', start) + selectors.append(current_selector) + current_selector = None + elif string == '/': + if not current_selector: + raise syntax_error('"/" must follow a format selector', start) + first_choice = current_selector + second_choice = 
_parse_format_selection(tokens, inside_choice=True) + current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), []) + elif string == '[': + if not current_selector: + current_selector = FormatSelector(SINGLE, 'best', []) + format_filter = _parse_filter(tokens) + current_selector.filters.append(format_filter) + elif string == '(': + if current_selector: + raise syntax_error('Unexpected "("', start) + group = _parse_format_selection(tokens, inside_group=True) + current_selector = FormatSelector(GROUP, group, []) + elif string == '+': + video_selector = current_selector + audio_selector = _parse_format_selection(tokens, inside_merge=True) + if not video_selector or not audio_selector: + raise syntax_error('"+" must be between two format selectors', start) + current_selector = FormatSelector(MERGE, (video_selector, audio_selector), []) + else: + raise syntax_error('Operator not recognized: "{0}"'.format(string), start) + elif type == tokenize.ENDMARKER: + break + if current_selector: + selectors.append(current_selector) + return selectors + + def _build_selector_function(selector): + if isinstance(selector, list): + fs = [_build_selector_function(s) for s in selector] + + def selector_function(formats): + for f in fs: + for format in f(formats): + yield format + return selector_function + elif selector.type == GROUP: + selector_function = _build_selector_function(selector.selector) + elif selector.type == PICKFIRST: + fs = [_build_selector_function(s) for s in selector.selector] + + def selector_function(formats): + for f in fs: + picked_formats = list(f(formats)) + if picked_formats: + return picked_formats + return [] + elif selector.type == SINGLE: + format_spec = selector.selector + + def selector_function(formats): + formats = list(formats) + if not formats: + return + if format_spec == 'all': + for f in formats: + yield f + elif format_spec in ['best', 'worst', None]: + format_idx = 0 if format_spec == 'worst' else -1 + audiovideo_formats = [ + f for f in formats + if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] + if audiovideo_formats: + yield audiovideo_formats[format_idx] + # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format + elif (all(f.get('acodec') != 'none' for f in formats) or + all(f.get('vcodec') != 'none' for f in formats)): + yield formats[format_idx] + elif format_spec == 'bestaudio': + audio_formats = [ + f for f in formats + if f.get('vcodec') == 'none'] + if audio_formats: + yield audio_formats[-1] + elif format_spec == 'worstaudio': + audio_formats = [ + f for f in formats + if f.get('vcodec') == 'none'] + if audio_formats: + yield audio_formats[0] + elif format_spec == 'bestvideo': + video_formats = [ + f for f in formats + if f.get('acodec') == 'none'] + if video_formats: + yield video_formats[-1] + elif format_spec == 'worstvideo': + video_formats = [ + f for f in formats + if f.get('acodec') == 'none'] + if video_formats: + yield video_formats[0] + else: + extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] + if format_spec in extensions: + filter_f = lambda f: f['ext'] == format_spec + else: + filter_f = lambda f: f['format_id'] == format_spec + matches = list(filter(filter_f, formats)) + if matches: + yield matches[-1] + elif selector.type == MERGE: + def _merge(formats_info): + format_1, format_2 = [f['format_id'] for f in formats_info] + # The first format must contain the video and the + # second the audio + if formats_info[0].get('vcodec') == 'none': + 
self.report_error('The first format must ' + 'contain the video, try using ' + '"-f %s+%s"' % (format_2, format_1)) + return + # Formats must be opposite (video+audio) + if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none': + self.report_error( + 'Both formats %s and %s are video-only, you must specify "-f video+audio"' + % (format_1, format_2)) + return + output_ext = ( + formats_info[0]['ext'] + if self.params.get('merge_output_format') is None + else self.params['merge_output_format']) + return { + 'requested_formats': formats_info, + 'format': '%s+%s' % (formats_info[0].get('format'), + formats_info[1].get('format')), + 'format_id': '%s+%s' % (formats_info[0].get('format_id'), + formats_info[1].get('format_id')), + 'width': formats_info[0].get('width'), + 'height': formats_info[0].get('height'), + 'resolution': formats_info[0].get('resolution'), + 'fps': formats_info[0].get('fps'), + 'vcodec': formats_info[0].get('vcodec'), + 'vbr': formats_info[0].get('vbr'), + 'stretched_ratio': formats_info[0].get('stretched_ratio'), + 'acodec': formats_info[1].get('acodec'), + 'abr': formats_info[1].get('abr'), + 'ext': output_ext, + } + video_selector, audio_selector = map(_build_selector_function, selector.selector) - new_format_spec = format_spec[:-len(m.group(0))] - if not new_format_spec: - new_format_spec = 'best' + def selector_function(formats): + formats = list(formats) + for pair in itertools.product(video_selector(formats), audio_selector(formats)): + yield _merge(pair) - return (new_format_spec, new_formats) + filters = [self._build_format_filter(f) for f in selector.filters] - def select_format(self, format_spec, available_formats): - while format_spec.endswith(']'): - format_spec, available_formats = self._apply_format_filter( - format_spec, available_formats) - if not available_formats: - return None + def final_selector(formats): + for _filter in filters: + formats = list(filter(_filter, formats)) + return selector_function(formats) + return final_selector - if format_spec in ['best', 'worst', None]: - format_idx = 0 if format_spec == 'worst' else -1 - audiovideo_formats = [ - f for f in available_formats - if f.get('vcodec') != 'none' and f.get('acodec') != 'none'] - if audiovideo_formats: - return audiovideo_formats[format_idx] - # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format - elif (all(f.get('acodec') != 'none' for f in available_formats) or - all(f.get('vcodec') != 'none' for f in available_formats)): - return available_formats[format_idx] - elif format_spec == 'bestaudio': - audio_formats = [ - f for f in available_formats - if f.get('vcodec') == 'none'] - if audio_formats: - return audio_formats[-1] - elif format_spec == 'worstaudio': - audio_formats = [ - f for f in available_formats - if f.get('vcodec') == 'none'] - if audio_formats: - return audio_formats[0] - elif format_spec == 'bestvideo': - video_formats = [ - f for f in available_formats - if f.get('acodec') == 'none'] - if video_formats: - return video_formats[-1] - elif format_spec == 'worstvideo': - video_formats = [ - f for f in available_formats - if f.get('acodec') == 'none'] - if video_formats: - return video_formats[0] - else: - extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] - if format_spec in extensions: - filter_f = lambda f: f['ext'] == format_spec - else: - filter_f = lambda f: f['format_id'] == format_spec - matches = list(filter(filter_f, available_formats)) - if matches: - return matches[-1] - 
return None + stream = io.BytesIO(format_spec.encode('utf-8')) + try: + tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline))) + except tokenize.TokenError: + raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) + + class TokenIterator(object): + def __init__(self, tokens): + self.tokens = tokens + self.counter = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.counter >= len(self.tokens): + raise StopIteration() + value = self.tokens[self.counter] + self.counter += 1 + return value + + next = __next__ + + def restore_last_token(self): + self.counter -= 1 + + parsed_selector = _parse_format_selection(iter(TokenIterator(tokens))) + return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): res = std_headers.copy() @@ -983,7 +1197,7 @@ class YoutubeDL(object): return res def _calc_cookies(self, info_dict): - pr = compat_urllib_request.Request(info_dict['url']) + pr = sanitized_Request(info_dict['url']) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') @@ -1030,13 +1244,20 @@ class YoutubeDL(object): except (ValueError, OverflowError, OSError): pass + subtitles = info_dict.get('subtitles') + if subtitles: + for _, subtitle in subtitles.items(): + for subtitle_format in subtitle: + if 'ext' not in subtitle_format: + subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower() + if self.params.get('listsubtitles', False): if 'automatic_captions' in info_dict: self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions') - self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles') + self.list_subtitles(info_dict['id'], subtitles, 'subtitles') return info_dict['requested_subtitles'] = self.process_subtitles( - info_dict['id'], info_dict.get('subtitles'), + info_dict['id'], subtitles, info_dict.get('automatic_captions')) # We now pick which formats have to be downloaded @@ -1111,56 +1332,8 @@ class YoutubeDL(object): req_format_list.append('bestvideo+bestaudio') req_format_list.append('best') req_format = '/'.join(req_format_list) - formats_to_download = [] - if req_format == 'all': - formats_to_download = formats - else: - for rfstr in req_format.split(','): - # We can accept formats requested in the format: 34/5/best, we pick - # the first that is available, starting from left - req_formats = rfstr.split('/') - for rf in req_formats: - if re.match(r'.+?\+.+?', rf) is not None: - # Two formats have been requested like '137+139' - format_1, format_2 = rf.split('+') - formats_info = (self.select_format(format_1, formats), - self.select_format(format_2, formats)) - if all(formats_info): - # The first format must contain the video and the - # second the audio - if formats_info[0].get('vcodec') == 'none': - self.report_error('The first format must ' - 'contain the video, try using ' - '"-f %s+%s"' % (format_2, format_1)) - return - output_ext = ( - formats_info[0]['ext'] - if self.params.get('merge_output_format') is None - else self.params['merge_output_format']) - selected_format = { - 'requested_formats': formats_info, - 'format': '%s+%s' % (formats_info[0].get('format'), - formats_info[1].get('format')), - 'format_id': '%s+%s' % (formats_info[0].get('format_id'), - formats_info[1].get('format_id')), - 'width': formats_info[0].get('width'), - 'height': formats_info[0].get('height'), - 'resolution': formats_info[0].get('resolution'), - 'fps': formats_info[0].get('fps'), - 'vcodec': 
formats_info[0].get('vcodec'), - 'vbr': formats_info[0].get('vbr'), - 'stretched_ratio': formats_info[0].get('stretched_ratio'), - 'acodec': formats_info[1].get('acodec'), - 'abr': formats_info[1].get('abr'), - 'ext': output_ext, - } - else: - selected_format = None - else: - selected_format = self.select_format(rf, formats) - if selected_format is not None: - formats_to_download.append(selected_format) - break + format_selector = self.build_format_selector(req_format) + formats_to_download = list(format_selector(formats)) if not formats_to_download: raise ExtractorError('requested format not available', expected=True) @@ -1288,7 +1461,7 @@ class YoutubeDL(object): if dn and not os.path.exists(dn): os.makedirs(dn) except (OSError, IOError) as err: - self.report_error('unable to create directory ' + compat_str(err)) + self.report_error('unable to create directory ' + error_to_compat_str(err)) return if self.params.get('writedescription', False): @@ -1339,7 +1512,7 @@ class YoutubeDL(object): sub_info['url'], info_dict['id'], note=False) except ExtractorError as err: self.report_warning('Unable to download subtitle for "%s": %s' % - (sub_lang, compat_str(err.cause))) + (sub_lang, error_to_compat_str(err.cause))) continue try: sub_filename = subtitles_filename(filename, sub_lang, sub_format) @@ -1708,27 +1881,8 @@ class YoutubeDL(object): def urlopen(self, req): """ Start an HTTP download """ - - # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not - # always respected by websites, some tend to give out URLs with non percent-encoded - # non-ASCII characters (see telemb.py, ard.py [#3412]) - # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) - # To work around aforementioned issue we will replace request's original URL with - # percent-encoded one - req_is_string = isinstance(req, compat_basestring) - url = req if req_is_string else req.get_full_url() - url_escaped = escape_url(url) - - # Substitute URL if any change after escaping - if url != url_escaped: - if req_is_string: - req = url_escaped - else: - req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request - req = req_type( - url_escaped, data=req.data, headers=req.headers, - origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) - + if isinstance(req, compat_basestring): + req = sanitized_Request(req) return self._opener.open(req, timeout=self._socket_timeout) def print_debug_header(self): @@ -1811,8 +1965,7 @@ class YoutubeDL(object): if os.access(opts_cookiefile, os.R_OK): self.cookiejar.load() - cookie_processor = compat_urllib_request.HTTPCookieProcessor( - self.cookiejar) + cookie_processor = YoutubeDLCookieProcessor(self.cookiejar) if opts_proxy is not None: if opts_proxy == '': proxies = {} @@ -1828,8 +1981,9 @@ class YoutubeDL(object): debuglevel = 1 if self.params.get('debug_printtraffic') else 0 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) + data_handler = compat_urllib_request_DataHandler() opener = compat_urllib_request.build_opener( - proxy_handler, https_handler, cookie_processor, ydlh) + proxy_handler, https_handler, cookie_processor, ydlh, data_handler) # Delete the default user-agent header, which would otherwise apply in # cases where our custom HTTP handler doesn't come into play @@ -1881,10 +2035,10 @@ class YoutubeDL(object): (info_dict['extractor'], info_dict['id'], thumb_display_id)) try: uf = self.urlopen(t['url']) 
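The tokenize-based `build_format_selector` above replaces the old string-splitting `select_format`: the spec is lexed with Python's own tokenizer, parsed into a tree of `FormatSelector` nodes (SINGLE, GROUP, PICKFIRST, MERGE, each with optional filters), and compiled into a generator over format dicts. A minimal usage sketch follows; the format dicts are invented examples, and the list is assumed sorted worst-to-best, which the `[-1]` indexing in the SINGLE branch relies on:

```
from youtube_dl import YoutubeDL

ydl = YoutubeDL({})
selector = ydl.build_format_selector('bestvideo[height<=720]+bestaudio/best')

# hypothetical formats, ordered worst to best as the selector expects
formats = [
    {'format_id': '140', 'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none'},
    {'format_id': '136', 'ext': 'mp4', 'height': 720, 'acodec': 'none', 'vcodec': 'avc1'},
    {'format_id': '137', 'ext': 'mp4', 'height': 1080, 'acodec': 'none', 'vcodec': 'avc1'},
]

for f in selector(formats):
    # '[height<=720]' drops 137, '+' merges video 136 with audio 140,
    # and the '/best' fallback only kicks in if the merge yields nothing
    print(f['format_id'])  # -> 136+140
```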
- with open(thumb_filename, 'wb') as thumbf: + with open(encodeFilename(thumb_filename), 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) self.to_screen('[%s] %s: Writing thumbnail %sto: %s' % (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename)) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self.report_warning('Unable to download thumbnail "%s": %s' % - (t['url'], compat_str(err))) + (t['url'], error_to_compat_str(err))) diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index 55b22c889..9f131f5db 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -9,7 +9,6 @@ import codecs import io import os import random -import shlex import sys @@ -20,6 +19,7 @@ from .compat import ( compat_expanduser, compat_getpass, compat_print, + compat_shlex_split, workaround_optparse_bug9161, ) from .utils import ( @@ -262,10 +262,10 @@ def _real_main(argv=None): parser.error('setting filesize xattr requested but python-xattr is not available') external_downloader_args = None if opts.external_downloader_args: - external_downloader_args = shlex.split(opts.external_downloader_args) + external_downloader_args = compat_shlex_split(opts.external_downloader_args) postprocessor_args = None if opts.postprocessor_args: - postprocessor_args = shlex.split(opts.postprocessor_args) + postprocessor_args = compat_shlex_split(opts.postprocessor_args) match_filter = ( None if opts.match_filter is None else match_filter_func(opts.match_filter)) @@ -377,7 +377,7 @@ def _real_main(argv=None): with YoutubeDL(ydl_opts) as ydl: # Update version if opts.update_self: - update_self(ydl.to_screen, opts.verbose) + update_self(ydl.to_screen, opts.verbose, ydl._opener) # Remove cache dir if opts.rm_cachedir: diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py index 65a0f891c..42a0f8c6f 100755 --- a/youtube_dl/__main__.py +++ b/youtube_dl/__main__.py @@ -11,7 +11,7 @@ if __package__ is None and not hasattr(sys, "frozen"): # direct call of __main__.py import os.path path = os.path.realpath(os.path.abspath(__file__)) - sys.path.append(os.path.dirname(os.path.dirname(path))) + sys.path.insert(0, os.path.dirname(os.path.dirname(path))) import youtube_dl diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py index 0c57c7aeb..a3e85264a 100644 --- a/youtube_dl/compat.py +++ b/youtube_dl/compat.py @@ -1,15 +1,20 @@ from __future__ import unicode_literals +import binascii import collections +import email import getpass +import io import optparse import os import re +import shlex import shutil import socket import subprocess import sys import itertools +import xml.etree.ElementTree try: @@ -38,11 +43,21 @@ except ImportError: # Python 2 import urlparse as compat_urlparse try: + import urllib.response as compat_urllib_response +except ImportError: # Python 2 + import urllib as compat_urllib_response + +try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar try: + import http.cookies as compat_cookies +except ImportError: # Python 2 + import Cookie as compat_cookies + +try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities @@ -75,6 +90,11 @@ except ImportError: import BaseHTTPServer as compat_http_server try: + compat_str = unicode # Python 2 +except NameError: + compat_str = str + +try: from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes from urllib.parse import unquote as 
compat_urllib_parse_unquote from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus @@ -94,7 +114,7 @@ except ImportError: # Python 2 # Is it a string-like object? string.split return b'' - if isinstance(string, unicode): + if isinstance(string, compat_str): string = string.encode('utf-8') bits = string.split(b'%') if len(bits) == 1: @@ -145,9 +165,38 @@ except ImportError: # Python 2 return compat_urllib_parse_unquote(string, encoding, errors) try: - compat_str = unicode # Python 2 -except NameError: - compat_str = str + from urllib.request import DataHandler as compat_urllib_request_DataHandler +except ImportError: # Python < 3.4 + # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py + class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler): + def data_open(self, req): + # data URLs as specified in RFC 2397. + # + # ignores POSTed data + # + # syntax: + # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data + # mediatype := [ type "/" subtype ] *( ";" parameter ) + # data := *urlchar + # parameter := attribute "=" value + url = req.get_full_url() + + scheme, data = url.split(":", 1) + mediatype, data = data.split(",", 1) + + # even base64 encoded data URLs might be quoted so unquote in any case: + data = compat_urllib_parse_unquote_to_bytes(data) + if mediatype.endswith(";base64"): + data = binascii.a2b_base64(data) + mediatype = mediatype[:-7] + + if not mediatype: + mediatype = "text/plain;charset=US-ASCII" + + headers = email.message_from_string( + "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data))) + + return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url) try: compat_basestring = basestring # Python 2 @@ -164,6 +213,43 @@ try: except ImportError: # Python 2.6 from xml.parsers.expat import ExpatError as compat_xml_parse_error +if sys.version_info[0] >= 3: + compat_etree_fromstring = xml.etree.ElementTree.fromstring +else: + # python 2.x tries to encode unicode strings with ascii (see the + # XMLParser._fixtext method) + etree = xml.etree.ElementTree + + try: + _etree_iter = etree.Element.iter + except AttributeError: # Python <=2.6 + def _etree_iter(root): + for el in root.findall('*'): + yield el + for sub in _etree_iter(el): + yield sub + + # on 2.6 XML doesn't have a parser argument, function copied from CPython + # 2.7 source + def _XML(text, parser=None): + if not parser: + parser = etree.XMLParser(target=etree.TreeBuilder()) + parser.feed(text) + return parser.close() + + def _element_factory(*args, **kwargs): + el = etree.Element(*args, **kwargs) + for k, v in el.items(): + if isinstance(v, bytes): + el.set(k, v.decode('utf-8')) + return el + + def compat_etree_fromstring(text): + doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory))) + for el in _etree_iter(doc): + if el.text is not None and isinstance(el.text, bytes): + el.text = el.text.decode('utf-8') + return doc try: from urllib.parse import parse_qs as compat_parse_qs @@ -222,6 +308,17 @@ except ImportError: # Python < 3.3 return "'" + s.replace("'", "'\"'\"'") + "'" +if sys.version_info >= (2, 7, 3): + compat_shlex_split = shlex.split +else: + # Working around shlex issue with unicode strings on some python 2 + # versions (see http://bugs.python.org/issue1548891) + def compat_shlex_split(s, comments=False, posix=True): + if isinstance(s, compat_str): + s = s.encode('utf-8') + return shlex.split(s, comments, posix) + + def compat_ord(c): if type(c) is int: return c @@ -399,26 +496,32 @@ if 
hasattr(shutil, 'get_terminal_size'): # Python >= 3.3 else: _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines']) - def compat_get_terminal_size(): - columns = compat_getenv('COLUMNS', None) + def compat_get_terminal_size(fallback=(80, 24)): + columns = compat_getenv('COLUMNS') if columns: columns = int(columns) else: columns = None - lines = compat_getenv('LINES', None) + lines = compat_getenv('LINES') if lines: lines = int(lines) else: lines = None - try: - sp = subprocess.Popen( - ['stty', 'size'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = sp.communicate() - lines, columns = map(int, out.split()) - except Exception: - pass + if columns is None or lines is None or columns <= 0 or lines <= 0: + try: + sp = subprocess.Popen( + ['stty', 'size'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = sp.communicate() + _lines, _columns = map(int, out.split()) + except Exception: + _columns, _lines = _terminal_size(*fallback) + + if columns is None or columns <= 0: + columns = _columns + if lines is None or lines <= 0: + lines = _lines return _terminal_size(columns, lines) try: @@ -431,11 +534,18 @@ except TypeError: # Python 2.6 yield n n += step +if sys.version_info >= (3, 0): + from tokenize import tokenize as compat_tokenize_tokenize +else: + from tokenize import generate_tokens as compat_tokenize_tokenize + __all__ = [ 'compat_HTTPError', 'compat_basestring', 'compat_chr', 'compat_cookiejar', + 'compat_cookies', + 'compat_etree_fromstring', 'compat_expanduser', 'compat_get_terminal_size', 'compat_getenv', @@ -448,9 +558,11 @@ __all__ = [ 'compat_ord', 'compat_parse_qs', 'compat_print', + 'compat_shlex_split', 'compat_socket_create_connection', 'compat_str', 'compat_subprocess_get_DEVNULL', + 'compat_tokenize_tokenize', 'compat_urllib_error', 'compat_urllib_parse', 'compat_urllib_parse_unquote', @@ -458,6 +570,8 @@ __all__ = [ 'compat_urllib_parse_unquote_to_bytes', 'compat_urllib_parse_urlparse', 'compat_urllib_request', + 'compat_urllib_request_DataHandler', + 'compat_urllib_response', 'compat_urlparse', 'compat_urlretrieve', 'compat_xml_parse_error', diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py index 97e755d4b..beae8c4d0 100644 --- a/youtube_dl/downloader/common.py +++ b/youtube_dl/downloader/common.py @@ -5,9 +5,9 @@ import re import sys import time -from ..compat import compat_str from ..utils import ( encodeFilename, + error_to_compat_str, decodeArgument, format_bytes, timeconvert, @@ -42,7 +42,7 @@ class FileDownloader(object): min_filesize: Skip files smaller than this size max_filesize: Skip files larger than this size xattr_set_filesize: Set ytdl.filesize user xattribute with expected size. - (experimenatal) + (experimental) external_downloader_args: A list of additional command-line arguments for the external downloader. 
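In the `external.py` hunk that follows, the new `_option`, `_bool_option` and `_valueless_option` methods delegate to `cli_option`, `cli_bool_option` and `cli_valueless_option` helpers added to `youtube_dl/utils.py`. That utils hunk is not part of this excerpt, so the sketch below is reconstructed from the `test_cli_*` expectations earlier in the diff and should be read as an assumption rather than the committed code:

```
def cli_option(params, command_option, param):
    # {'proxy': '127.0.0.1:3128'} -> ['--proxy', '127.0.0.1:3128'];
    # a missing or None value yields no arguments at all
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_valueless_option(params, command_option, param, expected_value=True):
    # emit the bare flag only when the param equals the expected value
    return [command_option] if params.get(param) == expected_value else []


def cli_bool_option(params, command_option, param,
                    true_value='true', false_value='false', separator=None):
    # always emits the option; the bool only picks which value goes with it
    param = params.get(param)
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]
```

Under this reading, Aria2cFD below can spell `--check-certificate=false` from the inverted `nocheckcertificate` param simply by passing swapped true/false values and `'='` as the separator.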
@@ -186,7 +186,7 @@ class FileDownloader(object): return os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) except (IOError, OSError) as err: - self.report_error('unable to rename file: %s' % compat_str(err)) + self.report_error('unable to rename file: %s' % error_to_compat_str(err)) def try_utime(self, filename, last_modified_hdr): """Try to set the last-modified time of the given file.""" @@ -325,7 +325,7 @@ class FileDownloader(object): ) # Check file already present - if filename != '-' and nooverwrites_and_exists or continuedl_and_exists: + if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists): self.report_file_already_downloaded(filename) self._hook_progress({ 'filename': filename, diff --git a/youtube_dl/downloader/dash.py b/youtube_dl/downloader/dash.py index 8b6fa2753..535f2a7fc 100644 --- a/youtube_dl/downloader/dash.py +++ b/youtube_dl/downloader/dash.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re from .common import FileDownloader -from ..compat import compat_urllib_request +from ..utils import sanitized_Request class DashSegmentsFD(FileDownloader): @@ -22,7 +22,7 @@ class DashSegmentsFD(FileDownloader): def append_url_to_file(outf, target_url, target_name, remaining_bytes=None): self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name)) - req = compat_urllib_request.Request(target_url) + req = sanitized_Request(target_url) if remaining_bytes is not None: req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1)) diff --git a/youtube_dl/downloader/external.py b/youtube_dl/downloader/external.py index 1d5cc9904..2bc011266 100644 --- a/youtube_dl/downloader/external.py +++ b/youtube_dl/downloader/external.py @@ -5,6 +5,10 @@ import subprocess from .common import FileDownloader from ..utils import ( + cli_option, + cli_valueless_option, + cli_bool_option, + cli_configuration_args, encodeFilename, encodeArgument, ) @@ -45,18 +49,17 @@ class ExternalFD(FileDownloader): def supports(cls, info_dict): return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps') - def _source_address(self, command_option): - source_address = self.params.get('source_address') - if source_address is None: - return [] - return [command_option, source_address] + def _option(self, command_option, param): + return cli_option(self.params, command_option, param) + + def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None): + return cli_bool_option(self.params, command_option, param, true_value, false_value, separator) + + def _valueless_option(self, command_option, param, expected_value=True): + return cli_valueless_option(self.params, command_option, param, expected_value) def _configuration_args(self, default=[]): - ex_args = self.params.get('external_downloader_args') - if ex_args is None: - return default - assert isinstance(ex_args, list) - return ex_args + return cli_configuration_args(self.params, 'external_downloader_args', default) def _call_downloader(self, tmpfilename, info_dict): """ Either overwrite this or implement _make_cmd """ @@ -77,7 +80,19 @@ class CurlFD(ExternalFD): cmd = [self.exe, '--location', '-o', tmpfilename] for key, val in info_dict['http_headers'].items(): cmd += ['--header', '%s: %s' % (key, val)] - cmd += self._source_address('--interface') + cmd += self._option('--interface', 'source_address') + cmd += self._option('--proxy', 'proxy') + cmd += self._valueless_option('--insecure', 'nocheckcertificate') + cmd += self._configuration_args() + 
cmd += ['--', info_dict['url']] + return cmd + + +class AxelFD(ExternalFD): + def _make_cmd(self, tmpfilename, info_dict): + cmd = [self.exe, '-o', tmpfilename] + for key, val in info_dict['http_headers'].items(): + cmd += ['-H', '%s: %s' % (key, val)] cmd += self._configuration_args() cmd += ['--', info_dict['url']] return cmd @@ -88,7 +103,9 @@ class WgetFD(ExternalFD): cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies'] for key, val in info_dict['http_headers'].items(): cmd += ['--header', '%s: %s' % (key, val)] - cmd += self._source_address('--bind-address') + cmd += self._option('--bind-address', 'source_address') + cmd += self._option('--proxy', 'proxy') + cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate') cmd += self._configuration_args() cmd += ['--', info_dict['url']] return cmd @@ -105,7 +122,9 @@ class Aria2cFD(ExternalFD): cmd += ['--out', os.path.basename(tmpfilename)] for key, val in info_dict['http_headers'].items(): cmd += ['--header', '%s: %s' % (key, val)] - cmd += self._source_address('--interface') + cmd += self._option('--interface', 'source_address') + cmd += self._option('--all-proxy', 'proxy') + cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=') cmd += ['--', info_dict['url']] return cmd diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py index b1a858c45..aaf0c49c8 100644 --- a/youtube_dl/downloader/f4m.py +++ b/youtube_dl/downloader/f4m.py @@ -5,19 +5,20 @@ import io import itertools import os import time -import xml.etree.ElementTree as etree -from .common import FileDownloader -from .http import HttpFD +from .fragment import FragmentFD from ..compat import ( + compat_etree_fromstring, compat_urlparse, compat_urllib_error, + compat_urllib_parse_urlparse, ) from ..utils import ( - struct_pack, - struct_unpack, encodeFilename, + fix_xml_ampersands, sanitize_open, + struct_pack, + struct_unpack, xpath_text, ) @@ -226,16 +227,13 @@ def _add_ns(prop): return '{http://ns.adobe.com/f4m/1.0}%s' % prop -class HttpQuietDownloader(HttpFD): - def to_screen(self, *args, **kargs): - pass - - -class F4mFD(FileDownloader): +class F4mFD(FragmentFD): """ A downloader for f4m manifests or AdobeHDS. """ + FD_NAME = 'f4m' + def _get_unencrypted_media(self, doc): media = doc.findall(_add_ns('media')) if not media: @@ -288,10 +286,15 @@ class F4mFD(FileDownloader): def real_download(self, filename, info_dict): man_url = info_dict['url'] requested_bitrate = info_dict.get('tbr') - self.to_screen('[download] Downloading f4m manifest') - manifest = self.ydl.urlopen(man_url).read() - - doc = etree.fromstring(manifest) + self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME) + urlh = self.ydl.urlopen(man_url) + man_url = urlh.geturl() + # Some manifests may be malformed, e.g. 
prosiebensat1 generated manifests + # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244 + # and https://github.com/rg3/youtube-dl/issues/7823) + manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip() + + doc = compat_etree_fromstring(manifest) formats = [(int(f.attrib.get('bitrate', -1)), f) for f in self._get_unencrypted_media(doc)] if requested_bitrate is None: @@ -320,94 +323,53 @@ class F4mFD(FileDownloader): # For some akamai manifests we'll need to add a query to the fragment url akamai_pv = xpath_text(doc, _add_ns('pv-2.0')) - self.report_destination(filename) - http_dl = HttpQuietDownloader( - self.ydl, - { - 'continuedl': True, - 'quiet': True, - 'noprogress': True, - 'ratelimit': self.params.get('ratelimit', None), - 'test': self.params.get('test', False), - } - ) - tmpfilename = self.temp_name(filename) - (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb') + ctx = { + 'filename': filename, + 'total_frags': total_frags, + } + + self._prepare_frag_download(ctx) + + dest_stream = ctx['dest_stream'] write_flv_header(dest_stream) if not live: write_metadata_tag(dest_stream, metadata) - # This dict stores the download progress, it's updated by the progress - # hook - state = { - 'status': 'downloading', - 'downloaded_bytes': 0, - 'frag_index': 0, - 'frag_count': total_frags, - 'filename': filename, - 'tmpfilename': tmpfilename, - } - start = time.time() - - def frag_progress_hook(s): - if s['status'] not in ('downloading', 'finished'): - return - - frag_total_bytes = s.get('total_bytes', 0) - if s['status'] == 'finished': - state['downloaded_bytes'] += frag_total_bytes - state['frag_index'] += 1 - - estimated_size = ( - (state['downloaded_bytes'] + frag_total_bytes) / - (state['frag_index'] + 1) * total_frags) - time_now = time.time() - state['total_bytes_estimate'] = estimated_size - state['elapsed'] = time_now - start - - if s['status'] == 'finished': - progress = self.calc_percent(state['frag_index'], total_frags) - else: - frag_downloaded_bytes = s['downloaded_bytes'] - frag_progress = self.calc_percent(frag_downloaded_bytes, - frag_total_bytes) - progress = self.calc_percent(state['frag_index'], total_frags) - progress += frag_progress / float(total_frags) - - state['eta'] = self.calc_eta( - start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes) - state['speed'] = s.get('speed') - self._hook_progress(state) + base_url_parsed = compat_urllib_parse_urlparse(base_url) - http_dl.add_progress_hook(frag_progress_hook) + self._start_frag_download(ctx) frags_filenames = [] while fragments_list: seg_i, frag_i = fragments_list.pop(0) name = 'Seg%d-Frag%d' % (seg_i, frag_i) - url = base_url + name + query = [] + if base_url_parsed.query: + query.append(base_url_parsed.query) if akamai_pv: - url += '?' 
+ akamai_pv.strip(';') + query.append(akamai_pv.strip(';')) if info_dict.get('extra_param_to_segment_url'): - url += info_dict.get('extra_param_to_segment_url') - frag_filename = '%s-%s' % (tmpfilename, name) + query.append(info_dict['extra_param_to_segment_url']) + url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query)) + frag_filename = '%s-%s' % (ctx['tmpfilename'], name) try: - success = http_dl.download(frag_filename, {'url': url}) + success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()}) if not success: return False - with open(frag_filename, 'rb') as down: - down_data = down.read() - reader = FlvReader(down_data) - while True: - _, box_type, box_data = reader.read_box_info() - if box_type == b'mdat': - dest_stream.write(box_data) - break + (down, frag_sanitized) = sanitize_open(frag_filename, 'rb') + down_data = down.read() + down.close() + reader = FlvReader(down_data) + while True: + _, box_type, box_data = reader.read_box_info() + if box_type == b'mdat': + dest_stream.write(box_data) + break if live: - os.remove(frag_filename) + os.remove(encodeFilename(frag_sanitized)) else: - frags_filenames.append(frag_filename) + frags_filenames.append(frag_sanitized) except (compat_urllib_error.HTTPError, ) as err: if live and (err.code == 404 or err.code == 410): # We didn't keep up with the live window. Continue @@ -425,20 +387,9 @@ class F4mFD(FileDownloader): msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1)) self.report_warning(msg) - dest_stream.close() + self._finish_frag_download(ctx) - elapsed = time.time() - start - self.try_rename(tmpfilename, filename) for frag_file in frags_filenames: - os.remove(frag_file) - - fsize = os.path.getsize(encodeFilename(filename)) - self._hook_progress({ - 'downloaded_bytes': fsize, - 'total_bytes': fsize, - 'filename': filename, - 'status': 'finished', - 'elapsed': elapsed, - }) + os.remove(encodeFilename(frag_file)) return True diff --git a/youtube_dl/downloader/fragment.py b/youtube_dl/downloader/fragment.py new file mode 100644 index 000000000..5a64b29ee --- /dev/null +++ b/youtube_dl/downloader/fragment.py @@ -0,0 +1,111 @@ +from __future__ import division, unicode_literals + +import os +import time + +from .common import FileDownloader +from .http import HttpFD +from ..utils import ( + encodeFilename, + sanitize_open, +) + + +class HttpQuietDownloader(HttpFD): + def to_screen(self, *args, **kargs): + pass + + +class FragmentFD(FileDownloader): + """ + A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests). 
+ """ + + def _prepare_and_start_frag_download(self, ctx): + self._prepare_frag_download(ctx) + self._start_frag_download(ctx) + + def _prepare_frag_download(self, ctx): + self.to_screen('[%s] Total fragments: %d' % (self.FD_NAME, ctx['total_frags'])) + self.report_destination(ctx['filename']) + dl = HttpQuietDownloader( + self.ydl, + { + 'continuedl': True, + 'quiet': True, + 'noprogress': True, + 'ratelimit': self.params.get('ratelimit', None), + 'retries': self.params.get('retries', 0), + 'test': self.params.get('test', False), + } + ) + tmpfilename = self.temp_name(ctx['filename']) + dest_stream, tmpfilename = sanitize_open(tmpfilename, 'wb') + ctx.update({ + 'dl': dl, + 'dest_stream': dest_stream, + 'tmpfilename': tmpfilename, + }) + + def _start_frag_download(self, ctx): + total_frags = ctx['total_frags'] + # This dict stores the download progress, it's updated by the progress + # hook + state = { + 'status': 'downloading', + 'downloaded_bytes': 0, + 'frag_index': 0, + 'frag_count': total_frags, + 'filename': ctx['filename'], + 'tmpfilename': ctx['tmpfilename'], + } + start = time.time() + ctx['started'] = start + + def frag_progress_hook(s): + if s['status'] not in ('downloading', 'finished'): + return + + frag_total_bytes = s.get('total_bytes', 0) + if s['status'] == 'finished': + state['downloaded_bytes'] += frag_total_bytes + state['frag_index'] += 1 + + estimated_size = ( + (state['downloaded_bytes'] + frag_total_bytes) / + (state['frag_index'] + 1) * total_frags) + time_now = time.time() + state['total_bytes_estimate'] = estimated_size + state['elapsed'] = time_now - start + + if s['status'] == 'finished': + progress = self.calc_percent(state['frag_index'], total_frags) + else: + frag_downloaded_bytes = s['downloaded_bytes'] + frag_progress = self.calc_percent(frag_downloaded_bytes, + frag_total_bytes) + progress = self.calc_percent(state['frag_index'], total_frags) + progress += frag_progress / float(total_frags) + + state['eta'] = self.calc_eta( + start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes) + state['speed'] = s.get('speed') + self._hook_progress(state) + + ctx['dl'].add_progress_hook(frag_progress_hook) + + return start + + def _finish_frag_download(self, ctx): + ctx['dest_stream'].close() + elapsed = time.time() - ctx['started'] + self.try_rename(ctx['tmpfilename'], ctx['filename']) + fsize = os.path.getsize(encodeFilename(ctx['filename'])) + + self._hook_progress({ + 'downloaded_bytes': fsize, + 'total_bytes': fsize, + 'filename': ctx['filename'], + 'status': 'finished', + 'elapsed': elapsed, + }) diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py index 8be4f4249..b5a3e1167 100644 --- a/youtube_dl/downloader/hls.py +++ b/youtube_dl/downloader/hls.py @@ -4,15 +4,16 @@ import os import re import subprocess -from ..postprocessor.ffmpeg import FFmpegPostProcessor from .common import FileDownloader -from ..compat import ( - compat_urlparse, - compat_urllib_request, -) +from .fragment import FragmentFD + +from ..compat import compat_urlparse +from ..postprocessor.ffmpeg import FFmpegPostProcessor from ..utils import ( encodeArgument, encodeFilename, + sanitize_open, + handle_youtubedl_headers, ) @@ -28,10 +29,22 @@ class HlsFD(FileDownloader): return False ffpp.check_version() - args = [ - encodeArgument(opt) - for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')] - args.append(encodeFilename(tmpfilename, True)) + args = [ffpp.executable, '-y'] + + if 
info_dict['http_headers'] and re.match(r'^https?://', url): + # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv: + # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header. + headers = handle_youtubedl_headers(info_dict['http_headers']) + args += [ + '-headers', + ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())] + + args += ['-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc'] + + args = [encodeArgument(opt) for opt in args] + args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True)) + + self._debug_cmd(args) retval = subprocess.call(args) if retval == 0: @@ -51,54 +64,51 @@ class HlsFD(FileDownloader): return False -class NativeHlsFD(FileDownloader): +class NativeHlsFD(FragmentFD): """ A more limited implementation that does not require ffmpeg """ + FD_NAME = 'hlsnative' + def real_download(self, filename, info_dict): - url = info_dict['url'] - self.report_destination(filename) - tmpfilename = self.temp_name(filename) + man_url = info_dict['url'] + self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME) + manifest = self.ydl.urlopen(man_url).read() - self.to_screen( - '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id']) - data = self.ydl.urlopen(url).read() - s = data.decode('utf-8', 'ignore') - segment_urls = [] + s = manifest.decode('utf-8', 'ignore') + fragment_urls = [] for line in s.splitlines(): line = line.strip() if line and not line.startswith('#'): segment_url = ( line if re.match(r'^https?://', line) - else compat_urlparse.urljoin(url, line)) - segment_urls.append(segment_url) - - is_test = self.params.get('test', False) - remaining_bytes = self._TEST_FILE_SIZE if is_test else None - byte_counter = 0 - with open(tmpfilename, 'wb') as outf: - for i, segurl in enumerate(segment_urls): - self.to_screen( - '[hlsnative] %s: Downloading segment %d / %d' % - (info_dict['id'], i + 1, len(segment_urls))) - seg_req = compat_urllib_request.Request(segurl) - if remaining_bytes is not None: - seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1)) - - segment = self.ydl.urlopen(seg_req).read() - if remaining_bytes is not None: - segment = segment[:remaining_bytes] - remaining_bytes -= len(segment) - outf.write(segment) - byte_counter += len(segment) - if remaining_bytes is not None and remaining_bytes <= 0: + else compat_urlparse.urljoin(man_url, line)) + fragment_urls.append(segment_url) + # We only download the first fragment during the test + if self.params.get('test', False): break - self._hook_progress({ - 'downloaded_bytes': byte_counter, - 'total_bytes': byte_counter, + ctx = { 'filename': filename, - 'status': 'finished', - }) - self.try_rename(tmpfilename, filename) + 'total_frags': len(fragment_urls), + } + + self._prepare_and_start_frag_download(ctx) + + frags_filenames = [] + for i, frag_url in enumerate(fragment_urls): + frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i) + success = ctx['dl'].download(frag_filename, {'url': frag_url}) + if not success: + return False + down, frag_sanitized = sanitize_open(frag_filename, 'rb') + ctx['dest_stream'].write(down.read()) + down.close() + frags_filenames.append(frag_sanitized) + + self._finish_frag_download(ctx) + + for frag_file in frags_filenames: + os.remove(encodeFilename(frag_file)) + return True diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py index b7f144af9..56840e026 100644 --- a/youtube_dl/downloader/http.py +++ b/youtube_dl/downloader/http.py @@ -4,16 +4,15 @@ import 
errno import os import socket import time +import re from .common import FileDownloader -from ..compat import ( - compat_urllib_request, - compat_urllib_error, -) +from ..compat import compat_urllib_error from ..utils import ( ContentTooShortError, encodeFilename, sanitize_open, + sanitized_Request, ) @@ -28,8 +27,8 @@ class HttpFD(FileDownloader): add_headers = info_dict.get('http_headers') if add_headers: headers.update(add_headers) - basic_request = compat_urllib_request.Request(url, None, headers) - request = compat_urllib_request.Request(url, None, headers) + basic_request = sanitized_Request(url, None, headers) + request = sanitized_Request(url, None, headers) is_test = self.params.get('test', False) @@ -57,6 +56,24 @@ class HttpFD(FileDownloader): # Establish connection try: data = self.ydl.urlopen(request) + # When trying to resume, Content-Range HTTP header of response has to be checked + # to match the value of requested Range HTTP header. This is due to a webservers + # that don't support resuming and serve a whole file with no Content-Range + # set in response despite of requested Range (see + # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799) + if resume_len > 0: + content_range = data.headers.get('Content-Range') + if content_range: + content_range_m = re.search(r'bytes (\d+)-', content_range) + # Content-Range is present and matches requested Range, resume is possible + if content_range_m and resume_len == int(content_range_m.group(1)): + break + # Content-Range is either not present or invalid. Assuming remote webserver is + # trying to send the whole file, resume is not possible, so wiping the local file + # and performing entire redownload + self.report_unable_to_resume() + resume_len = 0 + open_mode = 'wb' break except (compat_urllib_error.HTTPError, ) as err: if (err.code < 500 or err.code >= 600) and err.code != 416: diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py index 7d19bb808..14d56db47 100644 --- a/youtube_dl/downloader/rtmp.py +++ b/youtube_dl/downloader/rtmp.py @@ -105,7 +105,7 @@ class RtmpFD(FileDownloader): protocol = info_dict.get('rtmp_protocol', None) real_time = info_dict.get('rtmp_real_time', False) no_resume = info_dict.get('no_resume', False) - continue_dl = info_dict.get('continuedl', True) + continue_dl = self.params.get('continuedl', True) self.report_destination(filename) tmpfilename = self.temp_name(filename) @@ -117,7 +117,7 @@ class RtmpFD(FileDownloader): return False # Download using rtmpdump. rtmpdump returns exit code 2 when - # the connection was interrumpted and resuming appears to be + # the connection was interrupted and resuming appears to be # possible. This is part of rtmpdump's normal usage, AFAIK. 
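A self-contained sketch of the resume loop that this comment describes; the stream URL and output name are hypothetical, and the real method layers progress parsing and additional retry/stall checks on top of the bare loop shown here:

```
import subprocess

RD_SUCCESS = 0
RD_INCOMPLETE = 2  # interrupted; resuming appears possible

basic_args = ['rtmpdump', '--verbose', '-r', 'rtmp://example.com/live/stream',
              '-o', 'stream.tmp.flv']

retval = subprocess.call(basic_args)
while retval == RD_INCOMPLETE:
    # ask rtmpdump to continue the partially downloaded file
    retval = subprocess.call(basic_args + ['--resume'])
```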
basic_args = [ 'rtmpdump', '--verbose', '-r', url, diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index 6655d7eb5..e7b536df0 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -3,9 +3,15 @@ from __future__ import unicode_literals from .abc import ABCIE from .abc7news import Abc7NewsIE from .academicearth import AcademicEarthCourseIE +from .acast import ( + ACastIE, + ACastChannelIE, +) from .addanime import AddAnimeIE from .adobetv import ( AdobeTVIE, + AdobeTVShowIE, + AdobeTVChannelIE, AdobeTVVideoIE, ) from .adultswim import AdultSwimIE @@ -38,12 +44,17 @@ from .arte import ( ) from .atresplayer import AtresPlayerIE from .atttechchannel import ATTTechChannelIE +from .audimedia import AudiMediaIE from .audiomack import AudiomackIE, AudiomackAlbumIE from .azubu import AzubuIE from .baidu import BaiduVideoIE from .bambuser import BambuserIE, BambuserChannelIE from .bandcamp import BandcampIE, BandcampAlbumIE -from .bbccouk import BBCCoUkIE +from .bbc import ( + BBCCoUkIE, + BBCCoUkArticleIE, + BBCIE, +) from .beeg import BeegIE from .behindkink import BehindKinkIE from .beatportpro import BeatportProIE @@ -56,7 +67,10 @@ from .bloomberg import BloombergIE from .bpb import BpbIE from .br import BRIE from .breakcom import BreakIE -from .brightcove import BrightcoveIE +from .brightcove import ( + BrightcoveLegacyIE, + BrightcoveNewIE, +) from .buzzfeed import BuzzFeedIE from .byutv import BYUtvIE from .c56 import C56IE @@ -64,7 +78,6 @@ from .camdemy import ( CamdemyIE, CamdemyFolderIE ) -from .canal13cl import Canal13clIE from .canalplus import CanalplusIE from .canalc2 import Canalc2IE from .cbs import CBSIE @@ -73,6 +86,7 @@ from .cbssports import CBSSportsIE from .ccc import CCCIE from .ceskatelevize import CeskaTelevizeIE from .channel9 import Channel9IE +from .chaturbate import ChaturbateIE from .chilloutzone import ChilloutzoneIE from .chirbit import ( ChirbitIE, @@ -85,6 +99,7 @@ from .cliphunter import CliphunterIE from .clipsyndicate import ClipsyndicateIE from .cloudy import CloudyIE from .clubic import ClubicIE +from .clyp import ClypIE from .cmt import CMTIE from .cnet import CNETIE from .cnn import ( @@ -115,12 +130,15 @@ from .dailymotion import ( ) from .daum import DaumIE from .dbtv import DBTVIE +from .dcn import DCNIE from .dctp import DctpTvIE from .deezer import DeezerPlaylistIE +from .democracynow import DemocracynowIE from .dfb import DFBIE from .dhm import DHMIE from .dotsub import DotsubIE from .douyutv import DouyuTVIE +from .dplay import DPlayIE from .dramafever import ( DramaFeverIE, DramaFeverSeriesIE, @@ -134,7 +152,6 @@ from .dump import DumpIE from .dumpert import DumpertIE from .defense import DefenseGouvFrIE from .discovery import DiscoveryIE -from .divxstage import DivxStageIE from .dropbox import DropboxIE from .eagleplatform import EaglePlatformIE from .ebaumsworld import EbaumsWorldIE @@ -154,6 +171,8 @@ from .eporner import EpornerIE from .eroprofile import EroProfileIE from .escapist import EscapistIE from .espn import ESPNIE +from .esri import EsriVideoIE +from .europa import EuropaIE from .everyonesmixtape import EveryonesMixtapeIE from .exfm import ExfmIE from .expotv import ExpoTVIE @@ -161,14 +180,12 @@ from .extremetube import ExtremeTubeIE from .facebook import FacebookIE from .faz import FazIE from .fc2 import FC2IE +from .fczenit import FczenitIE from .firstpost import FirstpostIE from .firsttv import FirstTVIE from .fivemin import FiveMinIE from .fivetv import FiveTVIE 
-from .fktv import ( - FKTVIE, - FKTVPosteckeIE, -) +from .fktv import FKTVIE from .flickr import FlickrIE from .folketinget import FolketingetIE from .footyroom import FootyRoomIE @@ -188,7 +205,9 @@ from .francetv import ( from .freesound import FreesoundIE from .freespeech import FreespeechIE from .freevideo import FreeVideoIE +from .funimation import FunimationIE from .funnyordie import FunnyOrDieIE +from .gameinformer import GameInformerIE from .gamekings import GamekingsIE from .gameone import ( GameOneIE, @@ -205,15 +224,18 @@ from .gfycat import GfycatIE from .giantbomb import GiantBombIE from .giga import GigaIE from .glide import GlideIE -from .globo import GloboIE +from .globo import ( + GloboIE, + GloboArticleIE, +) from .godtube import GodTubeIE from .goldenmoustache import GoldenMoustacheIE from .golem import GolemIE from .googledrive import GoogleDriveIE from .googleplus import GooglePlusIE from .googlesearch import GoogleSearchIE -from .gorillavid import GorillaVidIE from .goshgay import GoshgayIE +from .gputechconf import GPUTechConfIE from .groupon import GrouponIE from .hark import HarkIE from .hearthisat import HearThisAtIE @@ -225,7 +247,6 @@ from .historicfilms import HistoricFilmsIE from .history import HistoryIE from .hitbox import HitboxIE, HitboxLiveIE from .hornbunny import HornBunnyIE -from .hostingbulk import HostingBulkIE from .hotnewhiphop import HotNewHipHopIE from .howcast import HowcastIE from .howstuffworks import HowStuffWorksIE @@ -237,13 +258,21 @@ from .imdb import ( ImdbIE, ImdbListIE ) -from .imgur import ImgurIE +from .imgur import ( + ImgurIE, + ImgurAlbumIE, +) from .ina import InaIE +from .indavideo import ( + IndavideoIE, + IndavideoEmbedIE, +) from .infoq import InfoQIE from .instagram import InstagramIE, InstagramUserIE from .internetvideoarchive import InternetVideoArchiveIE from .iprima import IPrimaIE from .iqiyi import IqiyiIE +from .ir90tv import Ir90TvIE from .ivi import ( IviIE, IviCompilationIE @@ -287,6 +316,11 @@ from .lifenews import ( LifeNewsIE, LifeEmbedIE, ) +from .limelight import ( + LimelightMediaIE, + LimelightChannelIE, + LimelightChannelListIE, +) from .liveleak import LiveLeakIE from .livestream import ( LivestreamIE, @@ -304,7 +338,6 @@ from .macgamestore import MacGameStoreIE from .mailru import MailRuIE from .malemotion import MalemotionIE from .mdr import MDRIE -from .megavideoz import MegaVideozIE from .metacafe import MetacafeIE from .metacritic import MetacriticIE from .mgoon import MgoonIE @@ -326,16 +359,16 @@ from .motherless import MotherlessIE from .motorsport import MotorsportIE from .movieclips import MovieClipsIE from .moviezine import MoviezineIE -from .movshare import MovShareIE from .mtv import ( MTVIE, MTVServicesEmbeddedIE, MTVIggyIE, + MTVDEIE, ) from .muenchentv import MuenchenTVIE from .musicplayon import MusicPlayOnIE -from .musicvault import MusicVaultIE from .muzu import MuzuTVIE +from .mwave import MwaveIE from .myspace import MySpaceIE, MySpaceAlbumIE from .myspass import MySpassIE from .myvi import MyviIE @@ -349,10 +382,14 @@ from .nbc import ( NBCNewsIE, NBCSportsIE, NBCSportsVPlayerIE, + MSNBCIE, ) from .ndr import ( NDRIE, NJoyIE, + NDREmbedBaseIE, + NDREmbedIE, + NJoyEmbedIE, ) from .ndtv import NDTVIE from .netzkino import NetzkinoIE @@ -387,10 +424,22 @@ from .noco import NocoIE from .normalboots import NormalbootsIE from .nosvideo import NosVideoIE from .nova import NovaIE -from .novamov import NovaMovIE -from .nowness import NownessIE -from .nowtv import NowTVIE -from .nowvideo 
import NowVideoIE +from .novamov import ( + NovaMovIE, + WholeCloudIE, + NowVideoIE, + VideoWeedIE, + CloudTimeIE, +) +from .nowness import ( + NownessIE, + NownessPlaylistIE, + NownessSeriesIE, +) +from .nowtv import ( + NowTVIE, + NowTVListIE, +) from .npo import ( NPOIE, NPOLiveIE, @@ -418,7 +467,6 @@ from .ooyala import ( OoyalaIE, OoyalaExternalIE, ) -from .openfilm import OpenFilmIE from .orf import ( ORFTVthekIE, ORFOE1IE, @@ -428,6 +476,7 @@ from .orf import ( from .parliamentliveuk import ParliamentLiveUKIE from .patreon import PatreonIE from .pbs import PBSIE +from .periscope import PeriscopeIE from .philharmoniedeparis import PhilharmonieDeParisIE from .phoenix import PhoenixIE from .photobucket import PhotobucketIE @@ -436,8 +485,13 @@ from .planetaplay import PlanetaPlayIE from .pladform import PladformIE from .played import PlayedIE from .playfm import PlayFMIE +from .playtvak import PlaytvakIE from .playvid import PlayvidIE from .playwire import PlaywireIE +from .pluralsight import ( + PluralsightIE, + PluralsightCourseIE, +) from .podomatic import PodomaticIE from .porn91 import Porn91IE from .pornhd import PornHdIE @@ -483,6 +537,7 @@ from .rtl2 import RTL2IE from .rtp import RTPIE from .rts import RTSIE from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE +from .rtvnh import RTVNHIE from .ruhd import RUHDIE from .rutube import ( RutubeIE, @@ -509,9 +564,14 @@ from .senateisvp import SenateISVPIE from .servingsys import ServingSysIE from .sexu import SexuIE from .sexykarma import SexyKarmaIE +from .shahid import ShahidIE from .shared import SharedIE from .sharesix import ShareSixIE from .sina import SinaIE +from .skynewsarabia import ( + SkyNewsArabiaIE, + SkyNewsArabiaArticleIE, +) from .slideshare import SlideshareIE from .slutload import SlutloadIE from .smotri import ( @@ -534,7 +594,8 @@ from .soundcloud import ( SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE, - SoundcloudPlaylistIE + SoundcloudPlaylistIE, + SoundcloudSearchIE ) from .soundgasm import ( SoundgasmIE, @@ -553,6 +614,7 @@ from .spankwire import SpankwireIE from .spiegel import SpiegelIE, SpiegelArticleIE from .spiegeltv import SpiegeltvIE from .spike import SpikeIE +from .stitcher import StitcherIE from .sport5 import Sport5IE from .sportbox import ( SportBoxIE, @@ -586,8 +648,10 @@ from .teachingchannel import TeachingChannelIE from .teamcoco import TeamcocoIE from .techtalks import TechTalksIE from .ted import TEDIE +from .tele13 import Tele13IE from .telebruxelles import TeleBruxellesIE from .telecinco import TelecincoIE +from .telegraaf import TelegraafIE from .telemb import TeleMBIE from .teletask import TeleTaskIE from .tenplay import TenPlayIE @@ -595,7 +659,10 @@ from .testurl import TestURLIE from .testtube import TestTubeIE from .tf1 import TF1IE from .theonion import TheOnionIE -from .theplatform import ThePlatformIE +from .theplatform import ( + ThePlatformIE, + ThePlatformFeedIE, +) from .thesixtyone import TheSixtyOneIE from .thisamericanlife import ThisAmericanLifeIE from .thisav import ThisAVIE @@ -610,6 +677,7 @@ from .tnaflix import ( EMPFlixIE, MovieFapIE, ) +from .toggle import ToggleIE from .thvideo import ( THVideoIE, THVideoPlaylistIE @@ -653,7 +721,7 @@ from .twitch import ( TwitchBookmarksIE, TwitchStreamIE, ) -from .twitter import TwitterCardIE +from .twitter import TwitterCardIE, TwitterIE from .ubu import UbuIE from .udemy import ( UdemyIE, @@ -679,18 +747,16 @@ from .vgtv import ( from .vh1 import VH1IE from .vice import ViceIE from .viddler import ViddlerIE -from 
.videobam import VideoBamIE from .videodetective import VideoDetectiveIE -from .videolecturesnet import VideoLecturesNetIE from .videofyme import VideofyMeIE from .videomega import VideoMegaIE from .videopremium import VideoPremiumIE from .videott import VideoTtIE -from .videoweed import VideoWeedIE from .vidme import VidmeIE from .vidzi import VidziIE from .vier import VierIE, VierVideosIE from .viewster import ViewsterIE +from .viidea import ViideaIE from .vimeo import ( VimeoIE, VimeoAlbumIE, @@ -714,6 +780,7 @@ from .vk import ( VKIE, VKUserVideosIE, ) +from .vlive import VLiveIE from .vodlocker import VodlockerIE from .voicerepublic import VoiceRepublicIE from .vporn import VpornIE @@ -742,6 +809,7 @@ from .wrzuta import WrzutaIE from .wsj import WSJIE from .xbef import XBefIE from .xboxclips import XboxClipsIE +from .xfileshare import XFileShareIE from .xhamster import ( XHamsterIE, XHamsterEmbedIE, @@ -785,6 +853,7 @@ from .youtube import ( YoutubeTruncatedIDIE, YoutubeTruncatedURLIE, YoutubeUserIE, + YoutubePlaylistsIE, YoutubeWatchLaterIE, ) from .zapiks import ZapiksIE diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py index dc0fb85d6..c0e5d1abf 100644 --- a/youtube_dl/extractor/abc.py +++ b/youtube_dl/extractor/abc.py @@ -1,16 +1,20 @@ from __future__ import unicode_literals import re -import json from .common import InfoExtractor +from ..utils import ( + ExtractorError, + js_to_json, + int_or_none, +) class ABCIE(InfoExtractor): IE_NAME = 'abc.net.au' - _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)' + _VALID_URL = r'http://www\.abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)' - _TEST = { + _TESTS = [{ 'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334', 'md5': 'cb3dd03b18455a661071ee1e28344d9f', 'info_dict': { @@ -19,23 +23,62 @@ class ABCIE(InfoExtractor): 'title': 'Australia to help staff Ebola treatment centre in Sierra Leone', 'description': 'md5:809ad29c67a05f54eb41f2a105693a67', }, - } + }, { + 'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326', + 'md5': 'db2a5369238b51f9811ad815b69dc086', + 'info_dict': { + 'id': 'NvqvPeNZsHU', + 'ext': 'mp4', + 'upload_date': '20150816', + 'uploader': 'ABC News (Australia)', + 'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". 
Read more here: http://ab.co/1Mwc6ef', + 'uploader_id': 'NewsOnABC', + 'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill', + }, + 'add_ie': ['Youtube'], + }, { + 'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080', + 'md5': 'b96eee7c9edf4fc5a358a0252881cc1f', + 'info_dict': { + 'id': '6880080', + 'ext': 'mp3', + 'title': 'NAB lifts interest rates, following Westpac and CBA', + 'description': 'md5:f13d8edc81e462fce4a0437c7dc04728', + }, + }, { + 'url': 'http://www.abc.net.au/news/2015-10-19/6866214', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - urls_info_json = self._search_regex( - r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls', - flags=re.DOTALL) - urls_info = json.loads(urls_info_json.replace('\'', '"')) + mobj = re.search( + r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);', + webpage) + if mobj is None: + raise ExtractorError('Unable to extract video urls') + + urls_info = self._parse_json( + mobj.group('json_data'), video_id, transform_source=js_to_json) + + if not isinstance(urls_info, list): + urls_info = [urls_info] + + if mobj.group('type') == 'YouTube': + return self.playlist_result([ + self.url_result(url_info['url']) for url_info in urls_info]) + formats = [{ 'url': url_info['url'], - 'width': int(url_info['width']), - 'height': int(url_info['height']), - 'tbr': int(url_info['bitrate']), - 'filesize': int(url_info['filesize']), + 'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none', + 'width': int_or_none(url_info.get('width')), + 'height': int_or_none(url_info.get('height')), + 'tbr': int_or_none(url_info.get('bitrate')), + 'filesize': int_or_none(url_info.get('filesize')), } for url_info in urls_info] + self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/academicearth.py b/youtube_dl/extractor/academicearth.py index 47313fba8..34095501c 100644 --- a/youtube_dl/extractor/academicearth.py +++ b/youtube_dl/extractor/academicearth.py @@ -15,7 +15,7 @@ class AcademicEarthCourseIE(InfoExtractor): 'title': 'Laws of Nature', 'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.', }, - 'playlist_count': 4, + 'playlist_count': 3, } def _real_extract(self, url): diff --git a/youtube_dl/extractor/acast.py b/youtube_dl/extractor/acast.py new file mode 100644 index 000000000..be7913bc7 --- /dev/null +++ b/youtube_dl/extractor/acast.py @@ -0,0 +1,70 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import int_or_none + + +class ACastBaseIE(InfoExtractor): + _API_BASE_URL = 'https://www.acast.com/api/' + + +class ACastIE(ACastBaseIE): + IE_NAME = 'acast' + _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<channel>[^/]+)/(?P<id>[^/#?]+)' + _TEST = { + 'url': 'https://www.acast.com/condenasttraveler/-where-are-you-taipei-101-taiwan', + 'md5': 'ada3de5a1e3a2a381327d749854788bb', + 'info_dict': { + 'id': '57de3baa-4bb0-487e-9418-2692c1277a34', + 'ext': 'mp3', + 'title': '"Where Are You?": Taipei 101, Taiwan', + 'timestamp': 1196172000000, + 'description': 'md5:0c5d8201dfea2b93218ea986c91eee6e', + 'duration': 211, + } + } + + def _real_extract(self, url): + channel, display_id = re.match(self._VALID_URL, url).groups() + cast_data = self._download_json(self._API_BASE_URL 
+ 'channels/%s/acasts/%s/playback' % (channel, display_id), display_id) + + return { + 'id': compat_str(cast_data['id']), + 'display_id': display_id, + 'url': cast_data['blings'][0]['audio'], + 'title': cast_data['name'], + 'description': cast_data.get('description'), + 'thumbnail': cast_data.get('image'), + 'timestamp': int_or_none(cast_data.get('publishingDate')), + 'duration': int_or_none(cast_data.get('duration')), + } + + +class ACastChannelIE(ACastBaseIE): + IE_NAME = 'acast:channel' + _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<id>[^/#?]+)' + _TEST = { + 'url': 'https://www.acast.com/condenasttraveler', + 'info_dict': { + 'id': '50544219-29bb-499e-a083-6087f4cb7797', + 'title': 'Condé Nast Traveler Podcast', + 'description': 'md5:98646dee22a5b386626ae31866638fbd', + }, + 'playlist_mincount': 20, + } + + @classmethod + def suitable(cls, url): + return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url) + + def _real_extract(self, url): + display_id = self._match_id(url) + channel_data = self._download_json(self._API_BASE_URL + 'channels/%s' % display_id, display_id) + casts = self._download_json(self._API_BASE_URL + 'channels/%s/acasts' % display_id, display_id) + entries = [self.url_result('https://www.acast.com/%s/%s' % (display_id, cast['url']), 'ACast') for cast in casts] + + return self.playlist_result(entries, compat_str(channel_data['id']), channel_data['name'], channel_data.get('description')) diff --git a/youtube_dl/extractor/adobetv.py b/youtube_dl/extractor/adobetv.py index 5e43adc51..8753ee2cf 100644 --- a/youtube_dl/extractor/adobetv.py +++ b/youtube_dl/extractor/adobetv.py @@ -1,23 +1,32 @@ from __future__ import unicode_literals +import re + from .common import InfoExtractor +from ..compat import compat_str from ..utils import ( parse_duration, unified_strdate, str_to_int, + int_or_none, float_or_none, ISO639Utils, + determine_ext, ) -class AdobeTVIE(InfoExtractor): - _VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)' +class AdobeTVBaseIE(InfoExtractor): + _API_BASE_URL = 'http://tv.adobe.com/api/v4/' + + +class AdobeTVIE(AdobeTVBaseIE): + _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?watch/(?P<show_urlname>[^/]+)/(?P<id>[^/]+)' _TEST = { 'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/', 'md5': '9bc5727bcdd55251f35ad311ca74fa1e', 'info_dict': { - 'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop', + 'id': '10981', 'ext': 'mp4', 'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop', 'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311', @@ -29,50 +38,106 @@ class AdobeTVIE(InfoExtractor): } def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - player = self._parse_json( - self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'), - video_id) + language, show_urlname, urlname = re.match(self._VALID_URL, url).groups() + if not language: + language = 'en' - title = player.get('title') or self._search_regex( - r'data-title="([^"]+)"', webpage, 'title') - description = self._og_search_description(webpage) - thumbnail = self._og_search_thumbnail(webpage) - - upload_date = unified_strdate( - self._html_search_meta('datepublished', webpage, 'upload date')) - - duration = parse_duration( - self._html_search_meta('duration', webpage, 'duration') or - self._search_regex( - r'Runtime:\s*(\d{2}:\d{2}:\d{2})', - 
webpage, 'duration', fatal=False)) - - view_count = str_to_int(self._search_regex( - r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>', - webpage, 'view count')) + video_data = self._download_json( + self._API_BASE_URL + 'episode/get/?language=%s&show_urlname=%s&urlname=%s&disclosure=standard' % (language, show_urlname, urlname), + urlname)['data'][0] formats = [{ - 'url': source['src'], - 'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None, - 'tbr': source.get('bitrate'), - } for source in player['sources']] + 'url': source['url'], + 'format_id': source.get('quality_level') or source['url'].split('-')[-1].split('.')[0] or None, + 'width': int_or_none(source.get('width')), + 'height': int_or_none(source.get('height')), + 'tbr': int_or_none(source.get('video_data_rate')), + } for source in video_data['videos']] self._sort_formats(formats) return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'upload_date': upload_date, - 'duration': duration, - 'view_count': view_count, + 'id': compat_str(video_data['id']), + 'title': video_data['title'], + 'description': video_data.get('description'), + 'thumbnail': video_data.get('thumbnail'), + 'upload_date': unified_strdate(video_data.get('start_date')), + 'duration': parse_duration(video_data.get('duration')), + 'view_count': str_to_int(video_data.get('playcount')), 'formats': formats, } +class AdobeTVPlaylistBaseIE(AdobeTVBaseIE): + def _parse_page_data(self, page_data): + return [self.url_result(self._get_element_url(element_data)) for element_data in page_data] + + def _extract_playlist_entries(self, url, display_id): + page = self._download_json(url, display_id) + entries = self._parse_page_data(page['data']) + for page_num in range(2, page['paging']['pages'] + 1): + entries.extend(self._parse_page_data( + self._download_json(url + '&page=%d' % page_num, display_id)['data'])) + return entries + + +class AdobeTVShowIE(AdobeTVPlaylistBaseIE): + _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?show/(?P<id>[^/]+)' + + _TEST = { + 'url': 'http://tv.adobe.com/show/the-complete-picture-with-julieanne-kost', + 'info_dict': { + 'id': '36', + 'title': 'The Complete Picture with Julieanne Kost', + 'description': 'md5:fa50867102dcd1aa0ddf2ab039311b27', + }, + 'playlist_mincount': 136, + } + + def _get_element_url(self, element_data): + return element_data['urls'][0] + + def _real_extract(self, url): + language, show_urlname = re.match(self._VALID_URL, url).groups() + if not language: + language = 'en' + query = 'language=%s&show_urlname=%s' % (language, show_urlname) + + show_data = self._download_json(self._API_BASE_URL + 'show/get/?%s' % query, show_urlname)['data'][0] + + return self.playlist_result( + self._extract_playlist_entries(self._API_BASE_URL + 'episode/?%s' % query, show_urlname), + compat_str(show_data['id']), + show_data['show_name'], + show_data['show_description']) + + +class AdobeTVChannelIE(AdobeTVPlaylistBaseIE): + _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?channel/(?P<id>[^/]+)(?:/(?P<category_urlname>[^/]+))?' 
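The `AdobeTVPlaylistBaseIE` helper introduced above fetches the first API page, reads the total page count from the `paging` object, and requests the remaining pages with a `&page=N` suffix. Reduced to a standalone loop, that pagination pattern looks roughly like the sketch below; the endpoint and the `data`/`paging` JSON shape come from the extractor code, while the function name and stdlib plumbing are illustrative only.

```python
import json
from urllib.request import urlopen  # Python 3 stdlib, for illustration

API_BASE_URL = 'http://tv.adobe.com/api/v4/'

def fetch_all_episodes(show_urlname, language='en'):
    url = API_BASE_URL + 'episode/?language=%s&show_urlname=%s' % (language, show_urlname)
    # The first page also carries the paging metadata.
    page = json.loads(urlopen(url).read().decode('utf-8'))
    entries = list(page['data'])
    # paging['pages'] is the total page count; fetch pages 2..N.
    for page_num in range(2, page['paging']['pages'] + 1):
        more = json.loads(urlopen(url + '&page=%d' % page_num).read().decode('utf-8'))
        entries.extend(more['data'])
    return entries
```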
+ + _TEST = { + 'url': 'http://tv.adobe.com/channel/development', + 'info_dict': { + 'id': 'development', + }, + 'playlist_mincount': 96, + } + + def _get_element_url(self, element_data): + return element_data['url'] + + def _real_extract(self, url): + language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups() + if not language: + language = 'en' + query = 'language=%s&channel_urlname=%s' % (language, channel_urlname) + if category_urlname: + query += '&category_urlname=%s' % category_urlname + + return self.playlist_result( + self._extract_playlist_entries(self._API_BASE_URL + 'show/?%s' % query, channel_urlname), + channel_urlname) + + class AdobeTVVideoIE(InfoExtractor): _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)' @@ -91,28 +156,25 @@ class AdobeTVVideoIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - - webpage = self._download_webpage(url, video_id) - - player_params = self._parse_json(self._search_regex( - r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'), - video_id) + video_data = self._download_json(url + '?format=json', video_id) formats = [{ + 'format_id': '%s-%s' % (determine_ext(source['src']), source.get('height')), 'url': source['src'], - 'width': source.get('width'), - 'height': source.get('height'), - 'tbr': source.get('bitrate'), - } for source in player_params['sources']] + 'width': int_or_none(source.get('width')), + 'height': int_or_none(source.get('height')), + 'tbr': int_or_none(source.get('bitrate')), + } for source in video_data['sources']] + self._sort_formats(formats) # For both metadata and downloaded files the duration varies among # formats. I just pick the max one duration = max(filter(None, [ float_or_none(source.get('duration'), scale=1000) - for source in player_params['sources']])) + for source in video_data['sources']])) subtitles = {} - for translation in player_params.get('translations', []): + for translation in video_data.get('translations', []): lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium']) if lang_id not in subtitles: subtitles[lang_id] = [] @@ -124,8 +186,9 @@ class AdobeTVVideoIE(InfoExtractor): return { 'id': video_id, 'formats': formats, - 'title': player_params['title'], - 'description': self._og_search_description(webpage), + 'title': video_data['title'], + 'description': video_data.get('description'), + 'thumbnail': video_data['video'].get('poster'), 'duration': duration, 'subtitles': subtitles, } diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py index 39335b827..3ae618e71 100644 --- a/youtube_dl/extractor/adultswim.py +++ b/youtube_dl/extractor/adultswim.py @@ -5,6 +5,7 @@ import re from .common import InfoExtractor from ..utils import ( + determine_ext, ExtractorError, float_or_none, xpath_text, @@ -40,7 +41,8 @@ class AdultSwimIE(InfoExtractor): 'id': 'rQxZvXQ4ROaSOqq-or2Mow', 'title': 'Rick and Morty - Pilot', 'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. 
" - } + }, + 'skip': 'This video is only available for registered users', }, { 'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/', 'playlist': [ @@ -123,7 +125,6 @@ class AdultSwimIE(InfoExtractor): else: collections = bootstrapped_data['show']['collections'] collection, video_info = self.find_collection_containing_video(collections, episode_path) - # Video wasn't found in the collections, let's try `slugged_video`. if video_info is None: if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path: @@ -133,7 +134,15 @@ class AdultSwimIE(InfoExtractor): show = bootstrapped_data['show'] show_title = show['title'] - segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']] + stream = video_info.get('stream') + clips = [stream] if stream else video_info.get('clips') + if not clips: + raise ExtractorError( + 'This video is only available via cable service provider subscription that' + ' is not currently supported. You may want to use --cookies.' + if video_info.get('auth') is True else 'Unable to find stream or clips', + expected=True) + segment_ids = [clip['videoPlaybackID'] for clip in clips] episode_id = video_info['id'] episode_title = video_info['title'] @@ -142,7 +151,7 @@ class AdultSwimIE(InfoExtractor): entries = [] for part_num, segment_id in enumerate(segment_ids): - segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id + segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id segment_title = '%s - %s' % (show_title, episode_title) if len(segment_ids) > 1: @@ -156,19 +165,32 @@ class AdultSwimIE(InfoExtractor): xpath_text(idoc, './/trt', 'segment duration').strip()) formats = [] - file_els = idoc.findall('.//files/file') + file_els = idoc.findall('.//files/file') or idoc.findall('./files/file') + unique_urls = [] + unique_file_els = [] for file_el in file_els: + media_url = file_el.text + if not media_url or determine_ext(media_url) == 'f4m': + continue + if file_el.text not in unique_urls: + unique_urls.append(file_el.text) + unique_file_els.append(file_el) + + for file_el in unique_file_els: bitrate = file_el.attrib.get('bitrate') ftype = file_el.attrib.get('type') - - formats.append({ - 'format_id': '%s_%s' % (bitrate, ftype), - 'url': file_el.text.strip(), - # The bitrate may not be a number (for example: 'iphone') - 'tbr': int(bitrate) if bitrate.isdigit() else None, - 'quality': 1 if ftype == 'hd' else -1 - }) + media_url = file_el.text + if determine_ext(media_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + media_url, segment_title, 'mp4', preference=0, m3u8_id='hls')) + else: + formats.append({ + 'format_id': '%s_%s' % (bitrate, ftype), + 'url': file_el.text.strip(), + # The bitrate may not be a number (for example: 'iphone') + 'tbr': int(bitrate) if bitrate.isdigit() else None, + }) self._sort_formats(formats) diff --git a/youtube_dl/extractor/airmozilla.py b/youtube_dl/extractor/airmozilla.py index 611ad1e9d..f8e70f4e5 100644 --- a/youtube_dl/extractor/airmozilla.py +++ b/youtube_dl/extractor/airmozilla.py @@ -20,14 +20,14 @@ class AirMozillaIE(InfoExtractor): 'id': '6x4q2w', 'ext': 'mp4', 'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco', - 'thumbnail': 're:https://\w+\.cloudfront\.net/6x4q2w/poster\.jpg\?t=\d+', + 'thumbnail': 're:https?://vid\.ly/(?P<id>[0-9a-z-]+)/poster', 'description': 'Brings together privacy professionals and others interested in privacy at 
for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...', 'timestamp': 1422487800, 'upload_date': '20150128', 'location': 'SFO Commons', 'duration': 3780, 'view_count': int, - 'categories': ['Main'], + 'categories': ['Main', 'Privacy'], } } diff --git a/youtube_dl/extractor/aljazeera.py b/youtube_dl/extractor/aljazeera.py index 612708e25..5b2c0dc9a 100644 --- a/youtube_dl/extractor/aljazeera.py +++ b/youtube_dl/extractor/aljazeera.py @@ -15,7 +15,8 @@ class AlJazeeraIE(InfoExtractor): 'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.', 'uploader': 'Al Jazeera English', }, - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], + 'skip': 'Not accessible from Travis CI server', } def _real_extract(self, url): @@ -31,5 +32,5 @@ class AlJazeeraIE(InfoExtractor): 'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc' '&%40videoPlayer={0}'.format(brightcove_id) ), - 'ie_key': 'Brightcove', + 'ie_key': 'BrightcoveLegacy', } diff --git a/youtube_dl/extractor/anitube.py b/youtube_dl/extractor/anitube.py index 31f0d417c..23f942ae2 100644 --- a/youtube_dl/extractor/anitube.py +++ b/youtube_dl/extractor/anitube.py @@ -26,8 +26,8 @@ class AnitubeIE(InfoExtractor): video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) - key = self._html_search_regex( - r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key') + key = self._search_regex( + r'src=["\']https?://[^/]+/embed/([A-Za-z0-9_-]+)', webpage, 'key') config_xml = self._download_xml( 'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key) diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py index 576f03b5b..f68dc3236 100644 --- a/youtube_dl/extractor/appletrailers.py +++ b/youtube_dl/extractor/appletrailers.py @@ -13,53 +13,53 @@ from ..utils import ( class AppleTrailersIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)' _TESTS = [{ - "url": "http://trailers.apple.com/trailers/wb/manofsteel/", + 'url': 'http://trailers.apple.com/trailers/wb/manofsteel/', 'info_dict': { 'id': 'manofsteel', }, - "playlist": [ + 'playlist': [ { - "md5": "d97a8e575432dbcb81b7c3acb741f8a8", - "info_dict": { - "id": "manofsteel-trailer4", - "ext": "mov", - "duration": 111, - "title": "Trailer 4", - "upload_date": "20130523", - "uploader_id": "wb", + 'md5': 'd97a8e575432dbcb81b7c3acb741f8a8', + 'info_dict': { + 'id': 'manofsteel-trailer4', + 'ext': 'mov', + 'duration': 111, + 'title': 'Trailer 4', + 'upload_date': '20130523', + 'uploader_id': 'wb', }, }, { - "md5": "b8017b7131b721fb4e8d6f49e1df908c", - "info_dict": { - "id": "manofsteel-trailer3", - "ext": "mov", - "duration": 182, - "title": "Trailer 3", - "upload_date": "20130417", - "uploader_id": "wb", + 'md5': 'b8017b7131b721fb4e8d6f49e1df908c', + 'info_dict': { + 'id': 'manofsteel-trailer3', + 'ext': 'mov', + 'duration': 182, + 'title': 'Trailer 3', + 'upload_date': '20130417', + 'uploader_id': 'wb', }, }, { - "md5": "d0f1e1150989b9924679b441f3404d48", - "info_dict": { - "id": "manofsteel-trailer", - "ext": "mov", - "duration": 148, - "title": "Trailer", - "upload_date": "20121212", - "uploader_id": "wb", + 'md5': 'd0f1e1150989b9924679b441f3404d48', + 'info_dict': { + 'id': 'manofsteel-trailer', + 'ext': 'mov', + 'duration': 148, + 'title': 'Trailer', + 'upload_date': '20121212', + 'uploader_id': 'wb', }, }, { - "md5": 
"5fe08795b943eb2e757fa95cb6def1cb", - "info_dict": { - "id": "manofsteel-teaser", - "ext": "mov", - "duration": 93, - "title": "Teaser", - "upload_date": "20120721", - "uploader_id": "wb", + 'md5': '5fe08795b943eb2e757fa95cb6def1cb', + 'info_dict': { + 'id': 'manofsteel-teaser', + 'ext': 'mov', + 'duration': 93, + 'title': 'Teaser', + 'upload_date': '20120721', + 'uploader_id': 'wb', }, }, ] diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py index 6f465789b..73be6d204 100644 --- a/youtube_dl/extractor/ard.py +++ b/youtube_dl/extractor/ard.py @@ -14,8 +14,8 @@ from ..utils import ( parse_duration, unified_strdate, xpath_text, - parse_xml, ) +from ..compat import compat_etree_fromstring class ARDMediathekIE(InfoExtractor): @@ -161,7 +161,7 @@ class ARDMediathekIE(InfoExtractor): raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True) if re.search(r'[\?&]rss($|[=&])', url): - doc = parse_xml(webpage) + doc = compat_etree_fromstring(webpage.encode('utf-8')) if doc.tag == 'rss': return GenericIE()._extract_rss(url, video_id, doc) diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py index 76de24477..2a00da3ee 100644 --- a/youtube_dl/extractor/arte.py +++ b/youtube_dl/extractor/arte.py @@ -4,6 +4,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..compat import ( + compat_parse_qs, + compat_urllib_parse_urlparse, +) from ..utils import ( find_xpath_attr, unified_strdate, @@ -77,7 +81,13 @@ class ArteTVPlus7IE(InfoExtractor): def _extract_from_webpage(self, webpage, video_id, lang): json_url = self._html_search_regex( [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'], - webpage, 'json vp url') + webpage, 'json vp url', default=None) + if not json_url: + iframe_url = self._html_search_regex( + r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1', + webpage, 'iframe url', group='url') + json_url = compat_parse_qs( + compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0] return self._extract_from_json_url(json_url, video_id, lang) def _extract_from_json_url(self, json_url, video_id, lang): diff --git a/youtube_dl/extractor/atresplayer.py b/youtube_dl/extractor/atresplayer.py index 29f8795d3..50e47ba0a 100644 --- a/youtube_dl/extractor/atresplayer.py +++ b/youtube_dl/extractor/atresplayer.py @@ -7,11 +7,11 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, ) from ..utils import ( int_or_none, float_or_none, + sanitized_Request, xpath_text, ExtractorError, ) @@ -63,7 +63,7 @@ class AtresPlayerIE(InfoExtractor): 'j_password': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) request.add_header('Content-Type', 'application/x-www-form-urlencoded') response = self._download_webpage( @@ -94,7 +94,7 @@ class AtresPlayerIE(InfoExtractor): formats = [] for fmt in ['windows', 'android_tablet']: - request = compat_urllib_request.Request( + request = sanitized_Request( self._URL_VIDEO_TEMPLATE.format(fmt, episode_id, timestamp_shifted, token)) request.add_header('User-Agent', self._USER_AGENT) diff --git a/youtube_dl/extractor/audimedia.py b/youtube_dl/extractor/audimedia.py new file mode 100644 index 000000000..b0b089dee --- /dev/null +++ b/youtube_dl/extractor/audimedia.py @@ -0,0 +1,80 @@ +# coding: utf-8 +from 
__future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_iso8601, + sanitized_Request, +) + + +class AudiMediaIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?audimedia\.tv/(?:en|de)/vid/(?P<id>[^/?#]+)' + _TEST = { + 'url': 'https://audimedia.tv/en/vid/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test', + 'md5': '79a8b71c46d49042609795ab59779b66', + 'info_dict': { + 'id': '1564', + 'ext': 'mp4', + 'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test', + 'description': 'md5:60e5d30a78ced725f7b8d34370762941', + 'upload_date': '20151124', + 'timestamp': 1448354940, + 'duration': 74022, + 'view_count': int, + } + } + # extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken) + _AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2' + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + raw_payload = self._search_regex(r'<script[^>]+class="amtv-embed"[^>]+id="([^"]+)"', webpage, 'raw payload') + _, stage_mode, video_id, lang = raw_payload.split('-') + + # TODO: handle s and e stage_mode (live streams and ended live streams) + if stage_mode not in ('s', 'e'): + request = sanitized_Request( + 'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang), + headers={'X-Auth-Token': self._AUTH_TOKEN}) + json_data = self._download_json(request, video_id)['results'] + formats = [] + + stream_url_hls = json_data.get('stream_url_hls') + if stream_url_hls: + m3u8_formats = self._extract_m3u8_formats(stream_url_hls, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + + stream_url_hds = json_data.get('stream_url_hds') + if stream_url_hds: + f4m_formats = self._extract_f4m_formats(json_data.get('stream_url_hds') + '?hdcore=3.4.0', video_id, -1, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) + + for video_version in json_data.get('video_versions'): + video_version_url = video_version.get('download_url') or video_version.get('stream_url') + if not video_version_url: + continue + formats.append({ + 'url': video_version_url, + 'width': int_or_none(video_version.get('width')), + 'height': int_or_none(video_version.get('height')), + 'abr': int_or_none(video_version.get('audio_bitrate')), + 'vbr': int_or_none(video_version.get('video_bitrate')), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': json_data['title'], + 'description': json_data.get('subtitle'), + 'thumbnail': json_data.get('thumbnail_image', {}).get('file'), + 'timestamp': parse_iso8601(json_data.get('publication_date')), + 'duration': int_or_none(json_data.get('duration')), + 'view_count': int_or_none(json_data.get('view_count')), + 'formats': formats, + } diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py index 8dff1d6e3..da986e063 100644 --- a/youtube_dl/extractor/bambuser.py +++ b/youtube_dl/extractor/bambuser.py @@ -6,13 +6,13 @@ import itertools from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, compat_str, ) from ..utils import ( ExtractorError, int_or_none, float_or_none, + sanitized_Request, ) @@ -57,7 +57,7 @@ class BambuserIE(InfoExtractor): 'pass': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( 
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) request.add_header('Referer', self._LOGIN_URL) response = self._download_webpage( @@ -126,7 +126,7 @@ class BambuserChannelIE(InfoExtractor): '&sort=created&access_mode=0%2C1%2C2&limit={count}' '&method=broadcast&format=json&vid_older_than={last}' ).format(user=user, count=self._STEP, last=last_id) - req = compat_urllib_request.Request(req_url) + req = sanitized_Request(req_url) # Without setting this header, we wouldn't get any result req.add_header('Referer', 'http://bambuser.com/channel/%s' % user) data = self._download_json( diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py index 505877b77..c1ef8051d 100644 --- a/youtube_dl/extractor/bandcamp.py +++ b/youtube_dl/extractor/bandcamp.py @@ -10,6 +10,8 @@ from ..compat import ( ) from ..utils import ( ExtractorError, + float_or_none, + int_or_none, ) @@ -52,11 +54,11 @@ class BandcampIE(InfoExtractor): ext, abr_str = format_id.split('-', 1) formats.append({ 'format_id': format_id, - 'url': format_url, + 'url': self._proto_relative_url(format_url, 'http:'), 'ext': ext, 'vcodec': 'none', 'acodec': ext, - 'abr': int(abr_str), + 'abr': int_or_none(abr_str), }) self._sort_formats(formats) @@ -65,7 +67,7 @@ class BandcampIE(InfoExtractor): 'id': compat_str(data['id']), 'title': data['title'], 'formats': formats, - 'duration': float(data['duration']), + 'duration': float_or_none(data.get('duration')), } else: raise ExtractorError('No free songs found') @@ -93,8 +95,8 @@ class BandcampIE(InfoExtractor): final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url') # If we could correctly generate the .rand field the url would be # in the "download_url" key - final_url = self._search_regex( - r'"retry_url":"(.*?)"', final_url_webpage, 'final video URL') + final_url = self._proto_relative_url(self._search_regex( + r'"retry_url":"(.+?)"', final_url_webpage, 'final video URL'), 'http:') return { 'id': video_id, diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py new file mode 100644 index 000000000..691aecc0d --- /dev/null +++ b/youtube_dl/extractor/bbc.py @@ -0,0 +1,944 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + float_or_none, + int_or_none, + parse_duration, + parse_iso8601, + remove_end, + unescapeHTML, +) +from ..compat import ( + compat_etree_fromstring, + compat_HTTPError, +) + + +class BBCCoUkIE(InfoExtractor): + IE_NAME = 'bbc.co.uk' + IE_DESC = 'BBC iPlayer' + _ID_REGEX = r'[pb][\da-z]{7}' + _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:programmes/(?!articles/)|iplayer(?:/[^/]+)?/(?:episode/|playlist/))|music/clips[/#])(?P<id>%s)' % _ID_REGEX + + _MEDIASELECTOR_URLS = [ + # Provides HQ HLS streams with even better quality that pc mediaset but fails + # with geolocation in some cases when it's even not geo restricted at all (e.g. + # http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable. 
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s', + 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s', + ] + + _MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection' + _EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist' + + _NAMESPACES = ( + _MEDIASELECTION_NS, + _EMP_PLAYLIST_NS, + ) + + _TESTS = [ + { + 'url': 'http://www.bbc.co.uk/programmes/b039g8p7', + 'info_dict': { + 'id': 'b039d07m', + 'ext': 'flv', + 'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4', + 'description': 'The Canadian poet and songwriter reflects on his musical career.', + }, + 'params': { + # rtmp download + 'skip_download': True, + } + }, + { + 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/', + 'info_dict': { + 'id': 'b00yng1d', + 'ext': 'flv', + 'title': 'The Man in Black: Series 3: The Printed Name', + 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.", + 'duration': 1800, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'Episode is no longer available on BBC iPlayer Radio', + }, + { + 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/', + 'info_dict': { + 'id': 'b00yng1d', + 'ext': 'flv', + 'title': 'The Voice UK: Series 3: Blind Auditions 5', + 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.", + 'duration': 5100, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', + }, + { + 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion', + 'info_dict': { + 'id': 'b03k3pb7', + 'ext': 'flv', + 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction", + 'description': '2. 
Invasion', + 'duration': 3600, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', + }, { + 'url': 'http://www.bbc.co.uk/programmes/b04v20dw', + 'info_dict': { + 'id': 'b04v209v', + 'ext': 'flv', + 'title': 'Pete Tong, The Essential New Tune Special', + 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!", + 'duration': 10800, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'Episode is no longer available on BBC iPlayer Radio', + }, { + 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3', + 'note': 'Audio', + 'info_dict': { + 'id': 'p02frcch', + 'ext': 'flv', + 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix', + 'description': 'French house superstar Madeon takes us out of the club and onto the after party.', + 'duration': 3507, + }, + 'params': { + # rtmp download + 'skip_download': True, + } + }, { + 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz', + 'note': 'Video', + 'info_dict': { + 'id': 'p025c103', + 'ext': 'flv', + 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)', + 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014', + 'duration': 226, + }, + 'params': { + # rtmp download + 'skip_download': True, + } + }, { + 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls', + 'info_dict': { + 'id': 'p02n76xf', + 'ext': 'flv', + 'title': 'Natural World, 2015-2016: 2. Super Powered Owls', + 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d', + 'duration': 3540, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'geolocation', + }, { + 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition', + 'info_dict': { + 'id': 'b05zmgw1', + 'ext': 'flv', + 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.', + 'title': 'Royal Academy Summer Exhibition', + 'duration': 3540, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + 'skip': 'geolocation', + }, { + # iptv-all mediaset fails with geolocation however there is no geo restriction + # for this programme at all + 'url': 'http://www.bbc.co.uk/programmes/b06bp7lf', + 'info_dict': { + 'id': 'b06bp7kf', + 'ext': 'flv', + 'title': "Annie Mac's Friday Night, B.Traits sits in for Annie", + 'description': 'B.Traits sits in for Annie Mac with a Mini-Mix from Disclosure.', + 'duration': 10800, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4', + 'only_matching': True, + }, { + 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3', + 'only_matching': True, + }, { + 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo', + 'only_matching': True, + } + ] + + class MediaSelectionError(Exception): + def __init__(self, id): + self.id = id + + def _extract_asx_playlist(self, connection, programme_id): + asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist') + return [ref.get('href') for ref in asx.findall('./Entry/ref')] + + def _extract_connection(self, connection, programme_id): + formats = [] + kind = connection.get('kind') + protocol = connection.get('protocol') + supplier = connection.get('supplier') + if protocol == 'http': 
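+ # ASX playlists (supplier 'asx') are expanded entry by entry below; of the transfer formats, 'dash' is skipped as unsupported, 'hls' goes through the m3u8 helper, and anything else is treated as a direct link.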
+ href = connection.get('href') + transfer_format = connection.get('transferFormat') + # ASX playlist + if supplier == 'asx': + for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)): + formats.append({ + 'url': ref, + 'format_id': 'ref%s_%s' % (i, supplier), + }) + # Skip DASH until supported + elif transfer_format == 'dash': + pass + elif transfer_format == 'hls': + m3u8_formats = self._extract_m3u8_formats( + href, programme_id, ext='mp4', entry_protocol='m3u8_native', + m3u8_id=supplier, fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + # Direct link + else: + formats.append({ + 'url': href, + 'format_id': supplier or kind or protocol, + }) + elif protocol == 'rtmp': + application = connection.get('application', 'ondemand') + auth_string = connection.get('authString') + identifier = connection.get('identifier') + server = connection.get('server') + formats.append({ + 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string), + 'play_path': identifier, + 'app': '%s?%s' % (application, auth_string), + 'page_url': 'http://www.bbc.co.uk', + 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf', + 'rtmp_live': False, + 'ext': 'flv', + 'format_id': supplier, + }) + return formats + + def _extract_items(self, playlist): + return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS) + + def _findall_ns(self, element, xpath): + elements = [] + for ns in self._NAMESPACES: + elements.extend(element.findall(xpath % ns)) + return elements + + def _extract_medias(self, media_selection): + error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS) + if error is None: + media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS) + if error is not None: + raise BBCCoUkIE.MediaSelectionError(error.get('id')) + return self._findall_ns(media_selection, './{%s}media') + + def _extract_connections(self, media): + return self._findall_ns(media, './{%s}connection') + + def _extract_video(self, media, programme_id): + formats = [] + vbr = int_or_none(media.get('bitrate')) + vcodec = media.get('encoding') + service = media.get('service') + width = int_or_none(media.get('width')) + height = int_or_none(media.get('height')) + file_size = int_or_none(media.get('media_file_size')) + for connection in self._extract_connections(media): + conn_formats = self._extract_connection(connection, programme_id) + for format in conn_formats: + format.update({ + 'width': width, + 'height': height, + 'vbr': vbr, + 'vcodec': vcodec, + 'filesize': file_size, + }) + if service: + format['format_id'] = '%s_%s' % (service, format['format_id']) + formats.extend(conn_formats) + return formats + + def _extract_audio(self, media, programme_id): + formats = [] + abr = int_or_none(media.get('bitrate')) + acodec = media.get('encoding') + service = media.get('service') + for connection in self._extract_connections(media): + conn_formats = self._extract_connection(connection, programme_id) + for format in conn_formats: + format.update({ + 'format_id': '%s_%s' % (service, format['format_id']), + 'abr': abr, + 'acodec': acodec, + }) + formats.extend(conn_formats) + return formats + + def _get_subtitles(self, media, programme_id): + subtitles = {} + for connection in self._extract_connections(media): + captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions') + lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en') + subtitles[lang] = [ + { + 'url': connection.get('href'), + 'ext': 
'ttml', + }, + ] + return subtitles + + def _raise_extractor_error(self, media_selection_error): + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, media_selection_error.id), + expected=True) + + def _download_media_selector(self, programme_id): + last_exception = None + for mediaselector_url in self._MEDIASELECTOR_URLS: + try: + return self._download_media_selector_url( + mediaselector_url % programme_id, programme_id) + except BBCCoUkIE.MediaSelectionError as e: + if e.id in ('notukerror', 'geolocation', 'selectionunavailable'): + last_exception = e + continue + self._raise_extractor_error(e) + self._raise_extractor_error(last_exception) + + def _download_media_selector_url(self, url, programme_id=None): + try: + media_selection = self._download_xml( + url, programme_id, 'Downloading media selection XML') + except ExtractorError as ee: + if isinstance(ee.cause, compat_HTTPError) and ee.cause.code in (403, 404): + media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8')) + else: + raise + return self._process_media_selector(media_selection, programme_id) + + def _process_media_selector(self, media_selection, programme_id): + formats = [] + subtitles = None + + for media in self._extract_medias(media_selection): + kind = media.get('kind') + if kind == 'audio': + formats.extend(self._extract_audio(media, programme_id)) + elif kind == 'video': + formats.extend(self._extract_video(media, programme_id)) + elif kind == 'captions': + subtitles = self.extract_subtitles(media, programme_id) + return formats, subtitles + + def _download_playlist(self, playlist_id): + try: + playlist = self._download_json( + 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id, + playlist_id, 'Downloading playlist JSON') + + version = playlist.get('defaultAvailableVersion') + if version: + smp_config = version['smpConfig'] + title = smp_config['title'] + description = smp_config['summary'] + for item in smp_config['items']: + kind = item['kind'] + if kind != 'programme' and kind != 'radioProgramme': + continue + programme_id = item.get('vpid') + duration = int_or_none(item.get('duration')) + formats, subtitles = self._download_media_selector(programme_id) + return programme_id, title, description, duration, formats, subtitles + except ExtractorError as ee: + if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404): + raise + + # fallback to legacy playlist + return self._process_legacy_playlist(playlist_id) + + def _process_legacy_playlist_url(self, url, display_id): + playlist = self._download_legacy_playlist_url(url, display_id) + return self._extract_from_legacy_playlist(playlist, display_id) + + def _process_legacy_playlist(self, playlist_id): + return self._process_legacy_playlist_url( + 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id) + + def _download_legacy_playlist_url(self, url, playlist_id=None): + return self._download_xml( + url, playlist_id, 'Downloading legacy playlist XML') + + def _extract_from_legacy_playlist(self, playlist, playlist_id): + no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS) + if no_items is not None: + reason = no_items.get('reason') + if reason == 'preAvailability': + msg = 'Episode %s is not yet available' % playlist_id + elif reason == 'postAvailability': + msg = 'Episode %s is no longer available' % playlist_id + elif reason == 'noMedia': + msg = 'Episode %s is not currently available' % playlist_id + else: + msg = 'Episode %s is not available: %s' % (playlist_id, reason) + raise 
ExtractorError(msg, expected=True) + + for item in self._extract_items(playlist): + kind = item.get('kind') + if kind != 'programme' and kind != 'radioProgramme': + continue + title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text + description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS) + description = description_el.text if description_el is not None else None + + def get_programme_id(item): + def get_from_attributes(item): + for p in('identifier', 'group'): + value = item.get(p) + if value and re.match(r'^[pb][\da-z]{7}$', value): + return value + get_from_attributes(item) + mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS) + if mediator is not None: + return get_from_attributes(mediator) + + programme_id = get_programme_id(item) + duration = int_or_none(item.get('duration')) + + if programme_id: + formats, subtitles = self._download_media_selector(programme_id) + else: + formats, subtitles = self._process_media_selector(item, playlist_id) + programme_id = playlist_id + + return programme_id, title, description, duration, formats, subtitles + + def _real_extract(self, url): + group_id = self._match_id(url) + + webpage = self._download_webpage(url, group_id, 'Downloading video page') + + programme_id = None + duration = None + + tviplayer = self._search_regex( + r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById', + webpage, 'player', default=None) + + if tviplayer: + player = self._parse_json(tviplayer, group_id).get('player', {}) + duration = int_or_none(player.get('duration')) + programme_id = player.get('vpid') + + if not programme_id: + programme_id = self._search_regex( + r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None) + + if programme_id: + formats, subtitles = self._download_media_selector(programme_id) + title = self._og_search_title(webpage) + description = self._search_regex( + r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>', + webpage, 'description', default=None) + if not description: + description = self._html_search_meta('description', webpage) + else: + programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id) + + self._sort_formats(formats) + + return { + 'id': programme_id, + 'title': title, + 'description': description, + 'thumbnail': self._og_search_thumbnail(webpage, default=None), + 'duration': duration, + 'formats': formats, + 'subtitles': subtitles, + } + + +class BBCIE(BBCCoUkIE): + IE_NAME = 'bbc' + IE_DESC = 'BBC' + _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)' + + _MEDIASELECTOR_URLS = [ + # Provides HQ HLS streams but fails with geolocation in some cases when it's + # even not geo restricted at all + 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s', + # Provides more formats, namely direct mp4 links, but fails on some videos with + # notukerror for non UK (?) users (e.g. 
+ # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret) + 'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s', + # Provides fewer formats, but works everywhere for everybody (hopefully) + 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s', + ] + + _TESTS = [{ + # article with multiple videos embedded with data-playable containing vpids + 'url': 'http://www.bbc.com/news/world-europe-32668511', + 'info_dict': { + 'id': 'world-europe-32668511', + 'title': 'Russia stages massive WW2 parade despite Western boycott', + 'description': 'md5:00ff61976f6081841f759a08bf78cc9c', + }, + 'playlist_count': 2, + }, { + # article with multiple videos embedded with data-playable (more videos) + 'url': 'http://www.bbc.com/news/business-28299555', + 'info_dict': { + 'id': 'business-28299555', + 'title': 'Farnborough Airshow: Video highlights', + 'description': 'BBC reports and video highlights at the Farnborough Airshow.', + }, + 'playlist_count': 9, + 'skip': 'Save time', + }, { + # article with multiple videos embedded with `new SMP()` + # broken + 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460', + 'info_dict': { + 'id': '3662a707-0af9-3149-963f-47bea720b460', + 'title': 'BBC Blogs - Adam Curtis - BUGGER', + }, + 'playlist_count': 18, + }, { + # single video embedded with data-playable containing vpid + 'url': 'http://www.bbc.com/news/world-europe-32041533', + 'info_dict': { + 'id': 'p02mprgb', + 'ext': 'mp4', + 'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV', + 'description': 'md5:2868290467291b37feda7863f7a83f54', + 'duration': 47, + 'timestamp': 1427219242, + 'upload_date': '20150324', + }, + 'params': { + # rtmp download + 'skip_download': True, + } + }, { + # article with single video embedded with data-playable containing XML playlist + # with direct video links as progressiveDownloadUrl (for now these are extracted) + # and playlist with f4m and m3u8 as streamingUrl + 'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu', + 'info_dict': { + 'id': '150615_telabyad_kentin_cogu', + 'ext': 'mp4', + 'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde", + 'timestamp': 1434397334, + 'upload_date': '20150615', + }, + 'params': { + 'skip_download': True, + } + }, { + # single video embedded with data-playable containing XML playlists (regional section) + 'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw', + 'info_dict': { + 'id': '150619_video_honduras_militares_hospitales_corrupcion_aw', + 'ext': 'mp4', + 'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción', + 'timestamp': 1434713142, + 'upload_date': '20150619', + }, + 'params': { + 'skip_download': True, + } + }, { + # single video from video playlist embedded with vxp-playlist-data JSON + 'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376', + 'info_dict': { + 'id': 'p02w6qjc', + 'ext': 'mp4', + 'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''', + 'duration': 56, + 'description': '''Judge Mindy Glazer: "I'm sorry to see you here... 
I always wondered what happened to you"''', + }, + 'params': { + 'skip_download': True, + } + }, { + # single video story with digitalData + 'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret', + 'info_dict': { + 'id': 'p02q6gc4', + 'ext': 'flv', + 'title': 'Sri Lanka’s spicy secret', + 'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.', + 'timestamp': 1437674293, + 'upload_date': '20150723', + }, + 'params': { + # rtmp download + 'skip_download': True, + } + }, { + # single video story without digitalData + 'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star', + 'info_dict': { + 'id': 'p018zqqg', + 'ext': 'mp4', + 'title': 'Hyundai Santa Fe Sport: Rock star', + 'description': 'md5:b042a26142c4154a6e472933cf20793d', + 'timestamp': 1415867444, + 'upload_date': '20141113', + }, + 'params': { + # rtmp download + 'skip_download': True, + } + }, { + # single video with playlist.sxml URL in playlist param + 'url': 'http://www.bbc.com/sport/0/football/33653409', + 'info_dict': { + 'id': 'p02xycnp', + 'ext': 'mp4', + 'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?', + 'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.', + 'duration': 140, + }, + 'params': { + # rtmp download + 'skip_download': True, + } + }, { + # article with multiple videos embedded with playlist.sxml in playlist param + 'url': 'http://www.bbc.com/sport/0/football/34475836', + 'info_dict': { + 'id': '34475836', + 'title': 'What Liverpool can expect from Klopp', + }, + 'playlist_count': 3, + }, { + # single video with playlist URL from weather section + 'url': 'http://www.bbc.com/weather/features/33601775', + 'only_matching': True, + }, { + # custom redirection to www.bbc.com + 'url': 'http://www.bbc.co.uk/news/science-environment-33661876', + 'only_matching': True, + }] + + @classmethod + def suitable(cls, url): + return False if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url) else super(BBCIE, cls).suitable(url) + + def _extract_from_media_meta(self, media_meta, video_id): + # Direct links to media in media metadata (e.g. 
+ # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu) + # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml + source_files = media_meta.get('sourceFiles') + if source_files: + return [{ + 'url': f['url'], + 'format_id': format_id, + 'ext': f.get('encoding'), + 'tbr': float_or_none(f.get('bitrate'), 1000), + 'filesize': int_or_none(f.get('filesize')), + } for format_id, f in source_files.items() if f.get('url')], [] + + programme_id = media_meta.get('externalId') + if programme_id: + return self._download_media_selector(programme_id) + + # Process playlist.sxml as legacy playlist + href = media_meta.get('href') + if href: + playlist = self._download_legacy_playlist_url(href) + _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id) + return formats, subtitles + + return [], [] + + def _extract_from_playlist_sxml(self, url, playlist_id, timestamp): + programme_id, title, description, duration, formats, subtitles = \ + self._process_legacy_playlist_url(url, playlist_id) + self._sort_formats(formats) + return { + 'id': programme_id, + 'title': title, + 'description': description, + 'duration': duration, + 'timestamp': timestamp, + 'formats': formats, + 'subtitles': subtitles, + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + timestamp = None + playlist_title = None + playlist_description = None + + ld = self._parse_json( + self._search_regex( + r'(?s)<script type="application/ld\+json">(.+?)</script>', + webpage, 'ld json', default='{}'), + playlist_id, fatal=False) + if ld: + timestamp = parse_iso8601(ld.get('datePublished')) + playlist_title = ld.get('headline') + playlist_description = ld.get('articleBody') + + if not timestamp: + timestamp = parse_iso8601(self._search_regex( + [r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"', + r'itemprop="datePublished"[^>]+datetime="([^"]+)"', + r'"datePublished":\s*"([^"]+)'], + webpage, 'date', default=None)) + + entries = [] + + # article with multiple videos embedded with playlist.sxml (e.g. + # http://www.bbc.com/sport/0/football/34475836) + playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage) + playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage)) + if playlists: + entries = [ + self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp) + for playlist_url in playlists] + + # news article with multiple videos embedded with data-playable + data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage) + if data_playables: + for _, data_playable_json in data_playables: + data_playable = self._parse_json( + unescapeHTML(data_playable_json), playlist_id, fatal=False) + if not data_playable: + continue + settings = data_playable.get('settings', {}) + if settings: + # data-playable with video vpid in settings.playlistObject.items (e.g. 
+ # http://www.bbc.com/news/world-us-canada-34473351) + playlist_object = settings.get('playlistObject', {}) + if playlist_object: + items = playlist_object.get('items') + if items and isinstance(items, list): + title = playlist_object['title'] + description = playlist_object.get('summary') + duration = int_or_none(items[0].get('duration')) + programme_id = items[0].get('vpid') + formats, subtitles = self._download_media_selector(programme_id) + self._sort_formats(formats) + entries.append({ + 'id': programme_id, + 'title': title, + 'description': description, + 'timestamp': timestamp, + 'duration': duration, + 'formats': formats, + 'subtitles': subtitles, + }) + else: + # data-playable without vpid but with a playlist.sxml URLs + # in otherSettings.playlist (e.g. + # http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani) + playlist = data_playable.get('otherSettings', {}).get('playlist', {}) + if playlist: + entries.append(self._extract_from_playlist_sxml( + playlist.get('progressiveDownloadUrl'), playlist_id, timestamp)) + + if entries: + playlist_title = playlist_title or remove_end(self._og_search_title(webpage), ' - BBC News') + playlist_description = playlist_description or self._og_search_description(webpage, default=None) + return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) + + # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret) + programme_id = self._search_regex( + [r'data-video-player-vpid="(%s)"' % self._ID_REGEX, + r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX, + r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX], + webpage, 'vpid', default=None) + + if programme_id: + formats, subtitles = self._download_media_selector(programme_id) + self._sort_formats(formats) + # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star) + digital_data = self._parse_json( + self._search_regex( + r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'), + programme_id, fatal=False) + page_info = digital_data.get('page', {}).get('pageInfo', {}) + title = page_info.get('pageName') or self._og_search_title(webpage) + description = page_info.get('description') or self._og_search_description(webpage) + timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp + return { + 'id': programme_id, + 'title': title, + 'description': description, + 'timestamp': timestamp, + 'formats': formats, + 'subtitles': subtitles, + } + + playlist_title = self._html_search_regex( + r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title') + playlist_description = self._og_search_description(webpage, default=None) + + def extract_all(pattern): + return list(filter(None, map( + lambda s: self._parse_json(s, playlist_id, fatal=False), + re.findall(pattern, webpage)))) + + # Multiple video article (e.g. + # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460) + EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' 
% self._ID_REGEX + entries = [] + for match in extract_all(r'new\s+SMP\(({.+?})\)'): + embed_url = match.get('playerSettings', {}).get('externalEmbedUrl') + if embed_url and re.match(EMBED_URL, embed_url): + entries.append(embed_url) + entries.extend(re.findall( + r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage)) + if entries: + return self.playlist_result( + [self.url_result(entry, 'BBCCoUk') for entry in entries], + playlist_id, playlist_title, playlist_description) + + # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511) + medias = extract_all(r"data-media-meta='({[^']+})'") + + if not medias: + # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international) + media_asset = self._search_regex( + r'mediaAssetPage\.init\(\s*({.+?}), "/', + webpage, 'media asset', default=None) + if media_asset: + media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False) + medias = [] + for video in media_asset_page.get('videos', {}).values(): + medias.extend(video.values()) + + if not medias: + # Multiple video playlist with single `now playing` entry (e.g. + # http://www.bbc.com/news/video_and_audio/must_see/33767813) + vxp_playlist = self._parse_json( + self._search_regex( + r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>', + webpage, 'playlist data'), + playlist_id) + playlist_medias = [] + for item in vxp_playlist: + media = item.get('media') + if not media: + continue + playlist_medias.append(media) + # Download single video if found media with asset id matching the video id from URL + if item.get('advert', {}).get('assetId') == playlist_id: + medias = [media] + break + # Fallback to the whole playlist + if not medias: + medias = playlist_medias + + entries = [] + for num, media_meta in enumerate(medias, start=1): + formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id) + if not formats: + continue + self._sort_formats(formats) + + video_id = media_meta.get('externalId') + if not video_id: + video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num) + + title = media_meta.get('caption') + if not title: + title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num) + + duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration')) + + images = [] + for image in media_meta.get('images', {}).values(): + images.extend(image.values()) + if 'image' in media_meta: + images.append(media_meta['image']) + + thumbnails = [{ + 'url': image.get('href'), + 'width': int_or_none(image.get('width')), + 'height': int_or_none(image.get('height')), + } for image in images] + + entries.append({ + 'id': video_id, + 'title': title, + 'thumbnails': thumbnails, + 'duration': duration, + 'timestamp': timestamp, + 'formats': formats, + 'subtitles': subtitles, + }) + + return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) + + +class BBCCoUkArticleIE(InfoExtractor): + _VALID_URL = 'http://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)' + IE_NAME = 'bbc.co.uk:article' + IE_DESC = 'BBC articles' + + _TEST = { + 'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer', + 'info_dict': { + 'id': '3jNQLTMrPlYGTBn0WV6M2MS', + 'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four', + 'description': 'Hannah Fry reveals some 
of her surprising discoveries about Ada Lovelace during filming.', + }, + 'playlist_count': 4, + 'add_ie': ['BBCCoUk'], + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + title = self._og_search_title(webpage) + description = self._og_search_description(webpage).strip() + + entries = [self.url_result(programme_url) for programme_url in re.findall( + r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)] + + return self.playlist_result(entries, playlist_id, title, description) diff --git a/youtube_dl/extractor/bbccouk.py b/youtube_dl/extractor/bbccouk.py deleted file mode 100644 index 5825d2867..000000000 --- a/youtube_dl/extractor/bbccouk.py +++ /dev/null @@ -1,379 +0,0 @@ -from __future__ import unicode_literals - -import xml.etree.ElementTree - -from .common import InfoExtractor -from ..utils import ( - ExtractorError, - int_or_none, -) -from ..compat import compat_HTTPError - - -class BBCCoUkIE(InfoExtractor): - IE_NAME = 'bbc.co.uk' - IE_DESC = 'BBC iPlayer' - _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})' - - _TESTS = [ - { - 'url': 'http://www.bbc.co.uk/programmes/b039g8p7', - 'info_dict': { - 'id': 'b039d07m', - 'ext': 'flv', - 'title': 'Kaleidoscope, Leonard Cohen', - 'description': 'The Canadian poet and songwriter reflects on his musical career.', - 'duration': 1740, - }, - 'params': { - # rtmp download - 'skip_download': True, - } - }, - { - 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/', - 'info_dict': { - 'id': 'b00yng1d', - 'ext': 'flv', - 'title': 'The Man in Black: Series 3: The Printed Name', - 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.", - 'duration': 1800, - }, - 'params': { - # rtmp download - 'skip_download': True, - }, - 'skip': 'Episode is no longer available on BBC iPlayer Radio', - }, - { - 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/', - 'info_dict': { - 'id': 'b00yng1d', - 'ext': 'flv', - 'title': 'The Voice UK: Series 3: Blind Auditions 5', - 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.", - 'duration': 5100, - }, - 'params': { - # rtmp download - 'skip_download': True, - }, - 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', - }, - { - 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion', - 'info_dict': { - 'id': 'b03k3pb7', - 'ext': 'flv', - 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction", - 'description': '2. 
Invasion', - 'duration': 3600, - }, - 'params': { - # rtmp download - 'skip_download': True, - }, - 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', - }, { - 'url': 'http://www.bbc.co.uk/programmes/b04v20dw', - 'info_dict': { - 'id': 'b04v209v', - 'ext': 'flv', - 'title': 'Pete Tong, The Essential New Tune Special', - 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!", - 'duration': 10800, - }, - 'params': { - # rtmp download - 'skip_download': True, - } - }, { - 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3', - 'note': 'Audio', - 'info_dict': { - 'id': 'p02frcch', - 'ext': 'flv', - 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix', - 'description': 'French house superstar Madeon takes us out of the club and onto the after party.', - 'duration': 3507, - }, - 'params': { - # rtmp download - 'skip_download': True, - } - }, { - 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz', - 'note': 'Video', - 'info_dict': { - 'id': 'p025c103', - 'ext': 'flv', - 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)', - 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014', - 'duration': 226, - }, - 'params': { - # rtmp download - 'skip_download': True, - } - }, { - 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls', - 'info_dict': { - 'id': 'p02n76xf', - 'ext': 'flv', - 'title': 'Natural World, 2015-2016: 2. Super Powered Owls', - 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d', - 'duration': 3540, - }, - 'params': { - # rtmp download - 'skip_download': True, - }, - 'skip': 'geolocation', - }, { - 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition', - 'info_dict': { - 'id': 'b05zmgw1', - 'ext': 'flv', - 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.', - 'title': 'Royal Academy Summer Exhibition', - 'duration': 3540, - }, - 'params': { - # rtmp download - 'skip_download': True, - }, - 'skip': 'geolocation', - }, { - 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4', - 'only_matching': True, - }, { - 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3', - 'only_matching': True, - }, { - 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo', - 'only_matching': True, - } - ] - - def _extract_asx_playlist(self, connection, programme_id): - asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist') - return [ref.get('href') for ref in asx.findall('./Entry/ref')] - - def _extract_connection(self, connection, programme_id): - formats = [] - protocol = connection.get('protocol') - supplier = connection.get('supplier') - if protocol == 'http': - href = connection.get('href') - # ASX playlist - if supplier == 'asx': - for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)): - formats.append({ - 'url': ref, - 'format_id': 'ref%s_%s' % (i, supplier), - }) - # Direct link - else: - formats.append({ - 'url': href, - 'format_id': supplier, - }) - elif protocol == 'rtmp': - application = connection.get('application', 'ondemand') - auth_string = connection.get('authString') - identifier = connection.get('identifier') - server = connection.get('server') - formats.append({ - 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string), - 'play_path': 
identifier, - 'app': '%s?%s' % (application, auth_string), - 'page_url': 'http://www.bbc.co.uk', - 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf', - 'rtmp_live': False, - 'ext': 'flv', - 'format_id': supplier, - }) - return formats - - def _extract_items(self, playlist): - return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item') - - def _extract_medias(self, media_selection): - error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error') - if error is not None: - raise ExtractorError( - '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True) - return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media') - - def _extract_connections(self, media): - return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection') - - def _extract_video(self, media, programme_id): - formats = [] - vbr = int(media.get('bitrate')) - vcodec = media.get('encoding') - service = media.get('service') - width = int(media.get('width')) - height = int(media.get('height')) - file_size = int(media.get('media_file_size')) - for connection in self._extract_connections(media): - conn_formats = self._extract_connection(connection, programme_id) - for format in conn_formats: - format.update({ - 'format_id': '%s_%s' % (service, format['format_id']), - 'width': width, - 'height': height, - 'vbr': vbr, - 'vcodec': vcodec, - 'filesize': file_size, - }) - formats.extend(conn_formats) - return formats - - def _extract_audio(self, media, programme_id): - formats = [] - abr = int(media.get('bitrate')) - acodec = media.get('encoding') - service = media.get('service') - for connection in self._extract_connections(media): - conn_formats = self._extract_connection(connection, programme_id) - for format in conn_formats: - format.update({ - 'format_id': '%s_%s' % (service, format['format_id']), - 'abr': abr, - 'acodec': acodec, - }) - formats.extend(conn_formats) - return formats - - def _get_subtitles(self, media, programme_id): - subtitles = {} - for connection in self._extract_connections(media): - captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions') - lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en') - subtitles[lang] = [ - { - 'url': connection.get('href'), - 'ext': 'ttml', - }, - ] - return subtitles - - def _download_media_selector(self, programme_id): - try: - media_selection = self._download_xml( - 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id, - programme_id, 'Downloading media selection XML') - except ExtractorError as ee: - if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403: - media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8')) - else: - raise - - formats = [] - subtitles = None - - for media in self._extract_medias(media_selection): - kind = media.get('kind') - if kind == 'audio': - formats.extend(self._extract_audio(media, programme_id)) - elif kind == 'video': - formats.extend(self._extract_video(media, programme_id)) - elif kind == 'captions': - subtitles = self.extract_subtitles(media, programme_id) - - return formats, subtitles - - def _download_playlist(self, playlist_id): - try: - playlist = self._download_json( - 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id, - playlist_id, 'Downloading playlist JSON') - - version = playlist.get('defaultAvailableVersion') - if version: - smp_config = 
version['smpConfig'] - title = smp_config['title'] - description = smp_config['summary'] - for item in smp_config['items']: - kind = item['kind'] - if kind != 'programme' and kind != 'radioProgramme': - continue - programme_id = item.get('vpid') - duration = int(item.get('duration')) - formats, subtitles = self._download_media_selector(programme_id) - return programme_id, title, description, duration, formats, subtitles - except ExtractorError as ee: - if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404): - raise - - # fallback to legacy playlist - playlist = self._download_xml( - 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, - playlist_id, 'Downloading legacy playlist XML') - - no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') - if no_items is not None: - reason = no_items.get('reason') - if reason == 'preAvailability': - msg = 'Episode %s is not yet available' % playlist_id - elif reason == 'postAvailability': - msg = 'Episode %s is no longer available' % playlist_id - elif reason == 'noMedia': - msg = 'Episode %s is not currently available' % playlist_id - else: - msg = 'Episode %s is not available: %s' % (playlist_id, reason) - raise ExtractorError(msg, expected=True) - - for item in self._extract_items(playlist): - kind = item.get('kind') - if kind != 'programme' and kind != 'radioProgramme': - continue - title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text - description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text - programme_id = item.get('identifier') - duration = int(item.get('duration')) - formats, subtitles = self._download_media_selector(programme_id) - - return programme_id, title, description, duration, formats, subtitles - - def _real_extract(self, url): - group_id = self._match_id(url) - - webpage = self._download_webpage(url, group_id, 'Downloading video page') - - programme_id = None - - tviplayer = self._search_regex( - r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById', - webpage, 'player', default=None) - - if tviplayer: - player = self._parse_json(tviplayer, group_id).get('player', {}) - duration = int_or_none(player.get('duration')) - programme_id = player.get('vpid') - - if not programme_id: - programme_id = self._search_regex( - r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None) - - if programme_id: - formats, subtitles = self._download_media_selector(programme_id) - title = self._og_search_title(webpage) - description = self._search_regex( - r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>', - webpage, 'description', fatal=False) - else: - programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id) - - self._sort_formats(formats) - - return { - 'id': programme_id, - 'title': title, - 'description': description, - 'thumbnail': self._og_search_thumbnail(webpage, default=None), - 'duration': duration, - 'formats': formats, - 'subtitles': subtitles, - } diff --git a/youtube_dl/extractor/beeg.py b/youtube_dl/extractor/beeg.py index b38057f2f..c8d921daf 100644 --- a/youtube_dl/extractor/beeg.py +++ b/youtube_dl/extractor/beeg.py @@ -1,65 +1,105 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor +from ..compat import ( + compat_chr, + compat_ord, + compat_urllib_parse_unquote, +) +from ..utils import ( + int_or_none, + parse_iso8601, +) class BeegIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)' _TEST = { 'url': 
'http://beeg.com/5416503', - 'md5': '1bff67111adb785c51d1b42959ec10e5', + 'md5': '46c384def73b33dbc581262e5ee67cef', 'info_dict': { 'id': '5416503', 'ext': 'mp4', 'title': 'Sultry Striptease', - 'description': 'md5:6db3c6177972822aaba18652ff59c773', - 'categories': list, # NSFW - 'thumbnail': 're:https?://.*\.jpg$', + 'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2', + 'timestamp': 1391813355, + 'upload_date': '20140207', + 'duration': 383, + 'tags': list, 'age_limit': 18, } } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) + + video = self._download_json( + 'http://beeg.com/api/v5/video/%s' % video_id, video_id) - webpage = self._download_webpage(url, video_id) + def split(o, e): + def cut(s, x): + n.append(s[:x]) + return s[x:] + n = [] + r = len(o) % e + if r > 0: + o = cut(o, r) + while len(o) > e: + o = cut(o, e) + n.append(o) + return n - quality_arr = self._search_regex( - r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats') + def decrypt_key(key): + # Reverse engineered from http://static.beeg.com/cpl/1105.js + a = '5ShMcIQlssOd7zChAIOlmeTZDaUxULbJRnywYaiB' + e = compat_urllib_parse_unquote(key) + o = ''.join([ + compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21) + for n in range(len(e))]) + return ''.join(split(o, 3)[::-1]) - formats = [{ - 'url': fmt[1], - 'format_id': fmt[0], - 'height': int(fmt[0][:-1]), - } for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)] + def decrypt_url(encrypted_url): + encrypted_url = self._proto_relative_url( + encrypted_url.replace('{DATA_MARKERS}', ''), 'http:') + key = self._search_regex( + r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None) + if not key: + return encrypted_url + return encrypted_url.replace(key, decrypt_key(key)) + formats = [] + for format_id, video_url in video.items(): + if not video_url: + continue + height = self._search_regex( + r'^(\d+)[pP]$', format_id, 'height', default=None) + if not height: + continue + formats.append({ + 'url': decrypt_url(video_url), + 'format_id': format_id, + 'height': int(height), + }) self._sort_formats(formats) - title = self._html_search_regex( - r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title') + title = video['title'] + video_id = video.get('id') or video_id + display_id = video.get('code') + description = video.get('desc') - description = self._html_search_regex( - r'<meta name="description" content="([^"]*)"', - webpage, 'description', fatal=False) - thumbnail = self._html_search_regex( - r'\'previewer.url\'\s*:\s*"([^"]*)"', - webpage, 'thumbnail', fatal=False) + timestamp = parse_iso8601(video.get('date'), ' ') + duration = int_or_none(video.get('duration')) - categories_str = self._html_search_regex( - r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False) - categories = ( - None if categories_str is None - else categories_str.split(',')) + tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None return { 'id': video_id, + 'display_id': display_id, 'title': title, 'description': description, - 'thumbnail': thumbnail, - 'categories': categories, + 'timestamp': timestamp, + 'duration': duration, + 'tags': tags, 'formats': formats, 'age_limit': 18, } diff --git a/youtube_dl/extractor/bild.py b/youtube_dl/extractor/bild.py index 4d8cce1ef..1a0184861 100644 --- a/youtube_dl/extractor/bild.py +++ b/youtube_dl/extractor/bild.py @@ -4,7 +4,7 @@ from __future__ import unicode_literals from 
.common import InfoExtractor from ..utils import ( int_or_none, - fix_xml_ampersands, + unescapeHTML, ) @@ -17,26 +17,24 @@ class BildIE(InfoExtractor): 'info_dict': { 'id': '38184146', 'ext': 'mp4', - 'title': 'BILD hat sie getestet', + 'title': 'Das können die neuen iPads', + 'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f', 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 196, - 'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ', } } def _real_extract(self, url): video_id = self._match_id(url) - xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml" - doc = self._download_xml(xml_url, video_id, transform_source=fix_xml_ampersands) - - duration = int_or_none(doc.attrib.get('duration'), scale=1000) + video_data = self._download_json( + url.split('.bild.html')[0] + ',view=json.bild.html', video_id) return { 'id': video_id, - 'title': doc.attrib['ueberschrift'], - 'description': doc.attrib.get('text'), - 'url': doc.attrib['src'], - 'thumbnail': doc.attrib.get('img'), - 'duration': duration, + 'title': unescapeHTML(video_data['title']).strip(), + 'description': unescapeHTML(video_data.get('description')), + 'url': video_data['clipList'][0]['srces'][0]['src'], + 'thumbnail': video_data.get('poster'), + 'duration': int_or_none(video_data.get('durationSec')), } diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py index ecc17ebeb..59beb11bc 100644 --- a/youtube_dl/extractor/bilibili.py +++ b/youtube_dl/extractor/bilibili.py @@ -2,141 +2,109 @@ from __future__ import unicode_literals import re -import itertools -import json -import xml.etree.ElementTree as ET from .common import InfoExtractor +from ..compat import compat_str from ..utils import ( int_or_none, - unified_strdate, + unescapeHTML, ExtractorError, + xpath_text, ) class BiliBiliIE(InfoExtractor): - _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/' + _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+).html)?' _TESTS = [{ 'url': 'http://www.bilibili.tv/video/av1074402/', 'md5': '2c301e4dab317596e837c3e7633e7d86', 'info_dict': { - 'id': '1074402_part1', + 'id': '1554319', 'ext': 'flv', 'title': '【金坷垃】金泡沫', - 'duration': 308, + 'duration': 308313, 'upload_date': '20140420', 'thumbnail': 're:^https?://.+\.jpg', + 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923', + 'timestamp': 1397983878, + 'uploader': '菊子桑', }, }, { 'url': 'http://www.bilibili.com/video/av1041170/', 'info_dict': { 'id': '1041170', 'title': '【BD1080P】刀语【诸神&异域】', + 'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~', + 'uploader': '枫叶逝去', + 'timestamp': 1396501299, }, 'playlist_count': 9, }] def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - if '(此视频不存在或被删除)' in webpage: - raise ExtractorError( - 'The video does not exist or was deleted', expected=True) - - if '>你没有权限浏览! 
由于版权相关问题 我们不对您所在的地区提供服务<' in webpage: - raise ExtractorError( - 'The video is not available in your region due to copyright reasons', - expected=True) - - video_code = self._search_regex( - r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code') - - title = self._html_search_meta( - 'media:title', video_code, 'title', fatal=True) - duration_str = self._html_search_meta( - 'duration', video_code, 'duration') - if duration_str is None: - duration = None - else: - duration_mobj = re.match( - r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$', - duration_str) - duration = ( - int_or_none(duration_mobj.group('hours'), default=0) * 3600 + - int(duration_mobj.group('minutes')) * 60 + - int(duration_mobj.group('seconds'))) - upload_date = unified_strdate(self._html_search_meta( - 'uploadDate', video_code, fatal=False)) - thumbnail = self._html_search_meta( - 'thumbnailUrl', video_code, 'thumbnail', fatal=False) - - cid = self._search_regex(r'cid=(\d+)', webpage, 'cid') - - entries = [] - - lq_page = self._download_webpage( - 'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid, - video_id, - note='Downloading LQ video info' + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + page_num = mobj.group('page_num') or '1' + + view_data = self._download_json( + 'http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s&page=%s' % (video_id, page_num), + video_id) + if 'error' in view_data: + raise ExtractorError('%s said: %s' % (self.IE_NAME, view_data['error']), expected=True) + + cid = view_data['cid'] + title = unescapeHTML(view_data['title']) + + doc = self._download_xml( + 'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid, + cid, + 'Downloading page %s/%s' % (page_num, view_data['pages']) ) - try: - err_info = json.loads(lq_page) - raise ExtractorError( - 'BiliBili said: ' + err_info['error_text'], expected=True) - except ValueError: - pass - lq_doc = ET.fromstring(lq_page) - lq_durls = lq_doc.findall('./durl') + if xpath_text(doc, './result') == 'error': + raise ExtractorError('%s said: %s' % (self.IE_NAME, xpath_text(doc, './message')), expected=True) - hq_doc = self._download_xml( - 'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid, - video_id, - note='Downloading HQ video info', - fatal=False, - ) - if hq_doc is not False: - hq_durls = hq_doc.findall('./durl') - assert len(lq_durls) == len(hq_durls) - else: - hq_durls = itertools.repeat(None) + entries = [] - i = 1 - for lq_durl, hq_durl in zip(lq_durls, hq_durls): + for durl in doc.findall('./durl'): + size = xpath_text(durl, ['./filesize', './size']) formats = [{ - 'format_id': 'lq', - 'quality': 1, - 'url': lq_durl.find('./url').text, - 'filesize': int_or_none( - lq_durl.find('./size'), get_attr='text'), + 'url': durl.find('./url').text, + 'filesize': int_or_none(size), + 'ext': 'flv', }] - if hq_durl is not None: - formats.append({ - 'format_id': 'hq', - 'quality': 2, - 'ext': 'flv', - 'url': hq_durl.find('./url').text, - 'filesize': int_or_none( - hq_durl.find('./size'), get_attr='text'), - }) - self._sort_formats(formats) + backup_urls = durl.find('./backup_url') + if backup_urls is not None: + for backup_url in backup_urls.findall('./url'): + formats.append({'url': backup_url.text}) + formats.reverse() entries.append({ - 'id': '%s_part%d' % (video_id, i), + 'id': '%s_part%s' % (cid, xpath_text(durl, './order')), 'title': title, + 'duration': int_or_none(xpath_text(durl, './length'), 1000), 'formats': formats, - 'duration': 
duration, - 'upload_date': upload_date, - 'thumbnail': thumbnail, }) - i += 1 - - return { - '_type': 'multi_video', - 'entries': entries, - 'id': video_id, - 'title': title + info = { + 'id': compat_str(cid), + 'title': title, + 'description': view_data.get('description'), + 'thumbnail': view_data.get('pic'), + 'uploader': view_data.get('author'), + 'timestamp': int_or_none(view_data.get('created')), + 'view_count': int_or_none(view_data.get('play')), + 'duration': int_or_none(xpath_text(doc, './timelength')), } + + if len(entries) == 1: + entries[0].update(info) + return entries[0] + else: + info.update({ + '_type': 'multi_video', + 'id': video_id, + 'entries': entries, + }) + return info diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py index c3296283d..35375f7b1 100644 --- a/youtube_dl/extractor/bliptv.py +++ b/youtube_dl/extractor/bliptv.py @@ -4,14 +4,12 @@ import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( clean_html, int_or_none, parse_iso8601, + sanitized_Request, unescapeHTML, xpath_text, xpath_with_ns, @@ -219,7 +217,7 @@ class BlipTVIE(InfoExtractor): for lang, url in subtitles_urls.items(): # For some weird reason, blip.tv serves a video instead of subtitles # when we request with a common UA - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('User-Agent', 'youtube-dl') subtitles[lang] = [{ # The extension is 'srt' but it's actually an 'ass' file diff --git a/youtube_dl/extractor/bloomberg.py b/youtube_dl/extractor/bloomberg.py index 0dca29b71..ebeef8f2a 100644 --- a/youtube_dl/extractor/bloomberg.py +++ b/youtube_dl/extractor/bloomberg.py @@ -6,9 +6,9 @@ from .common import InfoExtractor class BloombergIE(InfoExtractor): - _VALID_URL = r'https?://www\.bloomberg\.com/news/videos/[^/]+/(?P<id>[^/?#]+)' + _VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)' - _TEST = { + _TESTS = [{ 'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2', # The md5 checksum changes 'info_dict': { @@ -17,22 +17,39 @@ class BloombergIE(InfoExtractor): 'title': 'Shah\'s Presentation on Foreign-Exchange Strategies', 'description': 'md5:a8ba0302912d03d246979735c17d2761', }, - } + }, { + 'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets', + 'only_matching': True, + }, { + 'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump', + 'only_matching': True, + }] def _real_extract(self, url): name = self._match_id(url) webpage = self._download_webpage(url, name) - video_id = self._search_regex(r'"bmmrId":"(.+?)"', webpage, 'id') + video_id = self._search_regex( + r'["\']bmmrId["\']\s*:\s*(["\'])(?P<url>.+?)\1', + webpage, 'id', group='url') title = re.sub(': Video$', '', self._og_search_title(webpage)) embed_info = self._download_json( 'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id) formats = [] for stream in embed_info['streams']: - if stream["muxing_format"] == "TS": - formats.extend(self._extract_m3u8_formats(stream['url'], video_id)) + stream_url = stream.get('url') + if not stream_url: + continue + if stream['muxing_format'] == 'TS': + m3u8_formats = self._extract_m3u8_formats( + stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) else: - 
formats.extend(self._extract_f4m_formats(stream['url'], video_id)) + f4m_formats = self._extract_f4m_formats( + stream_url, video_id, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py index 809287d14..aa08051b1 100644 --- a/youtube_dl/extractor/breakcom.py +++ b/youtube_dl/extractor/breakcom.py @@ -18,6 +18,7 @@ class BreakIE(InfoExtractor): 'id': '2468056', 'ext': 'mp4', 'title': 'When Girls Act Like D-Bags', + 'age_limit': 13, } }, { 'url': 'http://www.break.com/video/ugc/baby-flex-2773063', diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index 4721c2293..03a4f446e 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -3,15 +3,14 @@ from __future__ import unicode_literals import re import json -import xml.etree.ElementTree from .common import InfoExtractor from ..compat import ( + compat_etree_fromstring, compat_parse_qs, compat_str, compat_urllib_parse, compat_urllib_parse_urlparse, - compat_urllib_request, compat_urlparse, compat_xml_parse_error, ) @@ -20,12 +19,18 @@ from ..utils import ( ExtractorError, find_xpath_attr, fix_xml_ampersands, + float_or_none, + js_to_json, + int_or_none, + parse_iso8601, + sanitized_Request, unescapeHTML, unsmuggle_url, ) -class BrightcoveIE(InfoExtractor): +class BrightcoveLegacyIE(InfoExtractor): + IE_NAME = 'brightcove:legacy' _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)' _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s' @@ -119,7 +124,7 @@ class BrightcoveIE(InfoExtractor): object_str = fix_xml_ampersands(object_str) try: - object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8')) + object_doc = compat_etree_fromstring(object_str.encode('utf-8')) except compat_xml_parse_error: return @@ -245,7 +250,7 @@ class BrightcoveIE(InfoExtractor): def _get_video_info(self, video_id, query_str, query, referer=None): request_url = self._FEDERATED_URL_TEMPLATE % query_str - req = compat_urllib_request.Request(request_url) + req = sanitized_Request(request_url) linkBase = query.get('linkBaseURL') if linkBase is not None: referer = linkBase[0] @@ -346,3 +351,183 @@ class BrightcoveIE(InfoExtractor): if 'url' not in info and not info.get('formats'): raise ExtractorError('Unable to extract video url for %s' % info['id']) return info + + +class BrightcoveNewIE(InfoExtractor): + IE_NAME = 'brightcove:new' + _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>(?:ref:)?\d+)' + _TESTS = [{ + 'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001', + 'md5': 'c8100925723840d4b0d243f7025703be', + 'info_dict': { + 'id': '4463358922001', + 'ext': 'mp4', + 'title': 'Meet the man behind Popcorn Time', + 'description': 'md5:eac376a4fe366edc70279bfb681aea16', + 'duration': 165.768, + 'timestamp': 1441391203, + 'upload_date': '20150904', + 'uploader_id': '929656772001', + 'formats': 'mincount:22', + }, + }, { + # with rtmp streams + 'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001', + 'info_dict': { + 'id': '4279049078001', + 'ext': 'mp4', + 'title': 'Titansgrave: Chapter 0', + 'description': 'Titansgrave: Chapter 0', + 
'duration': 1242.058, + 'timestamp': 1433556729, + 'upload_date': '20150606', + 'uploader_id': '4036320279001', + 'formats': 'mincount:41', + }, + 'params': { + 'skip_download': True, + } + }, { + # ref: prefixed video id + 'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442', + 'only_matching': True, + }] + + @staticmethod + def _extract_url(webpage): + urls = BrightcoveNewIE._extract_urls(webpage) + return urls[0] if urls else None + + @staticmethod + def _extract_urls(webpage): + # Reference: + # 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe + # 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript + # 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html + # 4. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player + + entries = [] + + # Look for iframe embeds [1] + for _, url in re.findall( + r'<iframe[^>]+src=(["\'])((?:https?:)//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage): + entries.append(url) + + # Look for embed_in_page embeds [2] + for video_id, account_id, player_id, embed in re.findall( + # According to examples from [3] it's unclear whether video id + # may be optional and what to do when it is + # According to [4] data-video-id may be prefixed with ref: + r'''(?sx) + <video[^>]+ + data-video-id=["\']((?:ref:)?\d+)["\'][^>]*>.*? + </video>.*? + <script[^>]+ + src=["\'](?:https?:)?//players\.brightcove\.net/ + (\d+)/([\da-f-]+)_([^/]+)/index\.min\.js + ''', webpage): + entries.append( + 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' + % (account_id, player_id, embed, video_id)) + + return entries + + def _real_extract(self, url): + account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups() + + webpage = self._download_webpage( + 'http://players.brightcove.net/%s/%s_%s/index.min.js' + % (account_id, player_id, embed), video_id) + + policy_key = None + + catalog = self._search_regex( + r'catalog\(({.+?})\);', webpage, 'catalog', default=None) + if catalog: + catalog = self._parse_json( + js_to_json(catalog), video_id, fatal=False) + if catalog: + policy_key = catalog.get('policyKey') + + if not policy_key: + policy_key = self._search_regex( + r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1', + webpage, 'policy key', group='pk') + + req = sanitized_Request( + 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' + % (account_id, video_id), + headers={'Accept': 'application/json;pk=%s' % policy_key}) + json_data = self._download_json(req, video_id) + + title = json_data['name'] + + formats = [] + for source in json_data.get('sources', []): + source_type = source.get('type') + src = source.get('src') + if source_type == 'application/x-mpegURL': + if not src: + continue + m3u8_formats = self._extract_m3u8_formats( + src, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + else: + streaming_src = source.get('streaming_src') + stream_name, app_name = source.get('stream_name'), source.get('app_name') + if not src and not streaming_src and (not stream_name or not app_name): + continue + tbr = float_or_none(source.get('avg_bitrate'), 1000) + height = int_or_none(source.get('height')) + f = { + 'tbr': tbr, + 'width': int_or_none(source.get('width')), + 'height': height, + 'filesize': 
int_or_none(source.get('size')), + 'container': source.get('container'), + 'vcodec': source.get('codec'), + 'ext': source.get('container').lower(), + } + + def build_format_id(kind): + format_id = kind + if tbr: + format_id += '-%dk' % int(tbr) + if height: + format_id += '-%dp' % height + return format_id + + if src or streaming_src: + f.update({ + 'url': src or streaming_src, + 'format_id': build_format_id('http' if src else 'http-streaming'), + 'preference': 2 if src else 1, + }) + else: + f.update({ + 'url': app_name, + 'play_path': stream_name, + 'format_id': build_format_id('rtmp'), + }) + formats.append(f) + self._sort_formats(formats) + + description = json_data.get('description') + thumbnail = json_data.get('thumbnail') + timestamp = parse_iso8601(json_data.get('published_at')) + duration = float_or_none(json_data.get('duration'), 1000) + tags = json_data.get('tags', []) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'timestamp': timestamp, + 'uploader_id': account_id, + 'formats': formats, + 'tags': tags, + } diff --git a/youtube_dl/extractor/byutv.py b/youtube_dl/extractor/byutv.py index 3b2de517e..dda98059e 100644 --- a/youtube_dl/extractor/byutv.py +++ b/youtube_dl/extractor/byutv.py @@ -14,9 +14,10 @@ class BYUtvIE(InfoExtractor): 'info_dict': { 'id': 'studio-c-season-5-episode-5', 'ext': 'mp4', - 'description': 'md5:5438d33774b6bdc662f9485a340401cc', + 'description': 'md5:e07269172baff037f8e8bf9956bc9747', 'title': 'Season 5 Episode 5', - 'thumbnail': 're:^https?://.*\.jpg$' + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 1486.486, }, 'params': { 'skip_download': True, diff --git a/youtube_dl/extractor/canal13cl.py b/youtube_dl/extractor/canal13cl.py deleted file mode 100644 index 93241fefe..000000000 --- a/youtube_dl/extractor/canal13cl.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor - - -class Canal13clIE(InfoExtractor): - _VALID_URL = r'^http://(?:www\.)?13\.cl/(?:[^/?#]+/)*(?P<id>[^/?#]+)' - _TEST = { - 'url': 'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', - 'md5': '4cb1fa38adcad8fea88487a078831755', - 'info_dict': { - 'id': '1403022125', - 'display_id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', - 'ext': 'mp4', - 'title': 'El "círculo de hierro" de Michelle Bachelet en su regreso a La Moneda', - 'description': '(Foto: Agencia Uno) En nueve días más, Michelle Bachelet va a asumir por segunda vez como presidenta de la República. 
Entre aquellos que la acompañarán hay caras que se repiten y otras que se consolidan en su entorno de colaboradores más cercanos.', - } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - display_id = mobj.group('id') - - webpage = self._download_webpage(url, display_id) - - title = self._html_search_meta( - 'twitter:title', webpage, 'title', fatal=True) - description = self._html_search_meta( - 'twitter:description', webpage, 'description') - url = self._html_search_regex( - r'articuloVideo = \"(.*?)\"', webpage, 'url') - real_id = self._search_regex( - r'[^0-9]([0-9]{7,})[^0-9]', url, 'id', default=display_id) - thumbnail = self._html_search_regex( - r'articuloImagen = \"(.*?)\"', webpage, 'thumbnail') - - return { - 'id': real_id, - 'display_id': display_id, - 'url': url, - 'title': title, - 'description': description, - 'ext': 'mp4', - 'thumbnail': thumbnail, - } diff --git a/youtube_dl/extractor/canalc2.py b/youtube_dl/extractor/canalc2.py index c4fefefe4..f6a1ff381 100644 --- a/youtube_dl/extractor/canalc2.py +++ b/youtube_dl/extractor/canalc2.py @@ -4,38 +4,53 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..utils import parse_duration class Canalc2IE(InfoExtractor): IE_NAME = 'canalc2.tv' - _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)' + _VALID_URL = r'https?://(?:www\.)?canalc2\.tv/video/(?P<id>\d+)' _TEST = { - 'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui', + 'url': 'http://www.canalc2.tv/video/12163', 'md5': '060158428b650f896c542dfbb3d6487f', 'info_dict': { 'id': '12163', - 'ext': 'mp4', - 'title': 'Terrasses du Numérique' + 'ext': 'flv', + 'title': 'Terrasses du Numérique', + 'duration': 122, + }, + 'params': { + 'skip_download': True, # Requires rtmpdump } } def _real_extract(self, url): - video_id = re.match(self._VALID_URL, url).group('id') - # We need to set the voir field for getting the file name - url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id + video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - file_name = self._search_regex( - r"so\.addVariable\('file','(.*?)'\);", - webpage, 'file name') - video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name + video_url = self._search_regex( + r'jwplayer\((["\'])Player\1\)\.setup\({[^}]*file\s*:\s*(["\'])(?P<file>.+?)\2', + webpage, 'video_url', group='file') + formats = [{'url': video_url}] + if video_url.startswith('rtmp://'): + rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url) + formats[0].update({ + 'url': rtmp.group('url'), + 'ext': 'flv', + 'app': rtmp.group('app'), + 'play_path': rtmp.group('play_path'), + 'page_url': url, + }) title = self._html_search_regex( - r'class="evenement8">(.*?)</a>', webpage, 'title') + r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.*?)</h3>', webpage, 'title') + duration = parse_duration(self._search_regex( + r'id=["\']video_duree["\'][^>]*>([^<]+)', + webpage, 'duration', fatal=False)) return { 'id': video_id, - 'ext': 'mp4', - 'url': video_url, 'title': title, + 'duration': duration, + 'formats': formats, } diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py index 699b4f7d0..004372f8d 100644 --- a/youtube_dl/extractor/canalplus.py +++ b/youtube_dl/extractor/canalplus.py @@ -78,7 +78,8 @@ class CanalplusIE(InfoExtractor): if video_id is None: webpage = self._download_webpage(url, display_id) video_id = self._search_regex( - 
r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id') + [r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)', r'id=["\']canal_video_player(?P<id>\d+)'], + webpage, 'video id', group='id') info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id) doc = self._download_xml(info_url, video_id, 'Downloading video XML') @@ -106,15 +107,11 @@ class CanalplusIE(InfoExtractor): continue format_id = fmt.tag if format_id == 'HLS': - hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv') - for fmt in hls_formats: - fmt['preference'] = preference(format_id) - formats.extend(hls_formats) + formats.extend(self._extract_m3u8_formats( + format_url, video_id, 'mp4', preference=preference(format_id))) elif format_id == 'HDS': - hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id) - for fmt in hds_formats: - fmt['preference'] = preference(format_id) - formats.extend(hds_formats) + formats.extend(self._extract_f4m_formats( + format_url + '?hdcore=2.11.3', video_id, preference=preference(format_id))) else: formats.append({ 'url': format_url, diff --git a/youtube_dl/extractor/cbs.py b/youtube_dl/extractor/cbs.py index 75fffb156..40d07ab18 100644 --- a/youtube_dl/extractor/cbs.py +++ b/youtube_dl/extractor/cbs.py @@ -1,6 +1,10 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..utils import ( + sanitized_Request, + smuggle_url, +) class CBSIE(InfoExtractor): @@ -46,13 +50,19 @@ class CBSIE(InfoExtractor): def _real_extract(self, url): display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) + request = sanitized_Request(url) + # Android UA is served with higher quality (720p) streams (see + # https://github.com/rg3/youtube-dl/issues/7490) + request.add_header('User-Agent', 'Mozilla/5.0 (Linux; Android 4.4; Nexus 5)') + webpage = self._download_webpage(request, display_id) real_id = self._search_regex( [r"video\.settings\.pid\s*=\s*'([^']+)';", r"cbsplayer\.pid\s*=\s*'([^']+)';"], webpage, 'real video ID') return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', - 'url': 'theplatform:%s' % real_id, + 'url': smuggle_url( + 'http://link.theplatform.com/s/dJ5BDC/%s?mbr=true&manifest=m3u' % real_id, + {'force_smil_url': True}), 'display_id': display_id, } diff --git a/youtube_dl/extractor/cbsnews.py b/youtube_dl/extractor/cbsnews.py index 52e61d85b..f9a64a0a2 100644 --- a/youtube_dl/extractor/cbsnews.py +++ b/youtube_dl/extractor/cbsnews.py @@ -67,9 +67,12 @@ class CBSNewsIE(InfoExtractor): 'format_id': format_id, } if uri.startswith('rtmp'): + play_path = re.sub( + r'{slistFilePath}', '', + uri.split('<break>')[-1].split('{break}')[-1]) fmt.update({ 'app': 'ondemand?auth=cbs', - 'play_path': 'mp4:' + uri.split('<break>')[-1], + 'play_path': 'mp4:' + play_path, 'player_url': 'http://www.cbsnews.com/[[IMPORT]]/vidtech.cbsinteractive.com/player/3_3_0/CBSI_PLAYER_HD.swf', 'page_url': 'http://www.cbsnews.com', 'ext': 'flv', diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py index dda583680..6f7b2a70d 100644 --- a/youtube_dl/extractor/ceskatelevize.py +++ b/youtube_dl/extractor/ceskatelevize.py @@ -5,7 +5,6 @@ import re from .common import InfoExtractor from ..compat import ( - compat_urllib_request, compat_urllib_parse, compat_urllib_parse_unquote, compat_urllib_parse_urlparse, @@ -13,59 +12,86 @@ from ..compat import ( from ..utils import ( ExtractorError, float_or_none, + sanitized_Request, ) class CeskaTelevizeIE(InfoExtractor): - _VALID_URL = 
r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)' - - _TESTS = [ - { - 'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220', + _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(?:[^/]+/)*(?P<id>[^/#?]+)/*(?:[#?].*)?$' + _TESTS = [{ + 'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220', + 'info_dict': { + 'id': '61924494876951776', + 'ext': 'mp4', + 'title': 'Hyde Park Civilizace', + 'description': 'md5:fe93f6eda372d150759d11644ebbfb4a', + 'thumbnail': 're:^https?://.*\.jpg', + 'duration': 3350, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, { + 'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina', + 'info_dict': { + 'id': '61924494876844374', + 'ext': 'mp4', + 'title': 'První republika: Zpěvačka z Dupárny Bobina', + 'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.', + 'thumbnail': 're:^https?://.*\.jpg', + 'duration': 88.4, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, { + # video with 18+ caution trailer + 'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/', + 'info_dict': { + 'id': '215562210900007-bogotart', + 'title': 'Queer: Bogotart', + 'description': 'Alternativní průvodce současným queer světem', + }, + 'playlist': [{ 'info_dict': { - 'id': '214411058091220', + 'id': '61924494876844842', 'ext': 'mp4', - 'title': 'Hyde Park Civilizace', - 'description': 'Věda a současná civilizace. Interaktivní pořad - prostor pro vaše otázky a komentáře', - 'thumbnail': 're:^https?://.*\.jpg', - 'duration': 3350, - }, - 'params': { - # m3u8 download - 'skip_download': True, + 'title': 'Queer: Bogotart (Varování 18+)', + 'duration': 10.2, }, - }, - { - 'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina', + }, { 'info_dict': { - 'id': '14716', + 'id': '61924494877068022', 'ext': 'mp4', - 'title': 'První republika: Zpěvačka z Dupárny Bobina', - 'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.', + 'title': 'Queer: Bogotart (Queer)', 'thumbnail': 're:^https?://.*\.jpg', - 'duration': 88.4, - }, - 'params': { - # m3u8 download - 'skip_download': True, + 'duration': 1558.3, }, + }], + 'params': { + # m3u8 download + 'skip_download': True, }, - ] + }] def _real_extract(self, url): url = url.replace('/porady/', '/ivysilani/').replace('/video/', '') mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + playlist_id = mobj.group('id') - webpage = self._download_webpage(url, video_id) + webpage = self._download_webpage(url, playlist_id) NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.' 
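# Editor's note, not part of the patch itself: everything below this point is a
# two-step fetch. The page HTML only embeds a (type, id) pair inside a
# getPlaylistUrl([...]) call; that pair is POSTed to the ivysilani
# get-client-playlist AJAX endpoint (with X-Requested-With and Referer headers
# mimicking the site's own script) to obtain the real playlist URL, which is
# then downloaded as JSON. The patch's main change is iterating over every item
# of that playlist instead of assuming a single entry, which is what makes the
# new Queer: Bogotart test case, whose playlist carries an 18+ caution trailer
# plus the actual video, pass.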
if '%s</p>' % NOT_AVAILABLE_STRING in webpage: raise ExtractorError(NOT_AVAILABLE_STRING, expected=True) - typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type') - episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id') + typ = self._html_search_regex( + r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type') + episode_id = self._html_search_regex( + r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id') data = { 'playlist[0][type]': typ, @@ -74,7 +100,7 @@ class CeskaTelevizeIE(InfoExtractor): 'requestSource': 'iVysilani', } - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist', data=compat_urllib_parse.urlencode(data)) @@ -83,42 +109,52 @@ class CeskaTelevizeIE(InfoExtractor): req.add_header('X-Requested-With', 'XMLHttpRequest') req.add_header('Referer', url) - playlistpage = self._download_json(req, video_id) + playlistpage = self._download_json(req, playlist_id) playlist_url = playlistpage['url'] if playlist_url == 'error_region': raise ExtractorError(NOT_AVAILABLE_STRING, expected=True) - req = compat_urllib_request.Request(compat_urllib_parse_unquote(playlist_url)) + req = sanitized_Request(compat_urllib_parse_unquote(playlist_url)) req.add_header('Referer', url) - playlist = self._download_json(req, video_id) - - item = playlist['playlist'][0] - formats = [] - for format_id, stream_url in item['streamUrls'].items(): - formats.extend(self._extract_m3u8_formats(stream_url, video_id, 'mp4')) - self._sort_formats(formats) - - title = self._og_search_title(webpage) - description = self._og_search_description(webpage) - duration = float_or_none(item.get('duration')) - thumbnail = item.get('previewImageUrl') - - subtitles = {} - subs = item.get('subtitles') - if subs: - subtitles = self.extract_subtitles(episode_id, subs) - - return { - 'id': episode_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'duration': duration, - 'formats': formats, - 'subtitles': subtitles, - } + playlist_title = self._og_search_title(webpage) + playlist_description = self._og_search_description(webpage) + + playlist = self._download_json(req, playlist_id)['playlist'] + playlist_len = len(playlist) + + entries = [] + for item in playlist: + formats = [] + for format_id, stream_url in item['streamUrls'].items(): + formats.extend(self._extract_m3u8_formats( + stream_url, playlist_id, 'mp4', entry_protocol='m3u8_native')) + self._sort_formats(formats) + + item_id = item.get('id') or item['assetId'] + title = item['title'] + + duration = float_or_none(item.get('duration')) + thumbnail = item.get('previewImageUrl') + + subtitles = {} + if item.get('type') == 'VOD': + subs = item.get('subtitles') + if subs: + subtitles = self.extract_subtitles(episode_id, subs) + + entries.append({ + 'id': item_id, + 'title': playlist_title if playlist_len == 1 else '%s (%s)' % (playlist_title, title), + 'description': playlist_description if playlist_len == 1 else None, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, + 'subtitles': subtitles, + }) + + return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) def _get_subtitles(self, episode_id, subs): original_subtitles = self._download_webpage( diff --git a/youtube_dl/extractor/channel9.py b/youtube_dl/extractor/channel9.py index 3dfc24f5b..c74553dcf 100644 --- 
a/youtube_dl/extractor/channel9.py +++ b/youtube_dl/extractor/channel9.py @@ -3,7 +3,11 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + parse_filesize, + qualities, +) class Channel9IE(InfoExtractor): @@ -28,7 +32,7 @@ class Channel9IE(InfoExtractor): 'title': 'Developer Kick-Off Session: Stuff We Love', 'description': 'md5:c08d72240b7c87fcecafe2692f80e35f', 'duration': 4576, - 'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg', + 'thumbnail': 're:http://.*\.jpg', 'session_code': 'KOS002', 'session_day': 'Day 1', 'session_room': 'Arena 1A', @@ -44,31 +48,29 @@ class Channel9IE(InfoExtractor): 'title': 'Self-service BI with Power BI - nuclear testing', 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b', 'duration': 1540, - 'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg', + 'thumbnail': 're:http://.*\.jpg', 'authors': ['Mike Wilmot'], }, + }, + { + # low quality mp4 is best + 'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library', + 'info_dict': { + 'id': 'Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library', + 'ext': 'mp4', + 'title': 'Ranges for the Standard Library', + 'description': 'md5:2e6b4917677af3728c5f6d63784c4c5d', + 'duration': 5646, + 'thumbnail': 're:http://.*\.jpg', + }, + 'params': { + 'skip_download': True, + }, } ] _RSS_URL = 'http://channel9.msdn.com/%s/RSS' - # Sorted by quality - _known_formats = ['MP3', 'MP4', 'Mid Quality WMV', 'Mid Quality MP4', 'High Quality WMV', 'High Quality MP4'] - - def _restore_bytes(self, formatted_size): - if not formatted_size: - return 0 - m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size) - if not m: - return 0 - units = m.group('units') - try: - exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper()) - except ValueError: - return 0 - size = float(m.group('size')) - return int(size * (1024 ** exponent)) - def _formats_from_html(self, html): FORMAT_REGEX = r''' (?x) @@ -78,16 +80,20 @@ class Channel9IE(InfoExtractor): <h3>File\s+size</h3>\s*(?P<filesize>.*?)\s* </div>)? 
# File size part may be missing ''' - # Extract known formats + quality = qualities(( + 'MP3', 'MP4', + 'Low Quality WMV', 'Low Quality MP4', + 'Mid Quality WMV', 'Mid Quality MP4', + 'High Quality WMV', 'High Quality MP4')) formats = [{ 'url': x.group('url'), 'format_id': x.group('quality'), 'format_note': x.group('note'), 'format': '%s (%s)' % (x.group('quality'), x.group('note')), - 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate - 'preference': self._known_formats.index(x.group('quality')), + 'filesize_approx': parse_filesize(x.group('filesize')), + 'quality': quality(x.group('quality')), 'vcodec': 'none' if x.group('note') == 'Audio only' else None, - } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats] + } for x in list(re.finditer(FORMAT_REGEX, html))] self._sort_formats(formats) @@ -158,7 +164,7 @@ class Channel9IE(InfoExtractor): def _extract_session_day(self, html): m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html) - return m.group('day') if m is not None else None + return m.group('day').strip() if m is not None else None def _extract_session_room(self, html): m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html) @@ -224,12 +230,12 @@ class Channel9IE(InfoExtractor): if contents is None: return contents - authors = self._extract_authors(html) + if len(contents) > 1: + raise ExtractorError('Got more than one entry') + result = contents[0] + result['authors'] = self._extract_authors(html) - for content in contents: - content['authors'] = authors - - return contents + return result def _extract_session(self, html, content_path): contents = self._extract_content(html, content_path) diff --git a/youtube_dl/extractor/chaturbate.py b/youtube_dl/extractor/chaturbate.py new file mode 100644 index 000000000..0b67ba67d --- /dev/null +++ b/youtube_dl/extractor/chaturbate.py @@ -0,0 +1,50 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class ChaturbateIE(InfoExtractor): + _VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?P<id>[^/?#]+)' + _TESTS = [{ + 'url': 'https://www.chaturbate.com/siswet19/', + 'info_dict': { + 'id': 'siswet19', + 'ext': 'mp4', + 'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'age_limit': 18, + 'is_live': True, + }, + 'params': { + 'skip_download': True, + } + }, { + 'url': 'https://en.chaturbate.com/siswet19/', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + m3u8_url = self._search_regex( + r'src=(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage, + 'playlist', default=None, group='url') + + if not m3u8_url: + error = self._search_regex( + r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>', + webpage, 'error', group='error') + raise ExtractorError(error, expected=True) + + formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4') + + return { + 'id': video_id, + 'title': self._live_title(video_id), + 'thumbnail': 'https://cdn-s.highwebmedia.com/uHK3McUtGCG3SMFcd4ZJsRv8/roomimage/%s.jpg' % video_id, + 'age_limit': self._rta_search(webpage), + 'is_live': True, + 'formats': formats, + } diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py index c949a4814..fd1770dac 100644 --- a/youtube_dl/extractor/cinemassacre.py +++ b/youtube_dl/extractor/cinemassacre.py @@ -6,6 +6,7 @@ import re 
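Two utils replace Channel 9's hand-rolled helpers in the hunks above: qualities() turns the ordered format tuple into a scoring function, and parse_filesize() supersedes _restore_bytes (its result now lands in filesize_approx rather than filesize). A rough sketch of their behaviour, assuming the youtube_dl.utils semantics at this revision:
```
from youtube_dl.utils import parse_filesize, qualities

# qualities() maps a format id to its index in the preference tuple, or -1
# for anything unknown, so unrecognized formats sort last instead of raising
# the ValueError the old _known_formats.index() lookup had to guard against.
quality = qualities((
    'MP3', 'MP4',
    'Low Quality WMV', 'Low Quality MP4',
    'Mid Quality WMV', 'Mid Quality MP4',
    'High Quality WMV', 'High Quality MP4'))
quality('High Quality MP4')   # -> 7 (best)
quality('Super Quality MP4')  # -> -1 (unknown, sorts last)

# parse_filesize() turns the scraped "File size" text into an approximate
# byte count, returning None when the input is missing or unparsable.
parse_filesize('11.3 MB')
```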
from .common import InfoExtractor from ..utils import ExtractorError from .bliptv import BlipTVIE +from .screenwavemedia import ScreenwaveMediaIE class CinemassacreIE(InfoExtractor): @@ -83,10 +84,10 @@ class CinemassacreIE(InfoExtractor): playerdata_url = self._search_regex( [ - r'src="(http://(?:player2\.screenwavemedia\.com|player\.screenwavemedia\.com/play)/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"', - r'<iframe[^>]+src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"', + ScreenwaveMediaIE.EMBED_PATTERN, + r'<iframe[^>]+src="(?P<url>(?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"', ], - webpage, 'player data URL', default=None) + webpage, 'player data URL', default=None, group='url') if not playerdata_url: playerdata_url = BlipTVIE._extract_url(webpage) if not playerdata_url: diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py index a5c3cb7c6..3a47f6fa4 100644 --- a/youtube_dl/extractor/clipfish.py +++ b/youtube_dl/extractor/clipfish.py @@ -1,53 +1,62 @@ from __future__ import unicode_literals -import re -import time -import xml.etree.ElementTree - from .common import InfoExtractor from ..utils import ( - ExtractorError, - parse_duration, + int_or_none, + unified_strdate, ) class ClipfishIE(InfoExtractor): - IE_NAME = 'clipfish' - - _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/' + _VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/', - 'md5': '2521cd644e862936cf2e698206e47385', + 'md5': '79bc922f3e8a9097b3d68a93780fd475', 'info_dict': { 'id': '3966754', 'ext': 'mp4', 'title': 'FIFA 14 - E3 2013 Trailer', + 'description': 'Video zu FIFA 14: E3 2013 Trailer', + 'upload_date': '20130611', 'duration': 82, - }, - 'skip': 'Blocked in the US' + 'view_count': int, + } } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group(1) - - info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' % - (video_id, int(time.time()))) - doc = self._download_xml( - info_url, video_id, note='Downloading info page') - title = doc.find('title').text - video_url = doc.find('filename').text - if video_url is None: - xml_bytes = xml.etree.ElementTree.tostring(doc) - raise ExtractorError('Cannot find video URL in document %r' % - xml_bytes) - thumbnail = doc.find('imageurl').text - duration = parse_duration(doc.find('duration').text) + video_id = self._match_id(url) + + video_info = self._download_json( + 'http://www.clipfish.de/devapi/id/%s?format=json&apikey=hbbtv' % video_id, + video_id)['items'][0] + + formats = [] + + m3u8_url = video_info.get('media_videourl_hls') + if m3u8_url: + formats.append({ + 'url': m3u8_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'), + 'ext': 'mp4', + 'format_id': 'hls', + }) + + mp4_url = video_info.get('media_videourl') + if mp4_url: + formats.append({ + 'url': mp4_url, + 'format_id': 'mp4', + 'width': int_or_none(video_info.get('width')), + 'height': int_or_none(video_info.get('height')), + 'tbr': int_or_none(video_info.get('bitrate')), + }) return { 'id': video_id, - 'title': title, - 'url': video_url, - 'thumbnail': thumbnail, - 'duration': duration, + 'title': video_info['title'], + 'description': video_info.get('descr'), + 'formats': formats, + 'thumbnail': video_info.get('media_content_thumbnail_large') or video_info.get('media_thumbnail'), + 'duration': int_or_none(video_info.get('media_length')), + 'upload_date': 
unified_strdate(video_info.get('pubDate')), + 'view_count': int_or_none(video_info.get('media_views')) } diff --git a/youtube_dl/extractor/cliphunter.py b/youtube_dl/extractor/cliphunter.py index d46592cc5..2996b6b09 100644 --- a/youtube_dl/extractor/cliphunter.py +++ b/youtube_dl/extractor/cliphunter.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import determine_ext +from ..utils import int_or_none _translation_table = { @@ -42,31 +42,26 @@ class CliphunterIE(InfoExtractor): video_title = self._search_regex( r'mediaTitle = "([^"]+)"', webpage, 'title') - fmts = {} - for fmt in ('mp4', 'flv'): - fmt_list = self._parse_json(self._search_regex( - r'var %sjson\s*=\s*(\[.*?\]);' % fmt, webpage, '%s formats' % fmt), video_id) - for f in fmt_list: - fmts[f['fname']] = _decode(f['sUrl']) - - qualities = self._parse_json(self._search_regex( - r'var player_btns\s*=\s*(.*?);\n', webpage, 'quality info'), video_id) + gexo_files = self._parse_json( + self._search_regex( + r'var\s+gexoFiles\s*=\s*({.+?});', webpage, 'gexo files'), + video_id) formats = [] - for fname, url in fmts.items(): - f = { - 'url': url, - } - if fname in qualities: - qual = qualities[fname] - f.update({ - 'format_id': '%s_%sp' % (determine_ext(url), qual['h']), - 'width': qual['w'], - 'height': qual['h'], - 'tbr': qual['br'], - }) - formats.append(f) - + for format_id, f in gexo_files.items(): + video_url = f.get('url') + if not video_url: + continue + fmt = f.get('fmt') + height = f.get('h') + format_id = '%s_%sp' % (fmt, height) if fmt and height else format_id + formats.append({ + 'url': _decode(video_url), + 'format_id': format_id, + 'width': int_or_none(f.get('w')), + 'height': int_or_none(height), + 'tbr': int_or_none(f.get('br')), + }) self._sort_formats(formats) thumbnail = self._search_regex( diff --git a/youtube_dl/extractor/clubic.py b/youtube_dl/extractor/clubic.py index 14f215c5c..1dfa7c12e 100644 --- a/youtube_dl/extractor/clubic.py +++ b/youtube_dl/extractor/clubic.py @@ -12,9 +12,9 @@ from ..utils import ( class ClubicIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?clubic\.com/video/[^/]+/video.*-(?P<id>[0-9]+)\.html' + _VALID_URL = r'http://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html' - _TEST = { + _TESTS = [{ 'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html', 'md5': '1592b694ba586036efac1776b0b43cd3', 'info_dict': { @@ -24,7 +24,10 @@ class ClubicIE(InfoExtractor): 'description': 're:Gueule de bois chez Nokia. 
Le constructeur a indiqué cette.*', 'thumbnail': 're:^http://img\.clubic\.com/.*\.jpg$', } - } + }, { + 'url': 'http://www.clubic.com/video/video-clubic-week-2-0-apple-iphone-6s-et-plus-mais-surtout-le-pencil-469792.html', + 'only_matching': True, + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) diff --git a/youtube_dl/extractor/clyp.py b/youtube_dl/extractor/clyp.py new file mode 100644 index 000000000..57e643799 --- /dev/null +++ b/youtube_dl/extractor/clyp.py @@ -0,0 +1,57 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + float_or_none, + parse_iso8601, +) + + +class ClypIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?clyp\.it/(?P<id>[a-z0-9]+)' + _TEST = { + 'url': 'https://clyp.it/ojz2wfah', + 'md5': '1d4961036c41247ecfdcc439c0cddcbb', + 'info_dict': { + 'id': 'ojz2wfah', + 'ext': 'mp3', + 'title': 'Krisson80 - bits wip wip', + 'description': '#Krisson80BitsWipWip #chiptune\n#wip', + 'duration': 263.21, + 'timestamp': 1443515251, + 'upload_date': '20150929', + }, + } + + def _real_extract(self, url): + audio_id = self._match_id(url) + + metadata = self._download_json( + 'https://api.clyp.it/%s' % audio_id, audio_id) + + formats = [] + for secure in ('', 'Secure'): + for ext in ('Ogg', 'Mp3'): + format_id = '%s%s' % (secure, ext) + format_url = metadata.get('%sUrl' % format_id) + if format_url: + formats.append({ + 'url': format_url, + 'format_id': format_id, + 'vcodec': 'none', + }) + self._sort_formats(formats) + + title = metadata['Title'] + description = metadata.get('Description') + duration = float_or_none(metadata.get('Duration')) + timestamp = parse_iso8601(metadata.get('DateCreated')) + + return { + 'id': audio_id, + 'title': title, + 'description': description, + 'duration': duration, + 'timestamp': timestamp, + 'formats': formats, + } diff --git a/youtube_dl/extractor/cmt.py b/youtube_dl/extractor/cmt.py index e96c59f71..f1311b14f 100644 --- a/youtube_dl/extractor/cmt.py +++ b/youtube_dl/extractor/cmt.py @@ -4,7 +4,7 @@ from .mtv import MTVIE class CMTIE(MTVIE): IE_NAME = 'cmt.com' - _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml' + _VALID_URL = r'https?://www\.cmt\.com/(?:videos|shows)/(?:[^/]+/)*(?P<videoid>\d+)' _FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/' _TESTS = [{ @@ -16,4 +16,7 @@ class CMTIE(MTVIE): 'title': 'Garth Brooks - "The Call (featuring Trisha Yearwood)"', 'description': 'Blame It All On My Roots', }, + }, { + 'url': 'http://www.cmt.com/shows/party-down-south/party-down-south-ep-407-gone-girl/1738172/playlist/#id=1738172', + 'only_matching': True, }] diff --git a/youtube_dl/extractor/cnet.py b/youtube_dl/extractor/cnet.py index 5dd69bff7..5c3908f72 100644 --- a/youtube_dl/extractor/cnet.py +++ b/youtube_dl/extractor/cnet.py @@ -1,15 +1,11 @@ # coding: utf-8 from __future__ import unicode_literals -import json +from .theplatform import ThePlatformIE +from ..utils import int_or_none -from .common import InfoExtractor -from ..utils import ( - ExtractorError, -) - -class CNETIE(InfoExtractor): +class CNETIE(ThePlatformIE): _VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/' _TESTS = [{ 'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/', @@ -18,25 +14,20 @@ class CNETIE(InfoExtractor): 'ext': 'flv', 'title': 'Hands-on with Microsoft Windows 8.1 Update', 'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.', - 'thumbnail': 
're:^http://.*/flmswindows8.jpg$', 'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861', 'uploader': 'Sarah Mitroff', + 'duration': 70, }, - 'params': { - 'skip_download': 'requires rtmpdump', - } }, { 'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/', 'info_dict': { 'id': '56527b93-d25d-44e3-b738-f989ce2e49ba', 'ext': 'flv', + 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)', 'description': 'Khail and Ashley wonder what other civic woes can be solved by self-tweeting objects, investigate a new kind of VR camera and watch an origami robot self-assemble, walk, climb, dig and dissolve. #TDPothole', 'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40', 'uploader': 'Ashley Esqueda', - 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)', - }, - 'params': { - 'skip_download': True, # requires rtmpdump + 'duration': 1482, }, }] @@ -45,26 +36,13 @@ class CNETIE(InfoExtractor): webpage = self._download_webpage(url, display_id) data_json = self._html_search_regex( - r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'", + r"data-cnet-video(?:-uvp)?-options='([^']+)'", webpage, 'data json') - data = json.loads(data_json) - vdata = data['video'] - if not vdata: - vdata = data['videos'][0] - if not vdata: - raise ExtractorError('Cannot find video data') - - mpx_account = data['config']['players']['default']['mpx_account'] - vid = vdata['files'].get('rtmp', vdata['files']['hds']) - tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid) + data = self._parse_json(data_json, display_id) + vdata = data.get('video') or data['videos'][0] video_id = vdata['id'] - title = vdata.get('headline') - if title is None: - title = vdata.get('title') - if title is None: - raise ExtractorError('Cannot find title!') - thumbnail = vdata.get('image', {}).get('path') + title = vdata['title'] author = vdata.get('author') if author: uploader = '%s %s' % (author['firstName'], author['lastName']) @@ -73,13 +51,34 @@ class CNETIE(InfoExtractor): uploader = None uploader_id = None + mpx_account = data['config']['uvpConfig']['default']['mpx_account'] + + metadata = self.get_metadata('%s/%s' % (mpx_account, list(vdata['files'].values())[0]), video_id) + description = vdata.get('description') or metadata.get('description') + duration = int_or_none(vdata.get('duration')) or metadata.get('duration') + + formats = [] + subtitles = {} + for (fkey, vid) in vdata['files'].items(): + if fkey == 'hls_phone' and 'hls_tablet' in vdata['files']: + continue + release_url = 'http://link.theplatform.com/s/%s/%s?format=SMIL&mbr=true' % (mpx_account, vid) + if fkey == 'hds': + release_url += '&manifest=f4m' + tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % fkey) + formats.extend(tp_formats) + subtitles = self._merge_subtitles(subtitles, tp_subtitles) + self._sort_formats(formats) + return { - '_type': 'url_transparent', - 'url': tp_link, 'id': video_id, 'display_id': display_id, 'title': title, + 'description': description, + 'thumbnail': metadata.get('thumbnail'), + 'duration': duration, 'uploader': uploader, 'uploader_id': uploader_id, - 'thumbnail': thumbnail, + 'subtitles': subtitles, + 'formats': formats, } diff --git a/youtube_dl/extractor/collegerama.py b/youtube_dl/extractor/collegerama.py index fedd48490..40667a0f1 100644 --- a/youtube_dl/extractor/collegerama.py +++ b/youtube_dl/extractor/collegerama.py 
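The CNET rewrite above drops the url_transparent handoff and subclasses ThePlatformIE so SMIL formats and subtitles are extracted directly. One SMIL release URL is requested per entry in vdata['files']; the construction reduces to this sketch (mpx_account and vid being the values scraped from the page JSON):
```
# Sketch of the per-format ThePlatform release URL built in the hunk above;
# the HDS variant additionally asks for an f4m manifest.
def release_url(mpx_account, vid, fkey):
    url = 'http://link.theplatform.com/s/%s/%s?format=SMIL&mbr=true' % (mpx_account, vid)
    if fkey == 'hds':
        url += '&manifest=f4m'
    return url
```
The hls_phone entry is skipped whenever hls_tablet is present, presumably because the tablet rendition covers the same streams at equal or better quality.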
@@ -3,10 +3,10 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import compat_urllib_request from ..utils import ( float_or_none, int_or_none, + sanitized_Request, ) @@ -52,7 +52,7 @@ class CollegeRamaIE(InfoExtractor): } } - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions', json.dumps(player_options_request)) request.add_header('Content-Type', 'application/json') diff --git a/youtube_dl/extractor/comcarcoff.py b/youtube_dl/extractor/comcarcoff.py index 9c25b2223..81f3d7697 100644 --- a/youtube_dl/extractor/comcarcoff.py +++ b/youtube_dl/extractor/comcarcoff.py @@ -36,7 +36,7 @@ class ComCarCoffIE(InfoExtractor): webpage, 'full data json')) video_id = full_data['activeVideo']['video'] - video_data = full_data['videos'][video_id] + video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id] thumbnails = [{ 'url': video_data['images']['thumb'], }, { diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py index 91ebb0ce5..3e4bd10b6 100644 --- a/youtube_dl/extractor/comedycentral.py +++ b/youtube_dl/extractor/comedycentral.py @@ -151,12 +151,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor): mobj = re.match(self._VALID_URL, url) if mobj.group('shortname'): - if mobj.group('shortname') in ('tds', 'thedailyshow'): - url = 'http://thedailyshow.cc.com/full-episodes/' - else: - url = 'http://thecolbertreport.cc.com/full-episodes/' - mobj = re.match(self._VALID_URL, url, re.VERBOSE) - assert mobj is not None + return self.url_result('http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes') if mobj.group('clip'): if mobj.group('videotitle'): diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index b9014fc23..828f58f12 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -10,16 +10,18 @@ import re import socket import sys import time -import xml.etree.ElementTree from ..compat import ( compat_cookiejar, - compat_HTTPError, + compat_cookies, + compat_getpass, compat_http_client, compat_urllib_error, + compat_urllib_parse, compat_urllib_parse_urlparse, compat_urlparse, compat_str, + compat_etree_fromstring, ) from ..utils import ( NO_DEFAULT, @@ -28,13 +30,19 @@ from ..utils import ( clean_html, compiled_regex_type, determine_ext, + error_to_compat_str, ExtractorError, fix_xml_ampersands, float_or_none, int_or_none, RegexNotFoundError, sanitize_filename, + sanitized_Request, unescapeHTML, + unified_strdate, + url_basename, + xpath_text, + xpath_with_ns, ) @@ -65,7 +73,7 @@ class InfoExtractor(object): Potential fields: * url Mandatory. The URL of the video file - * ext Will be calculated from url if missing + * ext Will be calculated from URL if missing * format A human-readable description of the format ("mp4 container with h264/opus"). Calculated from the format_id, width, height. @@ -145,6 +153,7 @@ class InfoExtractor(object): description: Full video description. uploader: Full name of the video uploader. creator: The main artist who created the video. + release_date: The date (YYYYMMDD) when the video was released. timestamp: UNIX timestamp of the moment the video became available. upload_date: Video upload date (YYYYMMDD). If not explicitly set, calculated from timestamp. 
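The ComCarCoff fix above is a chained-lookup fallback: videos that live in the feed's singleshots map no longer raise a KeyError. In isolation, with illustrative data:
```
# Prefer full_data['videos'][video_id]; fall back to
# full_data['singleshots'][video_id] if the first lookup comes back empty.
full_data = {'singleshots': {'42': {'title': 'single shot episode'}}}
video_id = '42'
video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id]
# -> {'title': 'single shot episode'}
```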
@@ -155,13 +164,15 @@ class InfoExtractor(object): lower to higher preference, each element is a dictionary with the "ext" entry and one of: * "data": The subtitles file contents - * "url": A url pointing to the subtitles file + * "url": A URL pointing to the subtitles file + "ext" will be calculated from URL if missing automatic_captions: Like 'subtitles', used by the YoutubeIE for automatically generated captions - duration: Length of the video in seconds, as an integer. + duration: Length of the video in seconds, as an integer or float. view_count: How many users have watched the video on the platform. like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video + repost_count: Number of reposts of the video average_rating: Average rating give by users, the scale used depends on the webpage comment_count: Number of comments on the video comments: A list of comments, each with one or more of the following @@ -176,13 +187,18 @@ class InfoExtractor(object): Set to "root" to indicate that this is a comment to the original video. age_limit: Age restriction for the video, as an integer (years) - webpage_url: The url to the video webpage, if given to youtube-dl it + webpage_url: The URL to the video webpage, if given to youtube-dl it should allow to get the same result again. (It will be set by YoutubeDL if it's missing) categories: A list of categories that the video falls in, for example ["Sports", "Berlin"] + tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"] is_live: True, False, or None (=unknown). Whether this video is a live stream that goes on instead of a fixed-length video. + start_time: Time in seconds where the reproduction should start, as + specified in the URL. + end_time: Time in seconds where the reproduction should end, as + specified in the URL. Unless mentioned otherwise, the fields should be Unicode strings. @@ -193,8 +209,8 @@ class InfoExtractor(object): There must be a key "entries", which is a list, an iterable, or a PagedList object, each element of which is a valid dictionary by this specification. - Additionally, playlists can have "title" and "id" attributes with the same - semantics as videos (see above). + Additionally, playlists can have "title", "description" and "id" attributes + with the same semantics as videos (see above). 
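With this hunk, playlists officially gain a description attribute alongside title and id. A conforming playlist result now looks roughly like the following; all values are illustrative:
```
# Minimal playlist result under the clarified contract above.
{
    '_type': 'playlist',
    'id': 'example-playlist',
    'title': 'Example playlist',
    'description': 'Playlists may now carry a description as well.',
    'entries': [
        {'id': 'example-1', 'url': 'http://example.com/1.mp4', 'title': 'Part 1'},
    ],
}
```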
_type "multi_video" indicates that there are multiple videos that @@ -295,11 +311,11 @@ class InfoExtractor(object): @classmethod def ie_key(cls): """A string for getting the InfoExtractor with get_info_extractor""" - return cls.__name__[:-2] + return compat_str(cls.__name__[:-2]) @property def IE_NAME(self): - return type(self).__name__[:-2] + return compat_str(type(self).__name__[:-2]) def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True): """ Returns the response handle """ @@ -317,7 +333,8 @@ class InfoExtractor(object): return False if errnote is None: errnote = 'Unable to download webpage' - errmsg = '%s: %s' % (errnote, compat_str(err)) + + errmsg = '%s: %s' % (errnote, error_to_compat_str(err)) if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: @@ -446,7 +463,7 @@ class InfoExtractor(object): return xml_string if transform_source: xml_string = transform_source(xml_string) - return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8')) + return compat_etree_fromstring(xml_string.encode('utf-8')) def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', @@ -498,10 +515,22 @@ class InfoExtractor(object): """Report attempt to log in.""" self.to_screen('Logging in') + @staticmethod + def raise_login_required(msg='This video is only available for registered users'): + raise ExtractorError( + '%s. Use --username and --password or --netrc to provide account credentials.' % msg, + expected=True) + + @staticmethod + def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'): + raise ExtractorError( + '%s. You might want to use --proxy to workaround.' % msg, + expected=True) + # Methods for following #608 @staticmethod def url_result(url, ie=None, video_id=None, video_title=None): - """Returns a url that points to a page that should be processed""" + """Returns a URL that points to a page that should be processed""" # TODO: ie should be the class used for getting the info video_info = {'_type': 'url', 'url': url, @@ -595,11 +624,11 @@ class InfoExtractor(object): else: raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) except (IOError, netrc.NetrcParseError) as err: - self._downloader.report_warning('parsing .netrc: %s' % compat_str(err)) + self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err)) return (username, password) - def _get_tfa_info(self): + def _get_tfa_info(self, note='two-factor verification code'): """ Get the two-factor authentication info TODO - asking the user will be required for sms/phone verify @@ -613,19 +642,26 @@ class InfoExtractor(object): if downloader_params.get('twofactor', None) is not None: return downloader_params['twofactor'] - return None + return compat_getpass('Type %s and press [Return]: ' % note) # Helper functions for extracting OpenGraph info @staticmethod def _og_regexes(prop): - content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')' - property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop) + content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))' + property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)' + % {'prop': re.escape(prop)}) template = r'<meta[^>]+?%s[^>]+?%s' return [ template % (property_re, content_re), template % (content_re, property_re), ] + @staticmethod + def _meta_regex(prop): + return r'''(?isx)<meta + (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1) + 
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop) + def _og_search_property(self, prop, html, name=None, **kargs): if name is None: name = 'OpenGraph %s' % prop @@ -635,7 +671,7 @@ class InfoExtractor(object): return unescapeHTML(escaped) def _og_search_thumbnail(self, html, **kargs): - return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs) + return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs) def _og_search_description(self, html, **kargs): return self._og_search_property('description', html, fatal=False, **kargs) @@ -656,9 +692,7 @@ class InfoExtractor(object): if display_name is None: display_name = name return self._html_search_regex( - r'''(?isx)<meta - (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1) - [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name), + self._meta_regex(name), html, display_name, fatal=fatal, group='content', **kwargs) def _dc_search_uploader(self, html): @@ -709,20 +743,23 @@ class InfoExtractor(object): @staticmethod def _hidden_inputs(html): - return dict([ - (input.group('name'), input.group('value')) for input in re.finditer( - r'''(?x) - <input\s+ - type=(?P<q_hidden>["\'])hidden(?P=q_hidden)\s+ - name=(?P<q_name>["\'])(?P<name>.+?)(?P=q_name)\s+ - (?:id=(?P<q_id>["\']).+?(?P=q_id)\s+)? - value=(?P<q_value>["\'])(?P<value>.*?)(?P=q_value) - ''', html) - ]) + html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html) + hidden_inputs = {} + for input in re.findall(r'(?i)<input([^>]+)>', html): + if not re.search(r'type=(["\'])(?:hidden|submit)\1', input): + continue + name = re.search(r'name=(["\'])(?P<value>.+?)\1', input) + if not name: + continue + value = re.search(r'value=(["\'])(?P<value>.*?)\1', input) + if not value: + continue + hidden_inputs[name.group('value')] = value.group('value') + return hidden_inputs def _form_hidden_inputs(self, form_id, html): form = self._search_regex( - r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id, + r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id, html, '%s form' % form_id, group='form') return self._hidden_inputs(form) @@ -806,7 +843,7 @@ class InfoExtractor(object): self._request_webpage(url, video_id, 'Checking %s URL' % item) return True except ExtractorError as e: - if isinstance(e.cause, compat_HTTPError): + if isinstance(e.cause, compat_urllib_error.URLError): self.to_screen( '%s: %s URL is invalid, skipping' % (video_id, item)) return False @@ -837,13 +874,18 @@ class InfoExtractor(object): time.sleep(timeout) def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None, - transform_source=lambda s: fix_xml_ampersands(s).strip()): + transform_source=lambda s: fix_xml_ampersands(s).strip(), + fatal=True): manifest = self._download_xml( manifest_url, video_id, 'Downloading f4m manifest', 'Unable to download f4m manifest', # Some manifests may be malformed, e.g. 
prosiebensat1 generated manifests # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244) - transform_source=transform_source) + transform_source=transform_source, + fatal=fatal) + + if manifest is False: + return manifest formats = [] manifest_version = '1.0' @@ -851,6 +893,11 @@ class InfoExtractor(object): if not media_nodes: manifest_version = '2.0' media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media') + base_url = xpath_text( + manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'], + 'base URL', default=None) + if base_url: + base_url = base_url.strip() for i, media_el in enumerate(media_nodes): if manifest_version == '2.0': media_url = media_el.attrib.get('href') or media_el.attrib.get('url') @@ -858,13 +905,16 @@ class InfoExtractor(object): continue manifest_url = ( media_url if media_url.startswith('http://') or media_url.startswith('https://') - else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url)) + else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url)) # If media_url is itself a f4m manifest do the recursive extraction # since bitrates in parent manifest (this one) and media_url manifest # may differ leading to inability to resolve the format by requested # bitrate in f4m downloader if determine_ext(manifest_url) == 'f4m': - formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id)) + f4m_formats = self._extract_f4m_formats( + manifest_url, video_id, preference, f4m_id, fatal=fatal) + if f4m_formats: + formats.extend(f4m_formats) continue tbr = int_or_none(media_el.attrib.get('bitrate')) formats.append({ @@ -900,13 +950,15 @@ class InfoExtractor(object): if re.match(r'^https?://', u) else compat_urlparse.urljoin(m3u8_url, u)) - m3u8_doc = self._download_webpage( + res = self._download_webpage_handle( m3u8_url, video_id, note=note or 'Downloading m3u8 information', errnote=errnote or 'Failed to download m3u8 information', fatal=fatal) - if m3u8_doc is False: - return m3u8_doc + if res is False: + return res + m3u8_doc, urlh = res + m3u8_url = urlh.geturl() last_info = None last_media = None kv_rex = re.compile( @@ -967,69 +1019,237 @@ class InfoExtractor(object): self._sort_formats(formats) return formats - # TODO: improve extraction - def _extract_smil_formats(self, smil_url, video_id, fatal=True): - smil = self._download_xml( - smil_url, video_id, 'Downloading SMIL file', - 'Unable to download SMIL file', fatal=fatal) + @staticmethod + def _xpath_ns(path, namespace=None): + if not namespace: + return path + out = [] + for c in path.split('/'): + if not c or c == '.': + out.append(c) + else: + out.append('{%s}%s' % (namespace, c)) + return '/'.join(out) + + def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None): + smil = self._download_smil(smil_url, video_id, fatal=fatal) + if smil is False: assert not fatal return [] - base = smil.find('./head/meta').get('base') + namespace = self._parse_smil_namespace(smil) + + return self._parse_smil_formats( + smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params) + + def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None): + smil = self._download_smil(smil_url, video_id, fatal=fatal) + if smil is False: + return {} + return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params) + + def _download_smil(self, smil_url, video_id, fatal=True): + return self._download_xml( + smil_url, video_id, 'Downloading SMIL file', + 'Unable to 
download SMIL file', fatal=fatal) + + def _parse_smil(self, smil, smil_url, video_id, f4m_params=None): + namespace = self._parse_smil_namespace(smil) + + formats = self._parse_smil_formats( + smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params) + subtitles = self._parse_smil_subtitles(smil, namespace=namespace) + + video_id = os.path.splitext(url_basename(smil_url))[0] + title = None + description = None + upload_date = None + for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): + name = meta.attrib.get('name') + content = meta.attrib.get('content') + if not name or not content: + continue + if not title and name == 'title': + title = content + elif not description and name in ('description', 'abstract'): + description = content + elif not upload_date and name == 'date': + upload_date = unified_strdate(content) + + thumbnails = [{ + 'id': image.get('type'), + 'url': image.get('src'), + 'width': int_or_none(image.get('width')), + 'height': int_or_none(image.get('height')), + } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')] + + return { + 'id': video_id, + 'title': title or video_id, + 'description': description, + 'upload_date': upload_date, + 'thumbnails': thumbnails, + 'formats': formats, + 'subtitles': subtitles, + } + + def _parse_smil_namespace(self, smil): + return self._search_regex( + r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None) + + def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): + base = smil_url + for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): + b = meta.get('base') or meta.get('httpBase') + if b: + base = b + break formats = [] rtmp_count = 0 - if smil.findall('./body/seq/video'): - video = smil.findall('./body/seq/video')[0] - fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count) - formats.extend(fmts) - else: - for video in smil.findall('./body/switch/video'): - fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count) - formats.extend(fmts) + http_count = 0 + + videos = smil.findall(self._xpath_ns('.//video', namespace)) + for video in videos: + src = video.get('src') + if not src: + continue + + bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000) + filesize = int_or_none(video.get('size') or video.get('fileSize')) + width = int_or_none(video.get('width')) + height = int_or_none(video.get('height')) + proto = video.get('proto') + ext = video.get('ext') + src_ext = determine_ext(src) + streamer = video.get('streamer') or base + + if proto == 'rtmp' or streamer.startswith('rtmp'): + rtmp_count += 1 + formats.append({ + 'url': streamer, + 'play_path': src, + 'ext': 'flv', + 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate), + 'tbr': bitrate, + 'filesize': filesize, + 'width': width, + 'height': height, + }) + if transform_rtmp_url: + streamer, src = transform_rtmp_url(streamer, src) + formats[-1].update({ + 'url': streamer, + 'play_path': src, + }) + continue + + src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src) + + if proto == 'm3u8' or src_ext == 'm3u8': + m3u8_formats = self._extract_m3u8_formats( + src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + continue + + if src_ext == 'f4m': + f4m_url = src_url + if not f4m_params: + f4m_params = { + 'hdcore': '3.2.0', + 'plugin': 'flowplayer-3.2.0.1', + } + f4m_url += '&' 
if '?' in f4m_url else '?' + f4m_url += compat_urllib_parse.urlencode(f4m_params) + f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) + continue + + if src_url.startswith('http') and self._is_valid_url(src, video_id): + http_count += 1 + formats.append({ + 'url': src_url, + 'ext': ext or src_ext or 'flv', + 'format_id': 'http-%d' % (bitrate or http_count), + 'tbr': bitrate, + 'filesize': filesize, + 'width': width, + 'height': height, + }) + continue self._sort_formats(formats) return formats - def _parse_smil_video(self, video, video_id, base, rtmp_count): - src = video.get('src') - if not src: - return [], rtmp_count - bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000) - width = int_or_none(video.get('width')) - height = int_or_none(video.get('height')) - proto = video.get('proto') - if not proto: - if base: - if base.startswith('rtmp'): - proto = 'rtmp' - elif base.startswith('http'): - proto = 'http' - ext = video.get('ext') - if proto == 'm3u8': - return self._extract_m3u8_formats(src, video_id, ext), rtmp_count - elif proto == 'rtmp': - rtmp_count += 1 - streamer = video.get('streamer') or base - return ([{ - 'url': streamer, - 'play_path': src, - 'ext': 'flv', - 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate), - 'tbr': bitrate, - 'width': width, - 'height': height, - }], rtmp_count) - elif proto.startswith('http'): - return ([{ - 'url': base + src, - 'ext': ext or 'flv', - 'tbr': bitrate, - 'width': width, - 'height': height, - }], rtmp_count) + def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'): + subtitles = {} + for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))): + src = textstream.get('src') + if not src: + continue + ext = textstream.get('ext') or determine_ext(src) + if not ext: + type_ = textstream.get('type') + SUBTITLES_TYPES = { + 'text/vtt': 'vtt', + 'text/srt': 'srt', + 'application/smptett+xml': 'tt', + } + if type_ in SUBTITLES_TYPES: + ext = SUBTITLES_TYPES[type_] + lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang + subtitles.setdefault(lang, []).append({ + 'url': src, + 'ext': ext, + }) + return subtitles + + def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True): + xspf = self._download_xml( + playlist_url, playlist_id, 'Downloading xpsf playlist', + 'Unable to download xspf manifest', fatal=fatal) + if xspf is False: + return [] + return self._parse_xspf(xspf, playlist_id) + + def _parse_xspf(self, playlist, playlist_id): + NS_MAP = { + 'xspf': 'http://xspf.org/ns/0/', + 's1': 'http://static.streamone.nl/player/ns/0', + } + + entries = [] + for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)): + title = xpath_text( + track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id) + description = xpath_text( + track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description') + thumbnail = xpath_text( + track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail') + duration = float_or_none( + xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000) + + formats = [{ + 'url': location.text, + 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)), + 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))), + 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))), + } for location 
in track.findall(xpath_with_ns('./xspf:location', NS_MAP))] + self._sort_formats(formats) + + entries.append({ + 'id': playlist_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, + }) + return entries def _live_title(self, name): """ Generate the title for a live video """ @@ -1065,6 +1285,12 @@ class InfoExtractor(object): None, '/', True, False, expire_time, '', None, None, None) self._downloader.cookiejar.set_cookie(cookie) + def _get_cookies(self, url): + """ Return a compat_cookies.SimpleCookie with the cookies for the url """ + req = sanitized_Request(url) + self._downloader.cookiejar.add_cookie_header(req) + return compat_cookies.SimpleCookie(req.get_header('Cookie')) + def get_testcases(self, include_onlymatching=False): t = getattr(self, '_TEST', None) if t: @@ -1103,6 +1329,23 @@ class InfoExtractor(object): def _get_subtitles(self, *args, **kwargs): raise NotImplementedError("This method must be implemented by subclasses") + @staticmethod + def _merge_subtitle_items(subtitle_list1, subtitle_list2): + """ Merge subtitle items for one language. Items with duplicated URLs + will be dropped. """ + list1_urls = set([item['url'] for item in subtitle_list1]) + ret = list(subtitle_list1) + ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls]) + return ret + + @classmethod + def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2): + """ Merge two subtitle dictionaries, language by language. """ + ret = dict(subtitle_dict1) + for lang in subtitle_dict2: + ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang]) + return ret + def extract_automatic_captions(self, *args, **kwargs): if (self._downloader.params.get('writeautomaticsub', False) or self._downloader.params.get('listsubtitles')): @@ -1116,7 +1359,7 @@ class InfoExtractor(object): class SearchInfoExtractor(InfoExtractor): """ Base class for paged search queries extractors. - They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query} + They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query} Instances should define _SEARCH_KEY and _MAX_RESULTS. """ diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py index 3db4db4e4..6f92ae2ed 100644 --- a/youtube_dl/extractor/condenast.py +++ b/youtube_dl/extractor/condenast.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals import re -import json from .common import InfoExtractor from ..compat import ( @@ -12,6 +11,7 @@ from ..compat import ( ) from ..utils import ( orderedSet, + remove_end, ) @@ -24,21 +24,33 @@ class CondeNastIE(InfoExtractor): # The keys are the supported sites and the values are the name to be shown # to the user and in the extractor description. 
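Worth noting from the common.py hunks above: the new subtitle-merging helpers combine dictionaries language by language and drop items whose URL already appears in the first list. A quick illustration (made-up URLs):
```
from youtube_dl.extractor.common import InfoExtractor

a = {'en': [{'url': 'http://example.com/en.vtt', 'ext': 'vtt'}]}
b = {
    'en': [{'url': 'http://example.com/en.vtt', 'ext': 'vtt'}],  # duplicate URL, dropped
    'de': [{'url': 'http://example.com/de.vtt', 'ext': 'vtt'}],  # new language, kept
}
merged = InfoExtractor._merge_subtitles(a, b)
# -> {'en': [the en.vtt item once], 'de': [the de.vtt item]}
```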
_SITES = { - 'wired': 'WIRED', + 'allure': 'Allure', + 'architecturaldigest': 'Architectural Digest', + 'arstechnica': 'Ars Technica', + 'bonappetit': 'Bon Appétit', + 'brides': 'Brides', + 'cnevids': 'Condé Nast', + 'cntraveler': 'Condé Nast Traveler', + 'details': 'Details', + 'epicurious': 'Epicurious', + 'glamour': 'Glamour', + 'golfdigest': 'Golf Digest', 'gq': 'GQ', + 'newyorker': 'The New Yorker', + 'self': 'SELF', + 'teenvogue': 'Teen Vogue', + 'vanityfair': 'Vanity Fair', 'vogue': 'Vogue', - 'glamour': 'Glamour', + 'wired': 'WIRED', 'wmagazine': 'W Magazine', - 'vanityfair': 'Vanity Fair', - 'cnevids': 'Condé Nast', } - _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys()) + _VALID_URL = r'http://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed(?:js)?)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys()) IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) - EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys()) + EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed(?:js)?)/.+?' % '|'.join(_SITES.keys()) - _TEST = { + _TESTS = [{ 'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', 'md5': '1921f713ed48aabd715691f774c451f7', 'info_dict': { @@ -47,7 +59,16 @@ class CondeNastIE(InfoExtractor): 'title': '3D Printed Speakers Lit With LED', 'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', } - } + }, { + # JS embed + 'url': 'http://player.cnevids.com/embedjs/55f9cf8b61646d1acf00000c/5511d76261646d5566020000.js', + 'md5': 'f1a6f9cafb7083bab74a710f65d08999', + 'info_dict': { + 'id': '55f9cf8b61646d1acf00000c', + 'ext': 'mp4', + 'title': '3D printed TSA Travel Sentry keys really do open TSA locks', + } + }] def _extract_series(self, url, webpage): title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>', @@ -86,8 +107,8 @@ class CondeNastIE(InfoExtractor): info_url = base_info_url + data info_page = self._download_webpage(info_url, video_id, 'Downloading video info') - video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info') - video_info = json.loads(video_info) + video_info = self._search_regex(r'var\s+video\s*=\s*({.+?});', info_page, 'video info') + video_info = self._parse_json(video_info, video_id) formats = [{ 'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']), @@ -111,6 +132,13 @@ class CondeNastIE(InfoExtractor): url_type = mobj.group('type') item_id = mobj.group('id') + # Convert JS embed to regular embed + if url_type == 'embedjs': + parsed_url = compat_urlparse.urlparse(url) + url = compat_urlparse.urlunparse(parsed_url._replace( + path=remove_end(parsed_url.path, '.js').replace('/embedjs/', '/embed/'))) + url_type = 'embed' + self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site]) webpage = self._download_webpage(url, item_id) diff --git a/youtube_dl/extractor/criterion.py b/youtube_dl/extractor/criterion.py index 4fb178165..dedb810a0 100644 --- a/youtube_dl/extractor/criterion.py +++ b/youtube_dl/extractor/criterion.py @@ -27,9 +27,7 @@ class CriterionIE(InfoExtractor): final_url = self._search_regex( r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url') title = self._og_search_title(webpage) - description = self._html_search_regex( - r'<meta 
name="description" content="(.+?)" />', - webpage, 'video description') + description = self._html_search_meta('description', webpage) thumbnail = self._search_regex( r'so.addVariable\("thumbnailURL", "(.+?)"\)\;', webpage, 'thumbnail url') diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index d1b6d7366..00d943f77 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -5,31 +5,85 @@ import re import json import base64 import zlib -import xml.etree.ElementTree from hashlib import sha1 from math import pow, sqrt, floor from .common import InfoExtractor from ..compat import ( + compat_etree_fromstring, compat_urllib_parse, compat_urllib_parse_unquote, compat_urllib_request, + compat_urlparse, ) from ..utils import ( ExtractorError, bytes_to_intlist, intlist_to_bytes, + int_or_none, + lowercase_escape, + remove_end, + sanitized_Request, unified_strdate, urlencode_postdata, + xpath_text, ) from ..aes import ( aes_cbc_decrypt, ) -class CrunchyrollIE(InfoExtractor): - _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)' +class CrunchyrollBaseIE(InfoExtractor): _NETRC_MACHINE = 'crunchyroll' + + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + self.report_login() + login_url = 'https://www.crunchyroll.com/?a=formhandler' + data = urlencode_postdata({ + 'formname': 'RpcApiUser_Login', + 'name': username, + 'password': password, + }) + login_request = sanitized_Request(login_url, data) + login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') + self._download_webpage(login_request, None, False, 'Wrong login info') + + def _real_initialize(self): + self._login() + + def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None): + request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request) + else sanitized_Request(url_or_request)) + # Accept-Language must be set explicitly to accept any language to avoid issues + # similar to https://github.com/rg3/youtube-dl/issues/6797. + # Along with IP address Crunchyroll uses Accept-Language to guess whether georestriction + # should be imposed or not (from what I can see it just takes the first language + # ignoring the priority and requires it to correspond the IP). By the way this causes + # Crunchyroll to not work in georestriction cases in some browsers that don't place + # the locale lang first in header. However allowing any language seems to workaround the issue. + request.add_header('Accept-Language', '*') + return super(CrunchyrollBaseIE, self)._download_webpage( + request, video_id, note, errnote, fatal, tries, timeout, encoding) + + @staticmethod + def _add_skip_wall(url): + parsed_url = compat_urlparse.urlparse(url) + qs = compat_urlparse.parse_qs(parsed_url.query) + # Always force skip_wall to bypass maturity wall, namely 18+ confirmation message: + # > This content may be inappropriate for some people. + # > Are you sure you want to continue? + # since it's not disabled by default in crunchyroll account's settings. + # See https://github.com/rg3/youtube-dl/issues/7202. 
+ qs['skip_wall'] = ['1'] + return compat_urlparse.urlunparse( + parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True))) + + +class CrunchyrollIE(CrunchyrollBaseIE): + _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)' _TESTS = [{ 'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513', 'info_dict': { @@ -52,7 +106,7 @@ class CrunchyrollIE(InfoExtractor): 'id': '589804', 'ext': 'flv', 'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11', - 'description': 'md5:fe2743efedb49d279552926d0bd0cd9e', + 'description': 'md5:2fbc01f90b87e8e9137296f37b461c12', 'thumbnail': 're:^https?://.*\.jpg$', 'uploader': 'Danny Choo Network', 'upload_date': '20120213', @@ -61,10 +115,13 @@ class CrunchyrollIE(InfoExtractor): # rtmp 'skip_download': True, }, - }, { 'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697', 'only_matching': True, + }, { + # geo-restricted (US), 18+ maturity wall, non-premium available + 'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617', + 'only_matching': True, }] _FORMAT_IDS = { @@ -74,24 +131,6 @@ class CrunchyrollIE(InfoExtractor): '1080': ('80', '108'), } - def _login(self): - (username, password) = self._get_login_info() - if username is None: - return - self.report_login() - login_url = 'https://www.crunchyroll.com/?a=formhandler' - data = urlencode_postdata({ - 'formname': 'RpcApiUser_Login', - 'name': username, - 'password': password, - }) - login_request = compat_urllib_request.Request(login_url, data) - login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') - self._download_webpage(login_request, None, False, 'Wrong login info') - - def _real_initialize(self): - self._login() - def _decrypt_subtitles(self, data, iv, id): data = bytes_to_intlist(base64.b64decode(data.encode('utf-8'))) iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8'))) @@ -197,7 +236,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text return output def _extract_subtitles(self, subtitle): - sub_root = xml.etree.ElementTree.fromstring(subtitle) + sub_root = compat_etree_fromstring(subtitle) return [{ 'ext': 'srt', 'data': self._convert_subtitles_to_srt(sub_root), @@ -208,7 +247,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text def _get_subtitles(self, video_id, webpage): subtitles = {} - for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage): + for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage): sub_page = self._download_webpage( 'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id, video_id, note='Downloading subtitles for ' + sub_name) @@ -234,8 +273,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text else: webpage_url = 'http://www.' 
+ mobj.group('url') - webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage') - note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='') + webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage') + note_m = self._html_search_regex( + r'<div class="showmedia-trailer-notice">(.+?)</div>', + webpage, 'trailer-notice', default='') if note_m: raise ExtractorError(note_m) @@ -245,18 +286,29 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text if msg.get('type') == 'error': raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True) - video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL) + if 'To view this, please log in to verify you are 18 or older.' in webpage: + self.raise_login_required() + + video_title = self._html_search_regex( + r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>', + webpage, 'video_title') video_title = re.sub(r' {2,}', ' ', video_title) - video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='') - if not video_description: - video_description = None - video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL) + video_description = self._html_search_regex( + r'<script[^>]*>\s*.+?\[media_id=%s\].+?"description"\s*:\s*"([^"]+)' % video_id, + webpage, 'description', default=None) + if video_description: + video_description = lowercase_escape(video_description.replace(r'\r\n', '\n')) + video_upload_date = self._html_search_regex( + [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'], + webpage, 'video_upload_date', fatal=False, flags=re.DOTALL) if video_upload_date: video_upload_date = unified_strdate(video_upload_date) - video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL) + video_uploader = self._html_search_regex( + r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage, + 'video_uploader', fatal=False) playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url')) - playerdata_req = compat_urllib_request.Request(playerdata_url) + playerdata_req = sanitized_Request(playerdata_url) playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url}) playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info') @@ -268,7 +320,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage): stream_quality, stream_format = self._FORMAT_IDS[fmt] video_format = fmt + 'p' - streamdata_req = compat_urllib_request.Request( + streamdata_req = sanitized_Request( 'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s' % (stream_id, stream_format, stream_quality), compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8')) @@ -279,13 +331,33 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text stream_info = streamdata.find('./{default}preload/stream_info') video_url 
= stream_info.find('./host').text video_play_path = stream_info.find('./file').text - formats.append({ + metadata = stream_info.find('./metadata') + format_info = { + 'format': video_format, + 'format_id': video_format, + 'height': int_or_none(xpath_text(metadata, './height')), + 'width': int_or_none(xpath_text(metadata, './width')), + } + + if '.fplive.net/' in video_url: + video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip()) + parsed_video_url = compat_urlparse.urlparse(video_url) + direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace( + netloc='v.lvlt.crcdn.net', + path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_play_path.split(':')[-1]))) + if self._is_valid_url(direct_video_url, video_id, video_format): + format_info.update({ + 'url': direct_video_url, + }) + formats.append(format_info) + continue + + format_info.update({ 'url': video_url, 'play_path': video_play_path, 'ext': 'flv', - 'format': video_format, - 'format_id': video_format, }) + formats.append(format_info) subtitles = self.extract_subtitles(video_id, webpage) @@ -301,9 +373,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text } -class CrunchyrollShowPlaylistIE(InfoExtractor): +class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE): IE_NAME = "crunchyroll:playlist" - _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$' + _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)' _TESTS = [{ 'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi', @@ -312,12 +384,25 @@ class CrunchyrollShowPlaylistIE(InfoExtractor): 'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi' }, 'playlist_count': 13, + }, { + # geo-restricted (US), 18+ maturity wall, non-premium available + 'url': 'http://www.crunchyroll.com/cosplay-complex-ova', + 'info_dict': { + 'id': 'cosplay-complex-ova', + 'title': 'Cosplay Complex OVA' + }, + 'playlist_count': 3, + 'skip': 'Georestricted', + }, { + # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14 + 'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1', + 'only_matching': True, }] def _real_extract(self, url): show_id = self._match_id(url) - webpage = self._download_webpage(url, show_id) + webpage = self._download_webpage(self._add_skip_wall(url), show_id) title = self._html_search_regex( r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>', webpage, 'title') diff --git a/youtube_dl/extractor/cspan.py b/youtube_dl/extractor/cspan.py index fbefd37d0..7b685d157 100644 --- a/youtube_dl/extractor/cspan.py +++ b/youtube_dl/extractor/cspan.py @@ -9,6 +9,7 @@ from ..utils import ( find_xpath_attr, smuggle_url, determine_ext, + ExtractorError, ) from .senateisvp import SenateISVPIE @@ -18,33 +19,32 @@ class CSpanIE(InfoExtractor): IE_DESC = 'C-SPAN' _TESTS = [{ 'url': 'http://www.c-span.org/video/?313572-1/HolderonV', - 'md5': '8e44ce11f0f725527daccc453f553eb0', + 'md5': '94b29a4f131ff03d23471dd6f60b6a1d', 'info_dict': { 'id': '315139', 'ext': 'mp4', 'title': 'Attorney General Eric Holder on Voting Rights Act Decision', - 'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. 
Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.', + 'description': 'Attorney General Eric Holder speaks to reporters following the Supreme Court decision in [Shelby County v. Holder], in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced.', }, 'skip': 'Regularly fails on travis, for unknown reasons', }, { 'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models', - # For whatever reason, the served video alternates between - # two different ones + 'md5': '8e5fbfabe6ad0f89f3012a7943c1287b', 'info_dict': { - 'id': '340723', + 'id': 'c4486943', 'ext': 'mp4', - 'title': 'International Health Care Models', + 'title': 'CSPAN - International Health Care Models', 'description': 'md5:7a985a2d595dba00af3d9c9f0783c967', } }, { 'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall', - 'md5': '446562a736c6bf97118e389433ed88d4', + 'md5': '2ae5051559169baadba13fc35345ae74', 'info_dict': { 'id': '342759', 'ext': 'mp4', 'title': 'General Motors Ignition Switch Recall', 'duration': 14848, - 'description': 'md5:70c7c3b8fa63fa60d42772440596034c' + 'description': 'md5:118081aedd24bf1d3b68b3803344e7f3' }, }, { # Video from senate.gov @@ -57,67 +57,77 @@ class CSpanIE(InfoExtractor): }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - page_id = mobj.group('id') - webpage = self._download_webpage(url, page_id) - video_id = self._search_regex(r'progid=\'?([0-9]+)\'?>', webpage, 'video id') + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + matches = re.search(r'data-(prog|clip)id=\'([0-9]+)\'', webpage) + if matches: + video_type, video_id = matches.groups() + if video_type == 'prog': + video_type = 'program' + else: + senate_isvp_url = SenateISVPIE._search_iframe_url(webpage) + if senate_isvp_url: + title = self._og_search_title(webpage) + surl = smuggle_url(senate_isvp_url, {'force_title': title}) + return self.url_result(surl, 'SenateISVP', video_id, title) - description = self._html_search_regex( - [ - # The full description - r'<div class=\'expandable\'>(.*?)<a href=\'#\'', - # If the description is small enough the other div is not - # present, otherwise this is a stripped version - r'<p class=\'initial\'>(.*?)</p>' - ], - webpage, 'description', flags=re.DOTALL, default=None) + def get_text_attr(d, attr): + return d.get(attr, {}).get('#text') - info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id - data = self._download_json(info_url, video_id) + data = self._download_json( + 'http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s' % (video_type, video_id), + video_id)['video'] + if data['@status'] != 'Success': + raise ExtractorError('%s said: %s' % (self.IE_NAME, get_text_attr(data, 'error')), expected=True) doc = self._download_xml( - 'http://www.c-span.org/common/services/flashXml.php?programid=' + video_id, + 'http://www.c-span.org/common/services/flashXml.php?%sid=%s' % (video_type, video_id), video_id) + description = self._html_search_meta('description', webpage) + title = find_xpath_attr(doc, './/string', 'name', 'title').text thumbnail = find_xpath_attr(doc, './/string', 'name', 'poster').text - senate_isvp_url = SenateISVPIE._search_iframe_url(webpage) - if senate_isvp_url: - surl = smuggle_url(senate_isvp_url, {'force_title': title}) - 
return self.url_result(surl, 'SenateISVP', video_id, title) - - files = data['video']['files'] - try: - capfile = data['video']['capfile']['#text'] - except KeyError: - capfile = None + files = data['files'] + capfile = get_text_attr(data, 'capfile') - entries = [{ - 'id': '%s_%d' % (video_id, partnum + 1), - 'title': ( - title if len(files) == 1 else - '%s part %d' % (title, partnum + 1)), - 'url': unescapeHTML(f['path']['#text']), - 'description': description, - 'thumbnail': thumbnail, - 'duration': int_or_none(f.get('length', {}).get('#text')), - 'subtitles': { - 'en': [{ - 'url': capfile, - 'ext': determine_ext(capfile, 'dfxp') - }], - } if capfile else None, - } for partnum, f in enumerate(files)] + entries = [] + for partnum, f in enumerate(files): + formats = [] + for quality in f['qualities']: + formats.append({ + 'format_id': '%s-%sp' % (get_text_attr(quality, 'bitrate'), get_text_attr(quality, 'height')), + 'url': unescapeHTML(get_text_attr(quality, 'file')), + 'height': int_or_none(get_text_attr(quality, 'height')), + 'tbr': int_or_none(get_text_attr(quality, 'bitrate')), + }) + self._sort_formats(formats) + entries.append({ + 'id': '%s_%d' % (video_id, partnum + 1), + 'title': ( + title if len(files) == 1 else + '%s part %d' % (title, partnum + 1)), + 'formats': formats, + 'description': description, + 'thumbnail': thumbnail, + 'duration': int_or_none(get_text_attr(f, 'length')), + 'subtitles': { + 'en': [{ + 'url': capfile, + 'ext': determine_ext(capfile, 'dfxp') + }], + } if capfile else None, + }) if len(entries) == 1: entry = dict(entries[0]) - entry['id'] = video_id + entry['id'] = 'c' + video_id if video_type == 'clip' else video_id return entry else: return { '_type': 'playlist', 'entries': entries, 'title': title, - 'id': video_id, + 'id': 'c' + video_id if video_type == 'clip' else video_id, } diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 1a41c0db1..0c5b6617f 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -7,14 +7,13 @@ import itertools from .common import InfoExtractor -from ..compat import ( - compat_str, - compat_urllib_request, -) from ..utils import ( + determine_ext, + error_to_compat_str, ExtractorError, int_or_none, - orderedSet, + parse_iso8601, + sanitized_Request, str_to_int, unescapeHTML, ) @@ -24,14 +23,20 @@ class DailymotionBaseInfoExtractor(InfoExtractor): @staticmethod def _build_request(url): """Build a request with the family filter disabled""" - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('Cookie', 'family_filter=off; ff=off') return request + def _download_webpage_handle_no_ff(self, url, *args, **kwargs): + request = self._build_request(url) + return self._download_webpage_handle(request, *args, **kwargs) + + def _download_webpage_no_ff(self, url, *args, **kwargs): + request = self._build_request(url) + return self._download_webpage(request, *args, **kwargs) -class DailymotionIE(DailymotionBaseInfoExtractor): - """Information Extractor for Dailymotion""" +class DailymotionIE(DailymotionBaseInfoExtractor): _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)' IE_NAME = 'dailymotion' @@ -50,10 +55,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'info_dict': { 'id': 'x2iuewm', 'ext': 'mp4', - 'uploader': 'IGN', 'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News', - 'upload_date': '20150306', + 'description': 'Several 
come bundled with the Steam Controller.', + 'thumbnail': 're:^https?:.*\.(?:jpg|png)$', 'duration': 74, + 'timestamp': 1425657362, + 'upload_date': '20150306', + 'uploader': 'IGN', + 'uploader_id': 'xijv66', + 'age_limit': 0, + 'view_count': int, + 'comment_count': int, } }, # Vevo video @@ -82,46 +94,137 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'uploader': 'HotWaves1012', 'age_limit': 18, } + }, + # geo-restricted, player v5 + { + 'url': 'http://www.dailymotion.com/video/xhza0o', + 'only_matching': True, + }, + # with subtitles + { + 'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news', + 'only_matching': True, } ] def _real_extract(self, url): video_id = self._match_id(url) - url = 'https://www.dailymotion.com/video/%s' % video_id - # Retrieve video webpage to extract further information - request = self._build_request(url) - webpage = self._download_webpage(request, video_id) + webpage = self._download_webpage_no_ff( + 'https://www.dailymotion.com/video/%s' % video_id, video_id) - # Extract URL, uploader and title from webpage - self.report_extraction(video_id) + age_limit = self._rta_search(webpage) + + description = self._og_search_description(webpage) or self._html_search_meta( + 'description', webpage, 'description') - # It may just embed a vevo video: - m_vevo = re.search( + view_count = str_to_int(self._search_regex( + [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"', + r'video_views_count[^>]+>\s+([\d\.,]+)'], + webpage, 'view count', fatal=False)) + comment_count = int_or_none(self._search_regex( + r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"', + webpage, 'comment count', fatal=False)) + + player_v5 = self._search_regex( + [r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826 + r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);', + r'buildPlayer\(({.+?})\);'], + webpage, 'player v5', default=None) + if player_v5: + player = self._parse_json(player_v5, video_id) + metadata = player['metadata'] + + self._check_error(metadata) + + formats = [] + for quality, media_list in metadata['qualities'].items(): + for media in media_list: + media_url = media.get('url') + if not media_url: + continue + type_ = media.get('type') + if type_ == 'application/vnd.lumberjack.manifest': + continue + ext = determine_ext(media_url) + if type_ == 'application/x-mpegURL' or ext == 'm3u8': + m3u8_formats = self._extract_m3u8_formats( + media_url, video_id, 'mp4', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + elif type_ == 'application/f4m' or ext == 'f4m': + f4m_formats = self._extract_f4m_formats( + media_url, video_id, preference=-1, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) + else: + f = { + 'url': media_url, + 'format_id': quality, + } + m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url) + if m: + f.update({ + 'width': int(m.group('width')), + 'height': int(m.group('height')), + }) + formats.append(f) + self._sort_formats(formats) + + title = metadata['title'] + duration = int_or_none(metadata.get('duration')) + timestamp = int_or_none(metadata.get('created_time')) + thumbnail = metadata.get('poster_url') + uploader = metadata.get('owner', {}).get('screenname') + uploader_id = metadata.get('owner', {}).get('id') + + subtitles = {} + subtitles_data = metadata.get('subtitles', {}).get('data', {}) + if subtitles_data and isinstance(subtitles_data, dict): + for 
subtitle_lang, subtitle in subtitles_data.items(): + subtitles[subtitle_lang] = [{ + 'ext': determine_ext(subtitle_url), + 'url': subtitle_url, + } for subtitle_url in subtitle.get('urls', [])] + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'timestamp': timestamp, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'age_limit': age_limit, + 'view_count': view_count, + 'comment_count': comment_count, + 'formats': formats, + 'subtitles': subtitles, + } + + # vevo embed + vevo_id = self._search_regex( r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)', - webpage) - if m_vevo is not None: - vevo_id = m_vevo.group('id') - self.to_screen('Vevo video detected: %s' % vevo_id) - return self.url_result('vevo:%s' % vevo_id, ie='Vevo') + webpage, 'vevo embed', default=None) + if vevo_id: + return self.url_result('vevo:%s' % vevo_id, 'Vevo') - age_limit = self._rta_search(webpage) + # fallback old player + embed_page = self._download_webpage_no_ff( + 'https://www.dailymotion.com/embed/video/%s' % video_id, + video_id, 'Downloading embed page') - video_upload_date = None - mobj = re.search(r'<meta property="video:release_date" content="([0-9]{4})-([0-9]{2})-([0-9]{2}).+?"/>', webpage) - if mobj is not None: - video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3) - - embed_url = 'https://www.dailymotion.com/embed/video/%s' % video_id - embed_request = self._build_request(embed_url) - embed_page = self._download_webpage( - embed_request, video_id, 'Downloading embed page') - info = self._search_regex(r'var info = ({.*?}),$', embed_page, - 'video info', flags=re.MULTILINE) - info = json.loads(info) - if info.get('error') is not None: - msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title'] - raise ExtractorError(msg, expected=True) + timestamp = parse_iso8601(self._html_search_meta( + 'video:release_date', webpage, 'upload date')) + + info = self._parse_json( + self._search_regex( + r'var info = ({.*?}),$', embed_page, + 'video info', flags=re.MULTILINE), + video_id) + + self._check_error(info) formats = [] for (key, format_id) in self._FORMATS: @@ -139,16 +242,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'width': width, 'height': height, }) - if not formats: - raise ExtractorError('Unable to extract video URL') + self._sort_formats(formats) # subtitles video_subtitles = self.extract_subtitles(video_id, webpage) - view_count = str_to_int(self._search_regex( - r'video_views_count[^>]+>\s+([\d\.,]+)', - webpage, 'view count', fatal=False)) - title = self._og_search_title(webpage, default=None) if title is None: title = self._html_search_regex( @@ -159,8 +257,9 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'id': video_id, 'formats': formats, 'uploader': info['owner.screenname'], - 'upload_date': video_upload_date, + 'timestamp': timestamp, 'title': title, + 'description': description, 'subtitles': video_subtitles, 'thumbnail': info['thumbnail_url'], 'age_limit': age_limit, @@ -168,13 +267,18 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'duration': info['duration'] } + def _check_error(self, info): + if info.get('error') is not None: + raise ExtractorError( + '%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True) + def _get_subtitles(self, video_id, webpage): try: sub_list = self._download_webpage( 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id, video_id, note=False) except ExtractorError as 
err: - self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err)) + self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err)) return {} info = json.loads(sub_list) if (info['total'] > 0): @@ -199,18 +303,26 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor): }] def _extract_entries(self, id): - video_ids = [] + video_ids = set() + processed_urls = set() for pagenum in itertools.count(1): - request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum)) - webpage = self._download_webpage(request, - id, 'Downloading page %s' % pagenum) + page_url = self._PAGE_TEMPLATE % (id, pagenum) + webpage, urlh = self._download_webpage_handle_no_ff( + page_url, id, 'Downloading page %s' % pagenum) + if urlh.geturl() in processed_urls: + self.report_warning('Stopped at duplicated page %s, which is the same as %s' % ( + page_url, urlh.geturl()), id) + break + + processed_urls.add(urlh.geturl()) - video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage)) + for video_id in re.findall(r'data-xid="(.+?)"', webpage): + if video_id not in video_ids: + yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion') + video_ids.add(video_id) if re.search(self._MORE_PAGES_INDICATOR, webpage) is None: break - return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion') - for video_id in orderedSet(video_ids)] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -227,7 +339,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor): class DailymotionUserIE(DailymotionPlaylistIE): IE_NAME = 'dailymotion:user' - _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?:(?:old/)?user/)?(?P<user>[^/]+)$' + _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)' _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s' _TESTS = [{ 'url': 'https://www.dailymotion.com/user/nqtv', @@ -236,6 +348,17 @@ class DailymotionUserIE(DailymotionPlaylistIE): 'title': 'Rémi Gaillard', }, 'playlist_mincount': 100, + }, { + 'url': 'http://www.dailymotion.com/user/UnderProject', + 'info_dict': { + 'id': 'UnderProject', + 'title': 'UnderProject', + }, + 'playlist_mincount': 1800, + 'expected_warnings': [ + 'Stopped at duplicated page', + ], + 'skip': 'Takes too long time', }] def _real_extract(self, url): @@ -286,8 +409,7 @@ class DailymotionCloudIE(DailymotionBaseInfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - request = self._build_request(url) - webpage = self._download_webpage(request, video_id) + webpage = self._download_webpage_no_ff(url, video_id) title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title') diff --git a/youtube_dl/extractor/dbtv.py b/youtube_dl/extractor/dbtv.py index 212217625..133cdc50b 100644 --- a/youtube_dl/extractor/dbtv.py +++ b/youtube_dl/extractor/dbtv.py @@ -13,8 +13,8 @@ from ..utils import ( class DBTVIE(InfoExtractor): - _VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?dbtv\.no/(?:(?:lazyplayer|player)/)?(?P<id>[0-9]+)(?:#(?P<display_id>.+))?' 
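+    # The URL may use an optional 'player/' or 'lazyplayer/' path prefix, and the + # '#display_id' fragment is optional, falling back to the numeric video id: + # e.g. http://dbtv.no/3649835190001 or http://www.dbtv.no/lazyplayer/4631135248001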
+ _TESTS = [{ 'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen', 'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc', 'info_dict': { @@ -30,12 +30,18 @@ class DBTVIE(InfoExtractor): 'view_count': int, 'categories': list, } - } + }, { + 'url': 'http://dbtv.no/3649835190001', + 'only_matching': True, + }, { + 'url': 'http://www.dbtv.no/lazyplayer/4631135248001', + 'only_matching': True, + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - display_id = mobj.group('display_id') + display_id = mobj.group('display_id') or video_id data = self._download_json( 'http://api.dbtv.no/discovery/%s' % video_id, display_id) diff --git a/youtube_dl/extractor/dcn.py b/youtube_dl/extractor/dcn.py new file mode 100644 index 000000000..9737cff14 --- /dev/null +++ b/youtube_dl/extractor/dcn.py @@ -0,0 +1,82 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_urllib_parse +from ..utils import ( + int_or_none, + parse_iso8601, + sanitized_Request, +) + + +class DCNIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)' + _TEST = { + 'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887', + 'info_dict': + { + 'id': '17375', + 'ext': 'mp4', + 'title': 'رحلة العمر : الحلقة 1', + 'description': 'md5:0156e935d870acb8ef0a66d24070c6d6', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 2041, + 'timestamp': 1227504126, + 'upload_date': '20081124', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + request = sanitized_Request( + 'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id, + headers={'Origin': 'http://www.dcndigital.ae'}) + + video = self._download_json(request, video_id) + title = video.get('title_en') or video['title_ar'] + + webpage = self._download_webpage( + 'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' 
+ + compat_urllib_parse.urlencode({ + 'id': video['id'], + 'user_id': video['user_id'], + 'signature': video['signature'], + 'countries': 'Q0M=', + 'filter': 'DENY', + }), video_id) + + m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url') + formats = self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') + + rtsp_url = self._search_regex( + r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False) + if rtsp_url: + formats.append({ + 'url': rtsp_url, + 'format_id': 'rtsp', + }) + + self._sort_formats(formats) + + img = video.get('img') + thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None + duration = int_or_none(video.get('duration')) + description = video.get('description_en') or video.get('description_ar') + timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ') + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'timestamp': timestamp, + 'formats': formats, + } diff --git a/youtube_dl/extractor/democracynow.py b/youtube_dl/extractor/democracynow.py new file mode 100644 index 000000000..6cd395e11 --- /dev/null +++ b/youtube_dl/extractor/democracynow.py @@ -0,0 +1,88 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +import os.path + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + url_basename, + remove_start, +) + + +class DemocracynowIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?democracynow.org/(?P<id>[^\?]*)' + IE_NAME = 'democracynow' + _TESTS = [{ + 'url': 'http://www.democracynow.org/shows/2015/7/3', + 'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d', + 'info_dict': { + 'id': '2015-0703-001', + 'ext': 'mp4', + 'title': 'July 03, 2015 - Democracy Now!', + 'description': 'A daily independent global news hour with Amy Goodman & Juan González "What to the Slave is 4th of July?": James Earl Jones Reads Frederick Douglass\u2019 Historic Speech : "This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag : "We Shall Overcome": Remembering Folk Icon, Activist Pete Seeger in His Own Words & Songs', + }, + }, { + 'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree', + 'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d', + 'info_dict': { + 'id': '2015-0703-001', + 'ext': 'mp4', + 'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag', + 'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21', + }, + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + description = self._og_search_description(webpage) + + json_data = self._parse_json(self._search_regex( + r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'), + display_id) + video_id = None + formats = [] + + default_lang = 'en' + + subtitles = {} + + def add_subtitle_item(lang, info_dict): + if lang not in subtitles: + subtitles[lang] = [] + subtitles[lang].append(info_dict) + + # chapter_file are not subtitles + if 'caption_file' in json_data: + add_subtitle_item(default_lang, { + 'url': compat_urlparse.urljoin(url, json_data['caption_file']), + }) + + for subtitle_item in json_data.get('captions', []): + lang = subtitle_item.get('language', '').lower() or default_lang + add_subtitle_item(lang, { + 'url': compat_urlparse.urljoin(url, subtitle_item['url']), + }) + + 
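# The JSON payload may expose direct media URLs under the 'file', 'audio' and + # 'video' keys; any query string is stripped and the video id is recovered + # from the first media file name (with a leading 'dn' removed). +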
for key in ('file', 'audio', 'video'): + media_url = json_data.get(key, '') + if not media_url: + continue + media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url)) + video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn') + formats.append({ + 'url': media_url, + }) + + self._sort_formats(formats) + + return { + 'id': video_id or display_id, + 'title': json_data['title'], + 'description': description, + 'subtitles': subtitles, + 'formats': formats, + } diff --git a/youtube_dl/extractor/dhm.py b/youtube_dl/extractor/dhm.py index 3ed1f1663..44e0c5d4d 100644 --- a/youtube_dl/extractor/dhm.py +++ b/youtube_dl/extractor/dhm.py @@ -1,10 +1,7 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import ( - xpath_text, - parse_duration, -) +from ..utils import parse_duration class DHMIE(InfoExtractor): @@ -34,24 +31,14 @@ class DHMIE(InfoExtractor): }] def _real_extract(self, url): - video_id = self._match_id(url) + playlist_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) + webpage = self._download_webpage(url, playlist_id) playlist_url = self._search_regex( r"file\s*:\s*'([^']+)'", webpage, 'playlist url') - playlist = self._download_xml(playlist_url, video_id) - - track = playlist.find( - './{http://xspf.org/ns/0/}trackList/{http://xspf.org/ns/0/}track') - - video_url = xpath_text( - track, './{http://xspf.org/ns/0/}location', - 'video url', fatal=True) - thumbnail = xpath_text( - track, './{http://xspf.org/ns/0/}image', - 'thumbnail') + entries = self._extract_xspf_playlist(playlist_url, playlist_id) title = self._search_regex( [r'dc:title="([^"]+)"', r'<title> »([^<]+)</title>'], @@ -63,11 +50,10 @@ class DHMIE(InfoExtractor): r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)', webpage, 'duration', default=None)) - return { - 'id': video_id, - 'url': video_url, + entries[0].update({ 'title': title, 'description': description, 'duration': duration, - 'thumbnail': thumbnail, - } + }) + + return self.playlist_result(entries, playlist_id) diff --git a/youtube_dl/extractor/divxstage.py b/youtube_dl/extractor/divxstage.py deleted file mode 100644 index b88379e06..000000000 --- a/youtube_dl/extractor/divxstage.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import unicode_literals - -from .novamov import NovaMovIE - - -class DivxStageIE(NovaMovIE): - IE_NAME = 'divxstage' - IE_DESC = 'DivxStage' - - _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'} - - _HOST = 'www.divxstage.eu' - - _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' - _TITLE_REGEX = r'<div class="video_det">\s*<strong>([^<]+)</strong>' - _DESCRIPTION_REGEX = r'<div class="video_det">\s*<strong>[^<]+</strong>\s*<p>([^<]+)</p>' - - _TEST = { - 'url': 'http://www.divxstage.eu/video/57f238e2e5e01', - 'md5': '63969f6eb26533a1968c4d325be63e72', - 'info_dict': { - 'id': '57f238e2e5e01', - 'ext': 'flv', - 'title': 'youtubedl test video', - 'description': 'This is a test video for youtubedl.', - } - } diff --git a/youtube_dl/extractor/dplay.py b/youtube_dl/extractor/dplay.py new file mode 100644 index 000000000..6cda56a7f --- /dev/null +++ b/youtube_dl/extractor/dplay.py @@ -0,0 +1,51 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import time + +from .common import InfoExtractor +from ..utils import int_or_none + + +class DPlayIE(InfoExtractor): + _VALID_URL = r'http://www\.dplay\.se/[^/]+/(?P<id>[^/?#]+)' + + _TEST = { + 'url': 
'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/', + 'info_dict': { + 'id': '3172', + 'ext': 'mp4', + 'display_id': 'season-1-svensken-lar-sig-njuta-av-livet', + 'title': 'Svensken lär sig njuta av livet', + 'duration': 2650, + }, + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + video_id = self._search_regex( + r'data-video-id="(\d+)"', webpage, 'video id') + + info = self._download_json( + 'http://www.dplay.se/api/v2/ajax/videos?video_id=' + video_id, + video_id)['data'][0] + + self._set_cookie( + 'secure.dplay.se', 'dsc-geo', + '{"countryCode":"NL","expiry":%d}' % ((time.time() + 20 * 60) * 1000)) + # TODO: consider adding support for 'stream_type=hds', it seems to + # require setting some cookies + manifest_url = self._download_json( + 'https://secure.dplay.se/secure/api/v2/user/authorization/stream/%s?stream_type=hls' % video_id, + video_id, 'Getting manifest url for hls stream')['hls'] + formats = self._extract_m3u8_formats( + manifest_url, video_id, ext='mp4', entry_protocol='m3u8_native') + + return { + 'id': video_id, + 'display_id': display_id, + 'title': info['title'], + 'formats': formats, + 'duration': int_or_none(info.get('video_metadata_length'), scale=1000), + } diff --git a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py index 38e6597c8..d836c1a6c 100644 --- a/youtube_dl/extractor/dramafever.py +++ b/youtube_dl/extractor/dramafever.py @@ -7,7 +7,6 @@ from .common import InfoExtractor from ..compat import ( compat_HTTPError, compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( @@ -16,6 +15,7 @@ from ..utils import ( determine_ext, int_or_none, parse_iso8601, + sanitized_Request, ) @@ -51,7 +51,7 @@ class DramaFeverBaseIE(InfoExtractor): 'password': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) response = self._download_webpage( request, None, 'Logging in as %s' % username) diff --git a/youtube_dl/extractor/dumpert.py b/youtube_dl/extractor/dumpert.py index 999fb5620..e5aadcd25 100644 --- a/youtube_dl/extractor/dumpert.py +++ b/youtube_dl/extractor/dumpert.py @@ -2,15 +2,18 @@ from __future__ import unicode_literals import base64 +import re from .common import InfoExtractor -from ..compat import compat_urllib_request -from ..utils import qualities +from ..utils import ( + qualities, + sanitized_Request, +) class DumpertIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/mediabase/(?P<id>[0-9]+/[0-9a-zA-Z]+)' - _TEST = { + _VALID_URL = r'(?P<protocol>https?)://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)' + _TESTS = [{ 'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/', 'md5': '1b9318d7d5054e7dcb9dc7654f21d643', 'info_dict': { @@ -20,12 +23,18 @@ class DumpertIE(InfoExtractor): 'description': 'Niet schrikken hoor', 'thumbnail': 're:^https?://.*\.jpg$', } - } + }, { + 'url': 'http://www.dumpert.nl/embed/6675421/dc440fe7/', + 'only_matching': True, + }] def _real_extract(self, url): - video_id = self._match_id(url) + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + protocol = mobj.group('protocol') - req = compat_urllib_request.Request(url) + url = '%s://www.dumpert.nl/mediabase/%s' % (protocol, video_id) + req = sanitized_Request(url) req.add_header('Cookie', 'nsfw=1; cpc=10') webpage = 
self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/eagleplatform.py b/youtube_dl/extractor/eagleplatform.py index 688dfc2f7..7bbf617d4 100644 --- a/youtube_dl/extractor/eagleplatform.py +++ b/youtube_dl/extractor/eagleplatform.py @@ -21,7 +21,7 @@ class EaglePlatformIE(InfoExtractor): _TESTS = [{ # http://lenta.ru/news/2015/03/06/navalny/ 'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201', - 'md5': '0b7994faa2bd5c0f69a3db6db28d078d', + 'md5': '70f5187fb620f2c1d503b3b22fd4efe3', 'info_dict': { 'id': '227304', 'ext': 'mp4', @@ -36,7 +36,7 @@ class EaglePlatformIE(InfoExtractor): # http://muz-tv.ru/play/7129/ # http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true 'url': 'eagleplatform:media.clipyou.ru:12820', - 'md5': '6c2ebeab03b739597ce8d86339d5a905', + 'md5': '90b26344ba442c8e44aa4cf8f301164a', 'info_dict': { 'id': '12820', 'ext': 'mp4', @@ -48,7 +48,8 @@ class EaglePlatformIE(InfoExtractor): 'skip': 'Georestricted', }] - def _handle_error(self, response): + @staticmethod + def _handle_error(response): status = int_or_none(response.get('status', 200)) if status != 200: raise ExtractorError(' '.join(response['errors']), expected=True) @@ -58,6 +59,9 @@ class EaglePlatformIE(InfoExtractor): self._handle_error(response) return response + def _get_video_url(self, url_or_request, video_id, note='Downloading JSON metadata'): + return self._download_json(url_or_request, video_id, note)['data'][0] + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id') @@ -69,7 +73,7 @@ class EaglePlatformIE(InfoExtractor): title = media['title'] description = media.get('description') - thumbnail = media.get('snapshot') + thumbnail = self._proto_relative_url(media.get('snapshot'), 'http:') duration = int_or_none(media.get('duration')) view_count = int_or_none(media.get('views')) @@ -78,13 +82,20 @@ class EaglePlatformIE(InfoExtractor): if age_restriction: age_limit = 0 if age_restriction == 'allow_all' else 18 - m3u8_data = self._download_json( - media['sources']['secure_m3u8']['auto'], - video_id, 'Downloading m3u8 JSON') + secure_m3u8 = self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:') + m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON') formats = self._extract_m3u8_formats( - m3u8_data['data'][0], video_id, - 'mp4', entry_protocol='m3u8_native') + m3u8_url, video_id, + 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') + + mp4_url = self._get_video_url( + # Secure mp4 URL is constructed according to Player.prototype.mp4 from + # http://lentaru.media.eagleplatform.com/player/player.js + re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4', secure_m3u8), + video_id, 'Downloading mp4 JSON') + formats.append({'url': mp4_url, 'format_id': 'mp4'}) + self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/eitb.py b/youtube_dl/extractor/eitb.py index 2cba82532..c83845fc2 100644 --- a/youtube_dl/extractor/eitb.py +++ b/youtube_dl/extractor/eitb.py @@ -1,39 +1,92 @@ # encoding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor -from .brightcove import BrightcoveIE -from ..utils import ExtractorError +from ..utils import ( + float_or_none, + int_or_none, + parse_iso8601, + sanitized_Request, +) class EitbIE(InfoExtractor): IE_NAME = 'eitb.tv' - _VALID_URL = 
r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)' + _VALID_URL = r'https?://(?:www\.)?eitb\.tv/(?:eu/bideoa|es/video)/[^/]+/\d+/(?P<id>\d+)' _TEST = { - 'add_ie': ['Brightcove'], - 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/', + 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/4104995148001/4090227752001/lasa-y-zabala-30-anos/', 'md5': 'edf4436247185adee3ea18ce64c47998', 'info_dict': { - 'id': '2743577154001', + 'id': '4090227752001', 'ext': 'mp4', 'title': '60 minutos (Lasa y Zabala, 30 años)', - # All videos from eitb has this description in the brightcove info - 'description': '.', - 'uploader': 'Euskal Telebista', + 'description': 'Programa de reportajes de actualidad.', + 'duration': 3996.76, + 'timestamp': 1381789200, + 'upload_date': '20131014', + 'tags': list, }, } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - chapter_id = mobj.group('chapter_id') - webpage = self._download_webpage(url, chapter_id) - bc_url = BrightcoveIE._extract_brightcove_url(webpage) - if bc_url is None: - raise ExtractorError('Could not extract the Brightcove url') - # The BrightcoveExperience object doesn't contain the video id, we set - # it manually - bc_url += '&%40videoPlayer={0}'.format(chapter_id) - return self.url_result(bc_url, BrightcoveIE.ie_key()) + video_id = self._match_id(url) + + video = self._download_json( + 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/Video/MULTIWEBTV/%s/' % video_id, + video_id, 'Downloading video JSON') + + media = video['web_media'][0] + + formats = [] + for rendition in media['RENDITIONS']: + video_url = rendition.get('PMD_URL') + if not video_url: + continue + tbr = float_or_none(rendition.get('ENCODING_RATE'), 1000) + format_id = 'http' + if tbr: + format_id += '-%d' % int(tbr) + formats.append({ + 'url': rendition['PMD_URL'], + 'format_id': format_id, + 'width': int_or_none(rendition.get('FRAME_WIDTH')), + 'height': int_or_none(rendition.get('FRAME_HEIGHT')), + 'tbr': tbr, + }) + + hls_url = media.get('HLS_SURL') + if hls_url: + request = sanitized_Request( + 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/', + headers={'Referer': url}) + token_data = self._download_json( + request, video_id, 'Downloading auth token', fatal=False) + if token_data: + token = token_data.get('token') + if token: + m3u8_formats = self._extract_m3u8_formats( + '%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + + hds_url = media.get('HDS_SURL') + if hds_url: + f4m_formats = self._extract_f4m_formats( + '%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'), + video_id, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': media.get('NAME_ES') or media.get('name') or media['NAME_EU'], + 'description': media.get('SHORT_DESC_ES') or video.get('desc_group') or media.get('SHORT_DESC_EU'), + 'thumbnail': media.get('STILL_URL') or media.get('THUMBNAIL_URL'), + 'duration': float_or_none(media.get('LENGTH'), 1000), + 'timestamp': parse_iso8601(media.get('BROADCST_DATE'), ' '), + 'tags': media.get('TAGS'), + 'formats': formats, + } diff --git a/youtube_dl/extractor/engadget.py b/youtube_dl/extractor/engadget.py index 4ea37ebd9..e4180701d 100644 --- a/youtube_dl/extractor/engadget.py +++ b/youtube_dl/extractor/engadget.py @@ -10,7 
+10,7 @@ from ..utils import ( class EngadgetIE(InfoExtractor): _VALID_URL = r'''(?x)https?://www.engadget.com/ - (?:video/5min/(?P<id>\d+)| + (?:video(?:/5min)?/(?P<id>\d+)| [\d/]+/.*?) ''' diff --git a/youtube_dl/extractor/eroprofile.py b/youtube_dl/extractor/eroprofile.py index 316033cf1..7fcd0151d 100644 --- a/youtube_dl/extractor/eroprofile.py +++ b/youtube_dl/extractor/eroprofile.py @@ -71,8 +71,7 @@ class EroProfileIE(InfoExtractor): m = re.search(r'You must be logged in to view this video\.', webpage) if m: - raise ExtractorError( - 'This video requires login. Please specify a username and password and try again.', expected=True) + self.raise_login_required('This video requires login') video_id = self._search_regex( [r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'], diff --git a/youtube_dl/extractor/escapist.py b/youtube_dl/extractor/escapist.py index c85b4c458..a3d7bbbcb 100644 --- a/youtube_dl/extractor/escapist.py +++ b/youtube_dl/extractor/escapist.py @@ -3,13 +3,12 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import compat_urllib_request - from ..utils import ( determine_ext, clean_html, int_or_none, float_or_none, + sanitized_Request, ) @@ -75,7 +74,7 @@ class EscapistIE(InfoExtractor): video_id = ims_video['videoID'] key = ims_video['hash'] - config_req = compat_urllib_request.Request( + config_req = sanitized_Request( 'http://www.escapistmagazine.com/videos/' 'vidconfig.php?videoID=%s&hash=%s' % (video_id, key)) config_req.add_header('Referer', url) diff --git a/youtube_dl/extractor/esri.py b/youtube_dl/extractor/esri.py new file mode 100644 index 000000000..bf5d2019f --- /dev/null +++ b/youtube_dl/extractor/esri.py @@ -0,0 +1,74 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + int_or_none, + parse_filesize, + unified_strdate, +) + + +class EsriVideoIE(InfoExtractor): + _VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)' + _TEST = { + 'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications', + 'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc', + 'info_dict': { + 'id': '1124', + 'ext': 'mp4', + 'title': 'ArcGIS Online - Developing Applications', + 'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 185, + 'upload_date': '20120419', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + formats = [] + for width, height, content in re.findall( + r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage): + for video_url, ext, filesize in re.findall( + r'<a[^>]+href="([^"]+)">([^<]+) \(([^<]+)\)</a>', content): + formats.append({ + 'url': compat_urlparse.urljoin(url, video_url), + 'ext': ext.lower(), + 'format_id': '%s-%s' % (ext.lower(), height), + 'width': int(width), + 'height': int(height), + 'filesize_approx': parse_filesize(filesize), + }) + self._sort_formats(formats) + + title = self._html_search_meta('title', webpage, 'title') + description = self._html_search_meta( + 'description', webpage, 'description', fatal=False) + + thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail', fatal=False) + if thumbnail: + thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail) + + duration = int_or_none(self._search_regex( + [r'var\s+videoSeconds\s*=\s*(\d+)', 
r"'duration'\s*:\s*(\d+)"], + webpage, 'duration', fatal=False)) + + upload_date = unified_strdate(self._html_search_meta( + 'last-modified', webpage, 'upload date', fatal=None)) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'upload_date': upload_date, + 'formats': formats + } diff --git a/youtube_dl/extractor/europa.py b/youtube_dl/extractor/europa.py new file mode 100644 index 000000000..adc43919e --- /dev/null +++ b/youtube_dl/extractor/europa.py @@ -0,0 +1,93 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + int_or_none, + orderedSet, + parse_duration, + qualities, + unified_strdate, + xpath_text +) + + +class EuropaIE(InfoExtractor): + _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)' + _TESTS = [{ + 'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758', + 'md5': '574f080699ddd1e19a675b0ddf010371', + 'info_dict': { + 'id': 'I107758', + 'ext': 'mp4', + 'title': 'TRADE - Wikileaks on TTIP', + 'description': 'NEW LIVE EC Midday press briefing of 11/08/2015', + 'thumbnail': 're:^https?://.*\.jpg$', + 'upload_date': '20150811', + 'duration': 34, + 'view_count': int, + 'formats': 'mincount:3', + } + }, { + 'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786', + 'only_matching': True, + }, { + 'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + playlist = self._download_xml( + 'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id) + + def get_item(type_, preference): + items = {} + for item in playlist.findall('./info/%s/item' % type_): + lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None) + if lang and label: + items[lang] = label.strip() + for p in preference: + if items.get(p): + return items[p] + + query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) + preferred_lang = query.get('sitelang', ('en', ))[0] + + preferred_langs = orderedSet((preferred_lang, 'en', 'int')) + + title = get_item('title', preferred_langs) or video_id + description = get_item('description', preferred_langs) + thumbnmail = xpath_text(playlist, './info/thumburl', 'thumbnail') + upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date')) + duration = parse_duration(xpath_text(playlist, './info/duration', 'duration')) + view_count = int_or_none(xpath_text(playlist, './info/views', 'views')) + + language_preference = qualities(preferred_langs[::-1]) + + formats = [] + for file_ in playlist.findall('./files/file'): + video_url = xpath_text(file_, './url') + if not video_url: + continue + lang = xpath_text(file_, './lg') + formats.append({ + 'url': video_url, + 'format_id': lang, + 'format_note': xpath_text(file_, './lglabel'), + 'language_preference': language_preference(lang) + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnmail, + 'upload_date': upload_date, + 'duration': duration, + 'view_count': view_count, + 'formats': formats + } diff --git a/youtube_dl/extractor/everyonesmixtape.py b/youtube_dl/extractor/everyonesmixtape.py index d872d828f..493d38af8 100644 --- 
a/youtube_dl/extractor/everyonesmixtape.py +++ b/youtube_dl/extractor/everyonesmixtape.py @@ -3,11 +3,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -42,7 +40,7 @@ class EveryonesMixtapeIE(InfoExtractor): playlist_id = mobj.group('id') pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id - pllist_req = compat_urllib_request.Request(pllist_url) + pllist_req = sanitized_Request(pllist_url) pllist_req.add_header('X-Requested-With', 'XMLHttpRequest') playlist_list = self._download_json( @@ -55,7 +53,7 @@ class EveryonesMixtapeIE(InfoExtractor): raise ExtractorError('Playlist id not found') pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no - pl_req = compat_urllib_request.Request(pl_url) + pl_req = sanitized_Request(pl_url) pl_req.add_header('X-Requested-With', 'XMLHttpRequest') playlist = self._download_json( pl_req, playlist_id, note='Downloading playlist info') diff --git a/youtube_dl/extractor/expotv.py b/youtube_dl/extractor/expotv.py index a38b773e8..1585a03bb 100644 --- a/youtube_dl/extractor/expotv.py +++ b/youtube_dl/extractor/expotv.py @@ -33,20 +33,27 @@ class ExpoTVIE(InfoExtractor): webpage = self._download_webpage(url, video_id) player_key = self._search_regex( r'<param name="playerKey" value="([^"]+)"', webpage, 'player key') - config_url = 'http://client.expotv.com/video/config/%s/%s' % ( - video_id, player_key) config = self._download_json( - config_url, video_id, - note='Downloading video configuration') + 'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key), + video_id, 'Downloading video configuration') - formats = [{ - 'url': fcfg['file'], - 'height': int_or_none(fcfg.get('height')), - 'format_note': fcfg.get('label'), - 'ext': self._search_regex( - r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'], - 'file extension', default=None), - } for fcfg in config['sources']] + formats = [] + for fcfg in config['sources']: + media_url = fcfg.get('file') + if not media_url: + continue + if fcfg.get('type') == 'm3u8': + formats.extend(self._extract_m3u8_formats( + media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')) + else: + formats.append({ + 'url': media_url, + 'height': int_or_none(fcfg.get('height')), + 'format_id': fcfg.get('label'), + 'ext': self._search_regex( + r'filename=.*\.([a-z0-9_A-Z]+)&', media_url, + 'file extension', default=None) or fcfg.get('type'), + }) self._sort_formats(formats) title = self._og_search_title(webpage) diff --git a/youtube_dl/extractor/extremetube.py b/youtube_dl/extractor/extremetube.py index c826a5404..3403581fd 100644 --- a/youtube_dl/extractor/extremetube.py +++ b/youtube_dl/extractor/extremetube.py @@ -3,23 +3,20 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_parse_qs, - compat_urllib_request, -) from ..utils import ( - qualities, + int_or_none, + sanitized_Request, str_to_int, ) class ExtremeTubeIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)' + _VALID_URL = r'https?://(?:www\.)?extremetube\.com/(?:[^/]+/)?video/(?P<id>[^/#?&]+)' _TESTS = [{ 'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431', 'md5': '344d0c6d50e2f16b06e49ca011d8ac69', 'info_dict': { - 'id': 
'652431', + 'id': 'music-video-14-british-euro-brit-european-cumshots-swallow-652431', 'ext': 'mp4', 'title': 'Music Video 14 british euro brit european cumshots swallow', 'uploader': 'unknown', @@ -29,14 +26,18 @@ class ExtremeTubeIE(InfoExtractor): }, { 'url': 'http://www.extremetube.com/gay/video/abcde-1234', 'only_matching': True, + }, { + 'url': 'http://www.extremetube.com/video/latina-slut-fucked-by-fat-black-dick', + 'only_matching': True, + }, { + 'url': 'http://www.extremetube.com/video/652431', + 'only_matching': True, }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - url = 'http://www.' + mobj.group('url') + video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) @@ -49,20 +50,36 @@ class ExtremeTubeIE(InfoExtractor): r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>', webpage, 'view count', fatal=False)) - flash_vars = compat_parse_qs(self._search_regex( - r'<param[^>]+?name="flashvars"[^>]+?value="([^"]+)"', webpage, 'flash vars')) + flash_vars = self._parse_json( + self._search_regex( + r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flash vars'), + video_id) formats = [] - quality = qualities(['180p', '240p', '360p', '480p', '720p', '1080p']) - for k, vals in flash_vars.items(): - m = re.match(r'quality_(?P<quality>[0-9]+p)$', k) - if m is not None: - formats.append({ - 'format_id': m.group('quality'), - 'quality': quality(m.group('quality')), - 'url': vals[0], + for quality_key, video_url in flash_vars.items(): + height = int_or_none(self._search_regex( + r'quality_(\d+)[pP]$', quality_key, 'height', default=None)) + if not height: + continue + f = { + 'url': video_url, + } + mobj = re.search( + r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url) + if mobj: + height = int(mobj.group('height')) + bitrate = int(mobj.group('bitrate')) + f.update({ + 'format_id': '%dp-%dk' % (height, bitrate), + 'height': height, + 'tbr': bitrate, }) - + else: + f.update({ + 'format_id': '%dp' % height, + 'height': height, + }) + formats.append(f) self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index e17bb9aea..39c481068 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -7,16 +7,17 @@ import socket from .common import InfoExtractor from ..compat import ( compat_http_client, - compat_str, compat_urllib_error, compat_urllib_parse_unquote, - compat_urllib_request, ) from ..utils import ( + error_to_compat_str, ExtractorError, - int_or_none, limit_length, + sanitized_Request, urlencode_postdata, + get_element_by_id, + clean_html, ) @@ -42,6 +43,7 @@ class FacebookIE(InfoExtractor): 'id': '637842556329505', 'ext': 'mp4', 'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam', + 'uploader': 'Tennis on Facebook', } }, { 'note': 'Video without discernible title', @@ -50,6 +52,7 @@ class FacebookIE(InfoExtractor): 'id': '274175099429670', 'ext': 'mp4', 'title': 'Facebook video #274175099429670', + 'uploader': 'Asif Nawab Butt', }, 'expected_warnings': [ 'title' @@ -70,7 +73,7 @@ class FacebookIE(InfoExtractor): if useremail is None: return - login_page_req = compat_urllib_request.Request(self._LOGIN_URL) + login_page_req = sanitized_Request(self._LOGIN_URL) login_page_req.add_header('Cookie', 'locale=en_US') login_page = self._download_webpage(login_page_req, 
None, note='Downloading login page', @@ -91,7 +94,7 @@ class FacebookIE(InfoExtractor): 'timezone': '-60', 'trynum': '1', } - request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form)) + request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = self._download_webpage(request, None, @@ -106,14 +109,14 @@ class FacebookIE(InfoExtractor): r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'), 'name_action_selected': 'dont_save', } - check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) + check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.report_warning('unable to log in: %s' % compat_str(err)) + self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err)) return def _real_initialize(self): @@ -138,16 +141,20 @@ class FacebookIE(InfoExtractor): data = dict(json.loads(m.group(1))) params_raw = compat_urllib_parse_unquote(data['params']) params = json.loads(params_raw) - video_data = params['video_data'][0] formats = [] - for quality in ['sd', 'hd']: - src = video_data.get('%s_src' % quality) - if src is not None: - formats.append({ - 'format_id': quality, - 'url': src, - }) + for format_id, f in params['video_data'].items(): + if not f or not isinstance(f, list): + continue + for quality in ('sd', 'hd'): + for src_type in ('src', 'src_no_ratelimit'): + src = f[0].get('%s_%s' % (quality, src_type)) + if src: + formats.append({ + 'format_id': '%s_%s_%s' % (format_id, quality, src_type), + 'url': src, + 'preference': -10 if format_id == 'progressive' else 0, + }) if not formats: raise ExtractorError('Cannot find video formats') @@ -157,15 +164,15 @@ class FacebookIE(InfoExtractor): if not video_title: video_title = self._html_search_regex( r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>', - webpage, 'alternative title', fatal=False) + webpage, 'alternative title', default=None) video_title = limit_length(video_title, 80) if not video_title: video_title = 'Facebook video #%s' % video_id + uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage)) return { 'id': video_id, 'title': video_title, 'formats': formats, - 'duration': int_or_none(video_data.get('video_duration')), - 'thumbnail': video_data.get('thumbnail_src'), + 'uploader': uploader, } diff --git a/youtube_dl/extractor/faz.py b/youtube_dl/extractor/faz.py index cebdd0193..6f9b003c2 100644 --- a/youtube_dl/extractor/faz.py +++ b/youtube_dl/extractor/faz.py @@ -2,6 +2,11 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..utils import ( + xpath_element, + xpath_text, + int_or_none, +) class FazIE(InfoExtractor): @@ -37,31 +42,32 @@ class FazIE(InfoExtractor): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) + description = self._og_search_description(webpage) config_xml_url = self._search_regex( - 
r'writeFLV\(\'(.+?)\',', webpage, 'config xml url') + r'videoXMLURL\s*=\s*"([^"]+)', webpage, 'config xml url') config = self._download_xml( config_xml_url, video_id, 'Downloading config xml') - encodings = config.find('ENCODINGS') + encodings = xpath_element(config, 'ENCODINGS', 'encodings', True) formats = [] for pref, code in enumerate(['LOW', 'HIGH', 'HQ']): - encoding = encodings.find(code) - if encoding is None: - continue - encoding_url = encoding.find('FILENAME').text - formats.append({ - 'url': encoding_url, - 'format_id': code.lower(), - 'quality': pref, - }) + encoding = xpath_element(encodings, code) + if encoding: + encoding_url = xpath_text(encoding, 'FILENAME') + if encoding_url: + formats.append({ + 'url': encoding_url, + 'format_id': code.lower(), + 'quality': pref, + 'tbr': int_or_none(xpath_text(encoding, 'AVERAGEBITRATE')), + }) self._sort_formats(formats) - descr = self._html_search_regex( - r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False) return { 'id': video_id, 'title': self._og_search_title(webpage), 'formats': formats, - 'description': descr, - 'thumbnail': config.find('STILL/STILL_BIG').text, + 'description': description.strip() if description else None, + 'thumbnail': xpath_text(config, 'STILL/STILL_BIG'), + 'duration': int_or_none(xpath_text(config, 'DURATION')), } diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py index 1ccc1a964..4c81271d3 100644 --- a/youtube_dl/extractor/fc2.py +++ b/youtube_dl/extractor/fc2.py @@ -10,12 +10,14 @@ from ..compat import ( compat_urlparse, ) from ..utils import ( + encode_dict, ExtractorError, + sanitized_Request, ) class FC2IE(InfoExtractor): - _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)?content/(?P<id>[^/]+)' + _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)*content/(?P<id>[^/]+)' IE_NAME = 'fc2' _NETRC_MACHINE = 'fc2' _TESTS = [{ @@ -35,8 +37,11 @@ class FC2IE(InfoExtractor): 'params': { 'username': 'ytdl@yt-dl.org', 'password': '(snip)', - 'skip': 'requires actual password' - } + }, + 'skip': 'requires actual password', + }, { + 'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF', + 'only_matching': True, }] def _login(self): @@ -52,11 +57,8 @@ class FC2IE(InfoExtractor): 'Submit': ' Login ', } - # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode - # chokes on unicode - login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items()) - login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8') - request = compat_urllib_request.Request( + login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8') + request = sanitized_Request( 'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data) login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in') @@ -65,7 +67,7 @@ class FC2IE(InfoExtractor): return False # this is also needed - login_redir = compat_urllib_request.Request('http://id.fc2.com/?mode=redirect&login=done') + login_redir = sanitized_Request('http://id.fc2.com/?mode=redirect&login=done') self._download_webpage( login_redir, None, note='Login redirect', errnote='Login redirect failed') @@ -80,13 +82,13 @@ class FC2IE(InfoExtractor): title = self._og_search_title(webpage) thumbnail = self._og_search_thumbnail(webpage) - refer = url.replace('/content/', '/a/content/') + refer = url.replace('/content/', '/a/content/') if '/a/content/' not in url else url mimi = hashlib.md5((video_id + 
'_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest() info_url = ( "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&". - format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E'))) + format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E'))) info_webpage = self._download_webpage( info_url, video_id, note='Downloading info page') diff --git a/youtube_dl/extractor/fczenit.py b/youtube_dl/extractor/fczenit.py new file mode 100644 index 000000000..f1f150ef2 --- /dev/null +++ b/youtube_dl/extractor/fczenit.py @@ -0,0 +1,41 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class FczenitIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/gl(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://fc-zenit.ru/video/gl6785/', + 'md5': '458bacc24549173fe5a5aa29174a5606', + 'info_dict': { + 'id': '6785', + 'ext': 'mp4', + 'title': '«Зенит-ТВ»: как Олег Шатов играл против «Урала»', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_title = self._html_search_regex(r'<div class=\"photoalbum__title\">([^<]+)', webpage, 'title') + + bitrates_raw = self._html_search_regex(r'bitrates:.*\n(.*)\]', webpage, 'video URL') + bitrates = re.findall(r'url:.?\'(.+?)\'.*?bitrate:.?([0-9]{3}?)', bitrates_raw) + + formats = [{ + 'url': furl, + 'tbr': tbr, + } for furl, tbr in bitrates] + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_title, + 'formats': formats, + } diff --git a/youtube_dl/extractor/fivemin.py b/youtube_dl/extractor/fivemin.py index 157094e8c..2955965d9 100644 --- a/youtube_dl/extractor/fivemin.py +++ b/youtube_dl/extractor/fivemin.py @@ -2,11 +2,15 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( - compat_str, compat_urllib_parse, + compat_parse_qs, + compat_urllib_parse_urlparse, + compat_urlparse, ) from ..utils import ( ExtractorError, + parse_duration, + replace_extension, ) @@ -28,6 +32,7 @@ class FiveMinIE(InfoExtractor): 'id': '518013791', 'ext': 'mp4', 'title': 'iPad Mini with Retina Display Review', + 'duration': 177, }, }, { @@ -38,9 +43,52 @@ class FiveMinIE(InfoExtractor): 'id': '518086247', 'ext': 'mp4', 'title': 'How to Make a Next-Level Fruit Salad', + 'duration': 184, }, }, ] + _ERRORS = { + 'ErrorVideoNotExist': 'We\'re sorry, but the video you are trying to watch does not exist.', + 'ErrorVideoNoLongerAvailable': 'We\'re sorry, but the video you are trying to watch is no longer available.', + 'ErrorVideoRejected': 'We\'re sorry, but the video you are trying to watch has been removed.', + 'ErrorVideoUserNotGeo': 'We\'re sorry, but the video you are trying to watch cannot be viewed from your current location.', + 'ErrorVideoLibraryRestriction': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.', + 'ErrorExposurePermission': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.', + } + _QUALITIES = { + 1: { + 'width': 640, + 'height': 360, + }, + 2: { + 'width': 854, + 'height': 480, + }, + 4: { + 'width': 1280, + 'height': 720, + }, + 8: { + 'width': 1920, + 'height': 1080, + }, + 16: { + 'width': 640, + 'height': 360, + }, + 32: { + 'width': 854, + 'height': 480, + }, + 64: { + 'width': 1280, + 'height': 720, 
+ }, + 128: { + 'width': 640, + 'height': 360, + }, + } def _real_extract(self, url): video_id = self._match_id(url) @@ -59,26 +107,36 @@ class FiveMinIE(InfoExtractor): 'https://syn.5min.com/handlers/SenseHandler.ashx?' + query, video_id) if not response['success']: - err_msg = response['errorMessage'] - if err_msg == 'ErrorVideoUserNotGeo': - msg = 'Video not available from your location' - else: - msg = 'Aol said: %s' % err_msg - raise ExtractorError(msg, expected=True, video_id=video_id) + raise ExtractorError( + '%s said: %s' % ( + self.IE_NAME, + self._ERRORS.get(response['errorMessage'], response['errorMessage'])), + expected=True) info = response['binding'][0] - second_id = compat_str(int(video_id[:-2]) + 1) formats = [] - for quality, height in [(1, 320), (2, 480), (4, 720), (8, 1080)]: - if any(r['ID'] == quality for r in info['Renditions']): + parsed_video_url = compat_urllib_parse_urlparse(compat_parse_qs( + compat_urllib_parse_urlparse(info['EmbededURL']).query)['videoUrl'][0]) + for rendition in info['Renditions']: + if rendition['RenditionType'] == 'm3u8': + formats.extend(self._extract_m3u8_formats(rendition['Url'], video_id, m3u8_id='hls')) + elif rendition['RenditionType'] == 'aac': + continue + else: + rendition_url = compat_urlparse.urlunparse(parsed_video_url._replace(path=replace_extension(parsed_video_url.path.replace('//', '/%s/' % rendition['ID']), rendition['RenditionType']))) + quality = self._QUALITIES.get(rendition['ID'], {}) formats.append({ - 'format_id': compat_str(quality), - 'url': 'http://avideos.5min.com/%s/%s/%s_%s.mp4' % (second_id[-3:], second_id, video_id, quality), - 'height': height, + 'format_id': '%s-%d' % (rendition['RenditionType'], rendition['ID']), + 'url': rendition_url, + 'width': quality.get('width'), + 'height': quality.get('height'), }) + self._sort_formats(formats) return { 'id': video_id, 'title': info['Title'], + 'thumbnail': info.get('ThumbURL'), + 'duration': parse_duration(info.get('Duration')), 'formats': formats, } diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py index 190d9f9ad..5f6e65dae 100644 --- a/youtube_dl/extractor/fktv.py +++ b/youtube_dl/extractor/fktv.py @@ -1,13 +1,10 @@ from __future__ import unicode_literals -import re -import random -import json - from .common import InfoExtractor from ..utils import ( - get_element_by_id, clean_html, + determine_ext, + js_to_json, ) @@ -17,66 +14,38 @@ class FKTVIE(InfoExtractor): _TEST = { 'url': 'http://fernsehkritik.tv/folge-1', + 'md5': '21f0b0c99bce7d5b524eb1b17b1c6d79', 'info_dict': { - 'id': '00011', - 'ext': 'flv', + 'id': '1', + 'ext': 'mp4', 'title': 'Folge 1 vom 10. 
April 2007', - 'description': 'md5:fb4818139c7cfe6907d4b83412a6864f', + 'thumbnail': 're:^https?://.*\.jpg$', }, } def _real_extract(self, url): - episode = int(self._match_id(url)) - - video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%s.jpg' % episode - start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%s/Start' % episode, - episode) - playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage, - 'playlist', flags=re.DOTALL) - files = json.loads(re.sub('{[^{}]*?}', '{}', playlist)) - - videos = [] - for i, _ in enumerate(files, 1): - video_id = '%04d%d' % (episode, i) - video_url = 'http://fernsehkritik.tv/js/directme.php?file=%s%s.flv' % (episode, '' if i == 1 else '-%d' % i) - videos.append({ - 'ext': 'flv', - 'id': video_id, - 'url': video_url, - 'title': clean_html(get_element_by_id('eptitle', start_webpage)), - 'description': clean_html(get_element_by_id('contentlist', start_webpage)), - 'thumbnail': video_thumbnail - }) - return { - '_type': 'multi_video', - 'entries': videos, - 'id': 'folge-%s' % episode, - } - - -class FKTVPosteckeIE(InfoExtractor): - IE_NAME = 'fernsehkritik.tv:postecke' - _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)' - _TEST = { - 'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120', - 'md5': '262f0adbac80317412f7e57b4808e5c4', - 'info_dict': { - 'id': '0120', - 'ext': 'flv', - 'title': 'Postecke 120', - } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - episode = int(mobj.group('ep')) + episode = self._match_id(url) + + webpage = self._download_webpage( + 'http://fernsehkritik.tv/folge-%s/play' % episode, episode) + title = clean_html(self._html_search_regex( + '<h3>([^<]+)</h3>', webpage, 'title')) + thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False) + sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json) + + formats = [] + for source in sources: + furl = source.get('src') + if furl: + formats.append({ + 'url': furl, + 'format_id': determine_ext(furl), + }) + self._sort_formats(formats) - server = random.randint(2, 4) - video_id = '%04d' % episode - video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode) - video_title = 'Postecke %d' % episode return { - 'id': video_id, - 'url': video_url, - 'title': video_title, + 'id': episode, + 'title': title, + 'formats': formats, + 'thumbnail': thumbnail, } diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py index 2fe76d661..452b27b26 100644 --- a/youtube_dl/extractor/flickr.py +++ b/youtube_dl/extractor/flickr.py @@ -1,67 +1,87 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor -from ..compat import compat_urllib_request +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, - find_xpath_attr, + int_or_none, + qualities, ) class FlickrIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*' + _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/[\w\-_@]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/', - 'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b', + 'md5': '164fe3fa6c22e18d448d4d5af2330f31', 'info_dict': { 'id': '5645318632', - 'ext': 'mp4', - "description": "Waterfalls in the 
Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", - "uploader_id": "forestwander-nature-pictures", - "title": "Dark Hollow Waterfalls" + 'ext': 'mpg', + 'description': 'Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.', + 'title': 'Dark Hollow Waterfalls', + 'duration': 19, + 'timestamp': 1303528740, + 'upload_date': '20110423', + 'uploader_id': '10922353@N03', + 'uploader': 'Forest Wander', + 'comment_count': int, + 'view_count': int, + 'tags': list, } } - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) + _API_BASE_URL = 'https://api.flickr.com/services/rest?' - video_id = mobj.group('id') - video_uploader_id = mobj.group('uploader_id') - webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id - req = compat_urllib_request.Request(webpage_url) - req.add_header( - 'User-Agent', - # it needs a more recent version - 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20150101 Firefox/38.0 (Chrome)') - webpage = self._download_webpage(req, video_id) + def _call_api(self, method, video_id, api_key, note, secret=None): + query = { + 'photo_id': video_id, + 'method': 'flickr.%s' % method, + 'api_key': api_key, + 'format': 'json', + 'nojsoncallback': 1, + } + if secret: + query['secret'] = secret + data = self._download_json(self._API_BASE_URL + compat_urllib_parse.urlencode(query), video_id, note) + if data['stat'] != 'ok': + raise ExtractorError(data['message']) + return data - secret = self._search_regex(r'secret"\s*:\s*"(\w+)"', webpage, 'secret') + def _real_extract(self, url): + video_id = self._match_id(url) - first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self' - first_xml = self._download_xml(first_url, video_id, 'Downloading first data webpage') + api_key = self._download_json('https://www.flickr.com/hermes_error_beacon.gne', video_id, 'Downloading api key',)['site_key'] - node_id = find_xpath_attr( - first_xml, './/{http://video.yahoo.com/YEP/1.0/}Item', 'id', - 'id').text + video_info = self._call_api('photos.getInfo', video_id, api_key, 'Downloading video info')['photo'] + if video_info['media'] == 'video': + streams = self._call_api('video.getStreamInfo', video_id, api_key, 'Downloading streams info', video_info['secret'])['streams'] - second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1' - second_xml = self._download_xml(second_url, video_id, 'Downloading second data webpage') + preference = qualities(['iphone_wifi', '700', 'appletv', 'orig']) - self.report_extraction(video_id) + formats = [] + for stream in streams['stream']: + stream_type = str(stream.get('type')) + formats.append({ + 'format_id': stream_type, + 'url': stream['_content'], + 'preference': preference(stream_type), + }) + self._sort_formats(formats) - stream = second_xml.find('.//STREAM') - if stream is None: - raise ExtractorError('Unable to extract video url') - video_url = stream.attrib['APP'] + stream.attrib['FULLPATH'] + owner = video_info.get('owner', {}) - return { - 'id': video_id, - 'url': video_url, - 'ext': 'mp4', - 'title': self._og_search_title(webpage), - 
'description': self._og_search_description(webpage), - 'thumbnail': self._og_search_thumbnail(webpage), - 'uploader_id': video_uploader_id, - } + return { + 'id': video_id, + 'title': video_info['title']['_content'], + 'description': video_info.get('description', {}).get('_content'), + 'formats': formats, + 'timestamp': int_or_none(video_info.get('dateuploaded')), + 'duration': int_or_none(video_info.get('video', {}).get('duration')), + 'uploader_id': owner.get('nsid'), + 'uploader': owner.get('realname'), + 'comment_count': int_or_none(video_info.get('comments', {}).get('_content')), + 'view_count': int_or_none(video_info.get('views')), + 'tags': [tag.get('_content') for tag in video_info.get('tags', {}).get('tag', [])] + } + else: + raise ExtractorError('not a video', expected=True) diff --git a/youtube_dl/extractor/folketinget.py b/youtube_dl/extractor/folketinget.py index 0fb29de75..75399fa7d 100644 --- a/youtube_dl/extractor/folketinget.py +++ b/youtube_dl/extractor/folketinget.py @@ -30,6 +30,10 @@ class FolketingetIE(InfoExtractor): 'upload_date': '20141120', 'duration': 3960, }, + 'params': { + # rtmp download + 'skip_download': True, + }, } def _real_extract(self, url): diff --git a/youtube_dl/extractor/footyroom.py b/youtube_dl/extractor/footyroom.py index 4c7dbca40..370fd006f 100644 --- a/youtube_dl/extractor/footyroom.py +++ b/youtube_dl/extractor/footyroom.py @@ -13,6 +13,7 @@ class FootyRoomIE(InfoExtractor): 'title': 'Schalke 04 0 – 2 Real Madrid', }, 'playlist_count': 3, + 'skip': 'Video for this match is not available', }, { 'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/', 'info_dict': { diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py index b2284ab01..fc4a5a0fb 100644 --- a/youtube_dl/extractor/fourtube.py +++ b/youtube_dl/extractor/fourtube.py @@ -3,12 +3,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( parse_duration, parse_iso8601, + sanitized_Request, str_to_int, ) @@ -32,6 +30,7 @@ class FourTubeIE(InfoExtractor): 'view_count': int, 'like_count': int, 'categories': list, + 'age_limit': 18, } } @@ -45,10 +44,10 @@ class FourTubeIE(InfoExtractor): thumbnail = self._html_search_meta('thumbnailUrl', webpage) uploader_id = self._html_search_regex( r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">', - webpage, 'uploader id') + webpage, 'uploader id', fatal=False) uploader = self._html_search_regex( r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">', - webpage, 'uploader') + webpage, 'uploader', fatal=False) categories_html = self._search_regex( r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>', @@ -67,13 +66,24 @@ class FourTubeIE(InfoExtractor): webpage, 'like count', fatal=False)) duration = parse_duration(self._html_search_meta('duration', webpage)) - params_js = self._search_regex( - r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)', - webpage, 'initialization parameters' - ) - params = self._parse_json('[%s]' % params_js, video_id) - media_id = params[0] - sources = ['%s' % p for p in params[2]] + media_id = self._search_regex( + r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=', webpage, + 'media id', default=None, group='id') + sources = [ + quality + for _, quality in re.findall(r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)] + if not (media_id and sources): + player_js = 
self._download_webpage( + self._search_regex( + r'<script[^>]id=(["\'])playerembed\1[^>]+src=(["\'])(?P<url>.+?)\2', + webpage, 'player JS', group='url'), + video_id, 'Downloading player JS') + params_js = self._search_regex( + r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)', + player_js, 'initialization parameters') + params = self._parse_json('[%s]' % params_js, video_id) + media_id = params[0] + sources = ['%s' % p for p in params[2]] token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format( media_id, '+'.join(sources)) @@ -81,7 +91,7 @@ class FourTubeIE(InfoExtractor): b'Content-Type': b'application/x-www-form-urlencoded', b'Origin': b'http://www.4tube.com', } - token_req = compat_urllib_request.Request(token_url, b'{}', headers) + token_req = sanitized_Request(token_url, b'{}', headers) tokens = self._download_json(token_req, video_id) formats = [{ 'url': tokens[format]['token'], diff --git a/youtube_dl/extractor/foxnews.py b/youtube_dl/extractor/foxnews.py index 917f76b1e..3a4a59135 100644 --- a/youtube_dl/extractor/foxnews.py +++ b/youtube_dl/extractor/foxnews.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..utils import ( parse_iso8601, @@ -8,7 +10,8 @@ from ..utils import ( class FoxNewsIE(InfoExtractor): - _VALID_URL = r'https?://video\.foxnews\.com/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)' + IE_DESC = 'Fox News and Fox Business Video' + _VALID_URL = r'https?://(?P<host>video\.fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)' _TESTS = [ { 'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips', @@ -42,13 +45,19 @@ class FoxNewsIE(InfoExtractor): 'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com', 'only_matching': True, }, + { + 'url': 'http://video.foxbusiness.com/v/4442309889001', + 'only_matching': True, + }, ] def _real_extract(self, url): - video_id = self._match_id(url) + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + host = mobj.group('host') video = self._download_json( - 'http://video.foxnews.com/v/feed/video/%s.js?template=fox' % video_id, video_id) + 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id), video_id) item = video['channel']['item'] title = item['title'] diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py index 75723c00d..8e60cf60f 100644 --- a/youtube_dl/extractor/francetv.py +++ b/youtube_dl/extractor/francetv.py @@ -78,28 +78,48 @@ class FranceTVBaseInfoExtractor(InfoExtractor): }) self._sort_formats(formats) + title = info['titre'] + subtitle = info.get('sous_titre') + if subtitle: + title += ' - %s' % subtitle + + subtitles = {} + subtitles_list = [{ + 'url': subformat['url'], + 'ext': subformat.get('format'), + } for subformat in info.get('subtitles', []) if subformat.get('url')] + if subtitles_list: + subtitles['fr'] = subtitles_list + return { 'id': video_id, - 'title': info['titre'], + 'title': title, 'description': clean_html(info['synopsis']), 'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']), 'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']), 'timestamp': int_or_none(info['diffusion']['timestamp']), 'formats': formats, + 'subtitles': subtitles, } class PluzzIE(FranceTVBaseInfoExtractor): IE_NAME = 'pluzz.francetv.fr' - _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html' + _VALID_URL = r'https?://(?:m\.)?pluzz\.francetv\.fr/videos/(?P<id>.+?)\.html' # 
Can't use tests, videos expire in 7 days def _real_extract(self, url): - title = re.match(self._VALID_URL, url).group(1) - webpage = self._download_webpage(url, title) - video_id = self._search_regex( - r'data-diffusion="(\d+)"', webpage, 'ID') + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + + video_id = self._html_search_meta( + 'id_video', webpage, 'video id', default=None) + if not video_id: + video_id = self._search_regex( + r'data-diffusion=["\'](\d+)', webpage, 'video id') + return self._extract_video(video_id, 'Pluzz') @@ -115,6 +135,9 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor): 'title': 'Soir 3', 'upload_date': '20130826', 'timestamp': 1377548400, + 'subtitles': { + 'fr': 'mincount:2', + }, }, }, { 'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html', @@ -214,15 +237,15 @@ class FranceTVIE(FranceTVBaseInfoExtractor): }, # france5 { - 'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968', - 'md5': '78f0f4064f9074438e660785bbf2c5d9', + 'url': 'http://www.france5.fr/emissions/c-a-dire/videos/quels_sont_les_enjeux_de_cette_rentree_politique__31-08-2015_908948?onglet=tous&page=1', + 'md5': 'f6c577df3806e26471b3d21631241fd0', 'info_dict': { - 'id': '108961659', + 'id': '123327454', 'ext': 'flv', - 'title': 'C à dire ?!', - 'description': 'md5:1a4aeab476eb657bf57c4ff122129f81', - 'upload_date': '20140915', - 'timestamp': 1410795000, + 'title': 'C à dire ?! - Quels sont les enjeux de cette rentrée politique ?', + 'description': 'md5:4a0d5cb5dce89d353522a84462bae5a4', + 'upload_date': '20150831', + 'timestamp': 1441035120, }, }, # franceo diff --git a/youtube_dl/extractor/funimation.py b/youtube_dl/extractor/funimation.py new file mode 100644 index 000000000..d1a95d87f --- /dev/null +++ b/youtube_dl/extractor/funimation.py @@ -0,0 +1,193 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + clean_html, + determine_ext, + encode_dict, + int_or_none, + sanitized_Request, + ExtractorError, + urlencode_postdata +) + + +class FunimationIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?funimation\.com/shows/[^/]+/videos/(?:official|promotional)/(?P<id>[^/?#&]+)' + + _NETRC_MACHINE = 'funimation' + + _TESTS = [{ + 'url': 'http://www.funimation.com/shows/air/videos/official/breeze', + 'info_dict': { + 'id': '658', + 'display_id': 'breeze', + 'ext': 'mp4', + 'title': 'Air - 1 - Breeze', + 'description': 'md5:1769f43cd5fc130ace8fd87232207892', + 'thumbnail': 're:https?://.*\.jpg', + }, + }, { + 'url': 'http://www.funimation.com/shows/hacksign/videos/official/role-play', + 'info_dict': { + 'id': '31128', + 'display_id': 'role-play', + 'ext': 'mp4', + 'title': '.hack//SIGN - 1 - Role Play', + 'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd', + 'thumbnail': 're:https?://.*\.jpg', + }, + }, { + 'url': 'http://www.funimation.com/shows/attack-on-titan-junior-high/videos/promotional/broadcast-dub-preview', + 'info_dict': { + 'id': '9635', + 'display_id': 'broadcast-dub-preview', + 'ext': 'mp4', + 'title': 'Attack on Titan: Junior High - Broadcast Dub Preview', + 'description': 'md5:f8ec49c0aff702a7832cd81b8a44f803', + 'thumbnail': 're:https?://.*\.(?:jpg|png)', + }, + }] + + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + data = urlencode_postdata(encode_dict({ + 'email_field': username, + 'password_field': 
password, + })) + login_request = sanitized_Request('http://www.funimation.com/login', data, headers={ + 'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0', + 'Content-Type': 'application/x-www-form-urlencoded' + }) + login_page = self._download_webpage( + login_request, None, 'Logging in as %s' % username) + if any(p in login_page for p in ('funimation.com/logout', '>Log Out<')): + return + error = self._html_search_regex( + r'(?s)<div[^>]+id=["\']errorMessages["\'][^>]*>(.+?)</div>', + login_page, 'error messages', default=None) + if error: + raise ExtractorError('Unable to login: %s' % error, expected=True) + raise ExtractorError('Unable to log in') + + def _real_initialize(self): + self._login() + + def _real_extract(self, url): + display_id = self._match_id(url) + + errors = [] + formats = [] + + ERRORS_MAP = { + 'ERROR_MATURE_CONTENT_LOGGED_IN': 'matureContentLoggedIn', + 'ERROR_MATURE_CONTENT_LOGGED_OUT': 'matureContentLoggedOut', + 'ERROR_SUBSCRIPTION_LOGGED_OUT': 'subscriptionLoggedOut', + 'ERROR_VIDEO_EXPIRED': 'videoExpired', + 'ERROR_TERRITORY_UNAVAILABLE': 'territoryUnavailable', + 'SVODBASIC_SUBSCRIPTION_IN_PLAYER': 'basicSubscription', + 'SVODNON_SUBSCRIPTION_IN_PLAYER': 'nonSubscription', + 'ERROR_PLAYER_NOT_RESPONDING': 'playerNotResponding', + 'ERROR_UNABLE_TO_CONNECT_TO_CDN': 'unableToConnectToCDN', + 'ERROR_STREAM_NOT_FOUND': 'streamNotFound', + } + + USER_AGENTS = ( + # PC UA is served with m3u8 that provides some bonus lower quality formats + ('pc', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'), + # Mobile UA allows to extract direct links and also does not fail when + # PC UA fails with hulu error (e.g. + # http://www.funimation.com/shows/hacksign/videos/official/role-play) + ('mobile', 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'), + ) + + for kind, user_agent in USER_AGENTS: + request = sanitized_Request(url) + request.add_header('User-Agent', user_agent) + webpage = self._download_webpage( + request, display_id, 'Downloading %s webpage' % kind) + + playlist = self._parse_json( + self._search_regex( + r'var\s+playersData\s*=\s*(\[.+?\]);\n', + webpage, 'players data'), + display_id)[0]['playlist'] + + items = next(item['items'] for item in playlist if item.get('items')) + item = next(item for item in items if item.get('itemAK') == display_id) + + error_messages = {} + video_error_messages = self._search_regex( + r'var\s+videoErrorMessages\s*=\s*({.+?});\n', + webpage, 'error messages', default=None) + if video_error_messages: + error_messages_json = self._parse_json(video_error_messages, display_id, fatal=False) + if error_messages_json: + for _, error in error_messages_json.items(): + type_ = error.get('type') + description = error.get('description') + content = error.get('content') + if type_ == 'text' and description and content: + error_message = ERRORS_MAP.get(description) + if error_message: + error_messages[error_message] = content + + for video in item.get('videoSet', []): + auth_token = video.get('authToken') + if not auth_token: + continue + funimation_id = video.get('FUNImationID') or video.get('videoId') + preference = 1 if video.get('languageMode') == 'dub' else 0 + if not auth_token.startswith('?'): + auth_token = '?%s' % auth_token + for quality, height in (('sd', 480), ('hd', 720), ('hd1080', 1080)): + format_url = video.get('%sUrl' % quality) + if not format_url: + continue + if not 
format_url.startswith(('http', '//')): + errors.append(format_url) + continue + if determine_ext(format_url) == 'm3u8': + m3u8_formats = self._extract_m3u8_formats( + format_url + auth_token, display_id, 'mp4', entry_protocol='m3u8_native', + preference=preference, m3u8_id='%s-hls' % funimation_id, fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + else: + tbr = int_or_none(self._search_regex( + r'-(\d+)[Kk]', format_url, 'tbr', default=None)) + formats.append({ + 'url': format_url + auth_token, + 'format_id': '%s-http-%dp' % (funimation_id, height), + 'height': height, + 'tbr': tbr, + 'preference': preference, + }) + + if not formats and errors: + raise ExtractorError( + '%s returned error: %s' + % (self.IE_NAME, clean_html(error_messages.get(errors[0], errors[0]))), + expected=True) + + self._sort_formats(formats) + + title = item['title'] + artist = item.get('artist') + if artist: + title = '%s - %s' % (artist, title) + description = self._og_search_description(webpage) or item.get('description') + thumbnail = self._og_search_thumbnail(webpage) or item.get('posterUrl') + video_id = item.get('itemId') or display_id + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'formats': formats, + } diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py index dd87257c4..7f21d7410 100644 --- a/youtube_dl/extractor/funnyordie.py +++ b/youtube_dl/extractor/funnyordie.py @@ -45,15 +45,24 @@ class FunnyOrDieIE(InfoExtractor): links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0) - bitrates = self._html_search_regex(r'<source src="[^"]+/v,((?:\d+,)+)\.mp4\.csmil', webpage, 'video bitrates') - bitrates = [int(b) for b in bitrates.rstrip(',').split(',')] - bitrates.sort() + m3u8_url = self._search_regex( + r'<source[^>]+src=(["\'])(?P<url>.+?/master\.m3u8)\1', + webpage, 'm3u8 url', default=None, group='url') formats = [] + + m3u8_formats = self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + + bitrates = [int(bitrate) for bitrate in re.findall(r'[,/]v(\d+)[,/]', m3u8_url)] + bitrates.sort() + for bitrate in bitrates: for link in links: formats.append({ - 'url': '%s%d.%s' % (link[0], bitrate, link[1]), + 'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])), 'format_id': '%s-%d' % (link[1], bitrate), 'vbr': bitrate, }) diff --git a/youtube_dl/extractor/gameinformer.py b/youtube_dl/extractor/gameinformer.py new file mode 100644 index 000000000..25870c131 --- /dev/null +++ b/youtube_dl/extractor/gameinformer.py @@ -0,0 +1,43 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import int_or_none + + +class GameInformerIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>.+)\.aspx' + _TEST = { + 'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx', + 'info_dict': { + 'id': '4515472681001', + 'ext': 'm3u8', + 'title': 'Replay - Animal Crossing', + 'description': 'md5:2e211891b215c85d061adc7a4dd2d930', + 'timestamp': 1443457610706, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + bc_api_url = self._search_regex(r"getVideo\('([^']+)'", webpage, 
'brightcove api url') + json_data = self._download_json( + bc_api_url + '&video_fields=id,name,shortDescription,publishedDate,videoStillURL,length,IOSRenditions', + display_id) + + return { + 'id': compat_str(json_data['id']), + 'display_id': display_id, + 'url': json_data['IOSRenditions'][0]['url'], + 'title': json_data['name'], + 'description': json_data.get('shortDescription'), + 'timestamp': int_or_none(json_data.get('publishedDate')), + 'duration': int_or_none(json_data.get('length')), + } diff --git a/youtube_dl/extractor/gametrailers.py b/youtube_dl/extractor/gametrailers.py index a6ab795ae..c3f031d9c 100644 --- a/youtube_dl/extractor/gametrailers.py +++ b/youtube_dl/extractor/gametrailers.py @@ -1,19 +1,62 @@ from __future__ import unicode_literals -from .mtv import MTVServicesInfoExtractor +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_age_limit, + url_basename, +) -class GametrailersIE(MTVServicesInfoExtractor): - _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)' +class GametrailersIE(InfoExtractor): + _VALID_URL = r'http://www\.gametrailers\.com/videos/view/[^/]+/(?P<id>.+)' + _TEST = { - 'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer', - 'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7', + 'url': 'http://www.gametrailers.com/videos/view/gametrailers-com/116437-Just-Cause-3-Review', + 'md5': 'f28c4efa0bdfaf9b760f6507955b6a6a', 'info_dict': { - 'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d', + 'id': '2983958', 'ext': 'mp4', - 'title': 'E3 2013: Debut Trailer', - 'description': 'Faith is back! Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!', + 'display_id': '116437-Just-Cause-3-Review', + 'title': 'Just Cause 3 - Review', + 'description': 'It\'s a lot of fun to shoot at things and then watch them explode in Just Cause 3, but should there be more to the experience than that?', }, } - _FEED_URL = 'http://www.gametrailers.com/feeds/mrss' + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + title = self._html_search_regex( + r'<title>(.+?)\|', webpage, 'title').strip() + embed_url = self._proto_relative_url( + self._search_regex( + r'src=\'(//embed.gametrailers.com/embed/[^\']+)\'', webpage, + 'embed url'), + scheme='http:') + video_id = url_basename(embed_url) + embed_page = self._download_webpage(embed_url, video_id) + embed_vars_json = self._search_regex( + r'(?s)var embedVars = (\{.*?\})\s*</script>', embed_page, + 'embed vars') + info = self._parse_json(embed_vars_json, video_id) + + formats = [] + for media in info['media']: + if media['mediaPurpose'] == 'play': + formats.append({ + 'url': media['uri'], + 'height': media['height'], + 'width': media['width'], + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'formats': formats, + 'thumbnail': info.get('thumbUri'), + 'description': self._og_search_description(webpage), + 'duration': int_or_none(info.get('videoLengthInSeconds')), + 'age_limit': parse_age_limit(info.get('audienceRating')), + } diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py index 43f916412..3befd3e7b 100644 --- a/youtube_dl/extractor/gdcvault.py +++ b/youtube_dl/extractor/gdcvault.py @@ -3,11 +3,12 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( -
compat_urllib_parse, - compat_urllib_request, +from ..compat import compat_urllib_parse +from ..utils import ( + remove_end, + HEADRequest, + sanitized_Request, ) -from ..utils import remove_end class GDCVaultIE(InfoExtractor): @@ -73,10 +74,20 @@ class GDCVaultIE(InfoExtractor): return video_formats def _parse_flv(self, xml_description): - video_formats = [] + formats = [] akamai_url = xml_description.find('./metadata/akamaiHost').text + audios = xml_description.find('./metadata/audios') + if audios is not None: + for audio in audios: + formats.append({ + 'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url, + 'play_path': remove_end(audio.get('url'), '.flv'), + 'ext': 'flv', + 'vcodec': 'none', + 'format_id': audio.get('code'), + }) slide_video_path = xml_description.find('./metadata/slideVideo').text - video_formats.append({ + formats.append({ 'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url, 'play_path': remove_end(slide_video_path, '.flv'), 'ext': 'flv', @@ -86,7 +97,7 @@ class GDCVaultIE(InfoExtractor): 'format_id': 'slides', }) speaker_video_path = xml_description.find('./metadata/speakerVideo').text - video_formats.append({ + formats.append({ 'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url, 'play_path': remove_end(speaker_video_path, '.flv'), 'ext': 'flv', @@ -95,7 +106,7 @@ class GDCVaultIE(InfoExtractor): 'preference': -1, 'format_id': 'speaker', }) - return video_formats + return formats def _login(self, webpage_url, display_id): (username, password) = self._get_login_info() @@ -112,7 +123,7 @@ class GDCVaultIE(InfoExtractor): 'password': password, } - request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form)) + request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') self._download_webpage(request, display_id, 'Logging in') start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page') @@ -133,16 +144,18 @@ class GDCVaultIE(InfoExtractor): r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);', start_page, 'url', default=None) if direct_url: - video_url = 'http://www.gdcvault.com/' + direct_url title = self._html_search_regex( r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>', start_page, 'title') + video_url = 'http://www.gdcvault.com' + direct_url + # resolve the url so that we can detect the correct extension + head = self._request_webpage(HEADRequest(video_url), video_id) + video_url = head.geturl() return { 'id': video_id, 'display_id': display_id, 'url': video_url, - 'ext': 'flv', 'title': title, } @@ -168,8 +181,8 @@ class GDCVaultIE(InfoExtractor): # Fallback to the older format xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename') - xml_decription_url = xml_root + 'xml/' + xml_name - xml_description = self._download_xml(xml_decription_url, display_id) + xml_description_url = xml_root + 'xml/' + xml_name + xml_description = self._download_xml(xml_description_url, display_id) video_title = xml_description.find('./metadata/title').text video_formats = self._parse_mp4(xml_description) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index abd98e500..7cf13fddf 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -4,12 +4,13 @@ from __future__ import unicode_literals import os import re +import sys from .common import InfoExtractor from .youtube 
import YoutubeIE from ..compat import ( + compat_etree_fromstring, compat_urllib_parse_unquote, - compat_urllib_request, compat_urlparse, compat_xml_parse_error, ) @@ -20,7 +21,7 @@ from ..utils import ( HEADRequest, is_html, orderedSet, - parse_xml, + sanitized_Request, smuggle_url, unescapeHTML, unified_strdate, @@ -29,7 +30,10 @@ from ..utils import ( url_basename, xpath_text, ) -from .brightcove import BrightcoveIE +from .brightcove import ( + BrightcoveLegacyIE, + BrightcoveNewIE, +) from .nbc import NBCSportsVPlayerIE from .ooyala import OoyalaIE from .rutv import RUTVIE @@ -48,6 +52,9 @@ from .vimeo import VimeoIE from .dailymotion import DailymotionCloudIE from .onionstudios import OnionStudiosIE from .snagfilms import SnagFilmsEmbedIE +from .screenwavemedia import ScreenwaveMediaIE +from .mtv import MTVServicesEmbeddedIE +from .pladform import PladformIE from .googledrive import GoogleDriveIE @@ -131,6 +138,90 @@ class GenericIE(InfoExtractor): 'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624', } }, + # SMIL from http://videolectures.net/promogram_igor_mekjavic_eng + { + 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml', + 'info_dict': { + 'id': 'smil', + 'ext': 'mp4', + 'title': 'Automatics, robotics and biocybernetics', + 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', + 'upload_date': '20130627', + 'formats': 'mincount:16', + 'subtitles': 'mincount:1', + }, + 'params': { + 'force_generic_extractor': True, + 'skip_download': True, + }, + }, + # SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html + { + 'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil', + 'info_dict': { + 'id': 'hds', + 'ext': 'flv', + 'title': 'hds', + 'formats': 'mincount:1', + }, + 'params': { + 'skip_download': True, + }, + }, + # SMIL from https://www.restudy.dk/video/play/id/1637 + { + 'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml', + 'info_dict': { + 'id': 'video_1637', + 'ext': 'flv', + 'title': 'video_1637', + 'formats': 'mincount:3', + }, + 'params': { + 'skip_download': True, + }, + }, + # SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm + { + 'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil', + 'info_dict': { + 'id': 'smil-service', + 'ext': 'flv', + 'title': 'smil-service', + 'formats': 'mincount:1', + }, + 'params': { + 'skip_download': True, + }, + }, + # SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370 + { + 'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil', + 'info_dict': { + 'id': '4719370', + 'ext': 'mp4', + 'title': '571de1fd-47bc-48db-abf9-238872a58d1f', + 'formats': 'mincount:3', + }, + 'params': { + 'skip_download': True, + }, + }, + # XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html + { + 'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf', + 'info_dict': { + 'id': 'mZlp2ctYIUEB', + 'ext': 'mp4', + 'title': 'Tikibad ontruimd wegens brand', + 'description': 'md5:05ca046ff47b931f9b04855015e163a4', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 33, + }, + 'params': { + 'skip_download': True, + }, + }, # google redirect { 'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE', @@ -148,6 +239,22 @@ 
class GenericIE(InfoExtractor): } }, { + # redirect in Refresh HTTP header + 'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1', + 'info_dict': { + 'id': 'pO8h3EaFRdo', + 'ext': 'mp4', + 'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set', + 'description': 'md5:6294cc1af09c4049e0652b51a2df10d5', + 'upload_date': '20150917', + 'uploader_id': 'brtvofficial', + 'uploader': 'Boiler Room', + }, + 'params': { + 'skip_download': False, + }, + }, + { 'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html', 'md5': '85b90ccc9d73b4acd9138d3af4c27f89', 'info_dict': { @@ -173,7 +280,7 @@ class GenericIE(InfoExtractor): # it also tests brightcove videos that need to set the 'Referer' in the # http requests { - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', 'info_dict': { 'id': '2765128793001', @@ -197,7 +304,7 @@ class GenericIE(InfoExtractor): 'uploader': 'thestar.com', 'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.', }, - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], }, { 'url': 'http://www.championat.com/video/football/v/87/87499.html', @@ -212,7 +319,7 @@ class GenericIE(InfoExtractor): }, { # https://github.com/rg3/youtube-dl/issues/3541 - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1', 'info_dict': { 'id': '3866516442001', @@ -234,9 +341,24 @@ class GenericIE(InfoExtractor): 'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ', 'ext': 'mp4', 'title': '2cc213299525360.mov', # that's what we get + 'duration': 238.231, }, 'add_ie': ['Ooyala'], }, + { + # ooyala video embedded with http://player.ooyala.com/iframe.js + 'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/', + 'info_dict': { + 'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB', + 'ext': 'mp4', + 'title': '"Steve Jobs: Man in the Machine" trailer', + 'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."', + 'duration': 135.427, + }, + 'params': { + 'skip_download': True, + }, + }, # multiple ooyala embeds on SBN network websites { 'url': 'http://www.sbnation.com/college-football-recruiting/2015/2/3/7970291/national-signing-day-rationalizations-itll-be-ok-itll-be-ok', @@ -277,14 +399,6 @@ class GenericIE(InfoExtractor): 'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.', }, }, - # BBC iPlayer embeds - { - 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/posts/BUGGER', - 'info_dict': { - 'title': 'BBC - Blogs - Adam Curtis - BUGGER', - }, - 'playlist_mincount': 18, - }, # RUTV embed { 'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html', @@ -713,6 +827,19 @@ class GenericIE(InfoExtractor): 'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? 
Conference 2014', }, }, + # Kaltura embed protected with referrer + { + 'url': 'http://www.disney.nl/disney-channel/filmpjes/achter-de-schermen#/videoId/violetta-achter-de-schermen-ruggero', + 'info_dict': { + 'id': '1_g4fbemnq', + 'ext': 'mp4', + 'title': 'Violetta - Achter De Schermen - Ruggero', + 'description': 'Achter de schermen met Ruggero', + 'timestamp': 1435133761, + 'upload_date': '20150624', + 'uploader_id': 'echojecka', + }, + }, # Eagle.Platform embed (generic URL) { 'url': 'http://lenta.ru/news/2015/03/06/navalny/', @@ -837,8 +964,9 @@ class GenericIE(InfoExtractor): 'info_dict': { 'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs', 'ext': 'mp4', - 'description': 'VIDEO: Index/Match versus VLOOKUP.', + 'description': 'VIDEO: INDEX/MATCH versus VLOOKUP.', 'title': 'This is what separates the Excel masters from the wannabes', + 'duration': 191.933, }, 'params': { # m3u8 downloads @@ -914,6 +1042,41 @@ class GenericIE(InfoExtractor): 'description': 'New experience with Acrobat DC', 'duration': 248.667, }, + }, + # ScreenwaveMedia embed + { + 'url': 'http://www.thecinemasnob.com/the-cinema-snob/a-nightmare-on-elm-street-2-freddys-revenge1', + 'md5': '24ace5baba0d35d55c6810b51f34e9e0', + 'info_dict': { + 'id': 'cinemasnob-55d26273809dd', + 'ext': 'mp4', + 'title': 'cinemasnob', + }, + }, + # BrightcoveInPageEmbed embed + { + 'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/', + 'info_dict': { + 'id': '4238694884001', + 'ext': 'flv', + 'title': 'Tabletop: Dread, Last Thoughts', + 'description': 'Tabletop: Dread, Last Thoughts', + 'duration': 51690, + }, + }, + # JWPlayer with M3U8 + { + 'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video', + 'info_dict': { + 'id': 'playlist', + 'ext': 'mp4', + 'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ', + 'uploader': 'ren.tv', + }, + 'params': { + # m3u8 downloads + 'skip_download': True, + } } ] @@ -1057,7 +1220,7 @@ class GenericIE(InfoExtractor): full_response = None if head_response is False: - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('Accept-Encoding', '*') full_response = self._request_webpage(request, video_id) head_response = full_response @@ -1086,7 +1249,7 @@ class GenericIE(InfoExtractor): '%s on generic information extractor.' % ('Forcing' if force else 'Falling back')) if not full_response: - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) # Some webservers may serve compressed content of rather big size (e.g. gzipped flac) # making it impossible to download only chunk of the file (yet we need only 512kB to # test whether it's HTML or not). According to youtube-dl default Accept-Encoding @@ -1119,11 +1282,15 @@ class GenericIE(InfoExtractor): self.report_extraction(video_id) - # Is it an RSS feed? + # Is it an RSS feed, a SMIL file or a XSPF playlist? 
try: - doc = parse_xml(webpage) + doc = compat_etree_fromstring(webpage.encode('utf-8')) if doc.tag == 'rss': return self._extract_rss(url, video_id, doc) + elif re.match(r'^(?:{[^}]+})?smil$', doc.tag): + return self._parse_smil(doc, url, video_id) + elif doc.tag == '{http://xspf.org/ns/0/}playlist': + return self.playlist_result(self._parse_xspf(doc, video_id), video_id) except compat_xml_parse_error: pass @@ -1169,14 +1336,14 @@ class GenericIE(InfoExtractor): return self.playlist_result( urlrs, playlist_id=video_id, playlist_title=video_title) - # Look for BrightCove: - bc_urls = BrightcoveIE._extract_brightcove_urls(webpage) + # Look for Brightcove Legacy Studio embeds + bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage) if bc_urls: self.to_screen('Brightcove video detected.') entries = [{ '_type': 'url', 'url': smuggle_url(bc_url, {'Referer': url}), - 'ie_key': 'Brightcove' + 'ie_key': 'BrightcoveLegacy' } for bc_url in bc_urls] return { @@ -1186,6 +1353,11 @@ class GenericIE(InfoExtractor): 'entries': entries, } + # Look for Brightcove New Studio embeds + bc_urls = BrightcoveNewIE._extract_urls(webpage) + if bc_urls: + return _playlist_from_matches(bc_urls, ie='BrightcoveNew') + # Look for embedded rtl.nl player matches = re.findall( r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"', @@ -1329,12 +1501,12 @@ class GenericIE(InfoExtractor): return self.url_result(mobj.group('url')) # Look for Ooyala videos - mobj = (re.search(r'player\.ooyala\.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or + mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage)) if mobj is not None: - return OoyalaIE._build_url_result(mobj.group('ec')) + return OoyalaIE._build_url_result(smuggle_url(mobj.group('ec'), {'domain': url})) # Look for multiple Ooyala embeds on SBN network websites mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage) @@ -1342,7 +1514,7 @@ class GenericIE(InfoExtractor): embeds = self._parse_json(mobj.group(1), video_id, fatal=False) if embeds: return _playlist_from_matches( - embeds, getter=lambda v: OoyalaIE._url_for_embed_code(v['provider_video_id']), ie='Ooyala') + embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala') # Look for Aparat videos mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage) @@ -1492,12 +1664,9 @@ class GenericIE(InfoExtractor): return self.url_result(url, ie='Vulture') # Look for embedded mtvservices player - mobj = re.search( - r'<iframe src="(?P<url>https?://media\.mtvnservices\.com/embed/[^"]+)"', - webpage) - if mobj is not None: - url = unescapeHTML(mobj.group('url')) - return self.url_result(url, ie='MTVServicesEmbedded') + mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage) + if mtvservices_url: + return self.url_result(mtvservices_url, ie='MTVServicesEmbedded') # Look for embedded yahoo player mobj = re.search( @@ -1536,7 +1705,7 @@ class GenericIE(InfoExtractor): return self.url_result(mobj.group('url'), 'MLB') mobj = re.search( - r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL, + r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % 
CondeNastIE.EMBED_URL, webpage) if mobj is not None: return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast') @@ -1554,10 +1723,12 @@ class GenericIE(InfoExtractor): return self.url_result(mobj.group('url'), 'Zapiks') # Look for Kaltura embeds - mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_id'\s*:\s*'(?P<id>[^']+)',", webpage) or - re.search(r'(?s)(["\'])(?:https?:)?//cdnapisec\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?\1.*?entry_id\s*:\s*(["\'])(?P<id>[^\2]+?)\2', webpage)) + mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_?[Ii]d'\s*:\s*'(?P<id>[^']+)',", webpage) or + re.search(r'(?s)(?P<q1>["\'])(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?(?P=q1).*?entry_?[Ii]d\s*:\s*(?P<q2>["\'])(?P<id>.+?)(?P=q2)', webpage)) if mobj is not None: - return self.url_result('kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), 'Kaltura') + return self.url_result(smuggle_url( + 'kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), + {'source_url': url}), 'Kaltura') # Look for Eagle.Platform embeds mobj = re.search( @@ -1572,10 +1743,9 @@ class GenericIE(InfoExtractor): return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform') # Look for Pladform embeds - mobj = re.search( - r'<iframe[^>]+src="(?P<url>https?://out\.pladform\.ru/player\?.+?)"', webpage) - if mobj is not None: - return self.url_result(mobj.group('url'), 'Pladform') + pladform_url = PladformIE._extract_url(webpage) + if pladform_url: + return self.url_result(pladform_url) # Look for Playwire embeds mobj = re.search( @@ -1607,7 +1777,7 @@ class GenericIE(InfoExtractor): # Look for UDN embeds mobj = re.search( - r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._VALID_URL, webpage) + r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage) if mobj is not None: return self.url_result( compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed') @@ -1632,6 +1802,11 @@ class GenericIE(InfoExtractor): if snagfilms_url: return self.url_result(snagfilms_url) + # Look for ScreenwaveMedia embeds + mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage) + if mobj is not None: + return self.url_result(unescapeHTML(mobj.group('url')), 'ScreenwaveMedia') + # Look for AdobeTVVideo embeds mobj = re.search( r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]', @@ -1669,7 +1844,7 @@ class GenericIE(InfoExtractor): if not found: # Broaden the findall a little bit: JWPlayer JS loader found = filter_video(re.findall( - r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)) + r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)) if not found: # Flow player found = filter_video(re.findall(r'''(?xs) @@ -1695,7 +1870,7 @@ class GenericIE(InfoExtractor): found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)) if not found: # HTML5 video - found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage) + found = re.findall(r'(?s)<(?:video|audio)[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage) if not found: REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)' found = re.search( @@ -1706,6 +1881,9 @@ class GenericIE(InfoExtractor): # Look also in Refresh HTTP header refresh_header = head_response.headers.get('Refresh') if 
refresh_header: + # In python 2 response HTTP headers are bytestrings + if sys.version_info < (3, 0) and isinstance(refresh_header, str): + refresh_header = refresh_header.decode('iso-8859-1') found = re.search(REDIRECT_REGEX, refresh_header) if found: new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1))) @@ -1719,6 +1897,7 @@ class GenericIE(InfoExtractor): entries = [] for video_url in found: + video_url = video_url.replace('\\/', '/') video_url = compat_urlparse.urljoin(url, video_url) video_id = compat_urllib_parse_unquote(os.path.basename(video_url)) @@ -1730,22 +1909,24 @@ class GenericIE(InfoExtractor): # here's a fun little line of code for you: video_id = os.path.splitext(video_id)[0] - if determine_ext(video_url) == 'smil': - entries.append({ - 'id': video_id, - 'formats': self._extract_smil_formats(video_url, video_id), - 'uploader': video_uploader, - 'title': video_title, - 'age_limit': age_limit, - }) + entry_info_dict = { + 'id': video_id, + 'uploader': video_uploader, + 'title': video_title, + 'age_limit': age_limit, + } + + ext = determine_ext(video_url) + if ext == 'smil': + entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id) + elif ext == 'xspf': + return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id) + elif ext == 'm3u8': + entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4') else: - entries.append({ - 'id': video_id, - 'url': video_url, - 'uploader': video_uploader, - 'title': video_title, - 'age_limit': age_limit, - }) + entry_info_dict['url'] = video_url + + entries.append(entry_info_dict) if len(entries) == 1: return entries[0] diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py index 8a95793ca..c65ef6bcf 100644 --- a/youtube_dl/extractor/globo.py +++ b/youtube_dl/extractor/globo.py @@ -13,79 +13,59 @@ from ..compat import ( from ..utils import ( ExtractorError, float_or_none, + int_or_none, + str_or_none, ) class GloboIE(InfoExtractor): - _VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)' + _VALID_URL = '(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})' _API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist' _SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=17.0.0.132&resource_id=%s' - _VIDEOID_REGEXES = [ - r'\bdata-video-id="(\d+)"', - r'\bdata-player-videosids="(\d+)"', - r'<div[^>]+\bid="(\d+)"', - ] - _RESIGN_EXPIRATION = 86400 - _TESTS = [ - { - 'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/', - 'md5': '03ebf41cb7ade43581608b7d9b71fab0', - 'info_dict': { - 'id': '3654973', - 'ext': 'mp4', - 'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão', - 'duration': 251.585, - 'uploader': 'SporTV', - 'uploader_id': 698, - 'like_count': int, - } + _TESTS = [{ + 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/', + 'md5': 'b3ccc801f75cd04a914d51dadb83a78d', + 'info_dict': { + 'id': '3607726', + 'ext': 'mp4', + 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa', + 'duration': 103.204, + 'uploader': 'Globo.com', + 'uploader_id': '265', }, - { - 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/', - 'md5': 'b3ccc801f75cd04a914d51dadb83a78d', - 
'info_dict': { - 'id': '3607726', - 'ext': 'mp4', - 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa', - 'duration': 103.204, - 'uploader': 'Globo.com', - 'uploader_id': 265, - 'like_count': int, - } + }, { + 'url': 'http://globoplay.globo.com/v/4581987/', + 'md5': 'f36a1ecd6a50da1577eee6dd17f67eff', + 'info_dict': { + 'id': '4581987', + 'ext': 'mp4', + 'title': 'Acidentes de trânsito estão entre as maiores causas de queda de energia em SP', + 'duration': 137.973, + 'uploader': 'Rede Globo', + 'uploader_id': '196', }, - { - 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html', - 'md5': '307fdeae4390ccfe6ba1aa198cf6e72b', - 'info_dict': { - 'id': '3652183', - 'ext': 'mp4', - 'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião', - 'duration': 110.711, - 'uploader': 'Rede Globo', - 'uploader_id': 196, - 'like_count': int, - } - }, - { - 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/', - 'md5': 'c1defca721ce25b2354e927d3e4b3dec', - 'info_dict': { - 'id': '3928201', - 'ext': 'mp4', - 'title': 'Ator e diretor argentino, Ricado Darín fala sobre utopias e suas perdas', - 'duration': 1472.906, - 'uploader': 'Canal Brasil', - 'uploader_id': 705, - 'like_count': int, - } - }, - ] - - class MD5(): + }, { + 'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html', + 'only_matching': True, + }, { + 'url': 'http://globosatplay.globo.com/globonews/v/4472924/', + 'only_matching': True, + }, { + 'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/', + 'only_matching': True, + }, { + 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/', + 'only_matching': True, + }, { + 'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html', + 'only_matching': True, + }] + + class MD5: HEX_FORMAT_LOWERCASE = 0 HEX_FORMAT_UPPERCASE = 1 BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = '' @@ -352,23 +332,15 @@ class GloboIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id') - video = self._download_json( self._API_URL_TEMPLATE % video_id, video_id)['videos'][0] title = video['title'] - duration = float_or_none(video['duration'], 1000) - like_count = video['likes'] - uploader = video['channel'] - uploader_id = video['channel_id'] formats = [] - for resource in video['resources']: resource_id = resource.get('_id') - if not resource_id: + if not resource_id or resource_id.endswith('manifest'): continue security = self._download_json( @@ -397,22 +369,70 @@ class GloboIE(InfoExtractor): resource_url = resource['url'] signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash') if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'): - formats.extend(self._extract_m3u8_formats(signed_url, resource_id, 'mp4')) + m3u8_formats = self._extract_m3u8_formats( + signed_url, resource_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) else: formats.append({ 'url': signed_url, - 'format_id': resource_id, - 'height': resource.get('height'), + 'format_id': 'http-%s' % resource_id, 
+ 'height': int_or_none(resource.get('height')), + }) self._sort_formats(formats) + duration = float_or_none(video.get('duration'), 1000) + uploader = video.get('channel') + uploader_id = str_or_none(video.get('channel_id')) + return { 'id': video_id, 'title': title, 'duration': duration, 'uploader': uploader, 'uploader_id': uploader_id, - 'like_count': like_count, 'formats': formats } + + +class GloboArticleIE(InfoExtractor): + _VALID_URL = 'https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/]+)\.html' + + _VIDEOID_REGEXES = [ + r'\bdata-video-id=["\'](\d{7,})', + r'\bdata-player-videosids=["\'](\d{7,})', + r'\bvideosIDs\s*:\s*["\'](\d{7,})', + r'\bdata-id=["\'](\d{7,})', + r'<div[^>]+\bid=["\'](\d{7,})', + ] + + _TESTS = [{ + 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html', + 'md5': '307fdeae4390ccfe6ba1aa198cf6e72b', + 'info_dict': { + 'id': '3652183', + 'ext': 'mp4', + 'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião', + 'duration': 110.711, + 'uploader': 'Rede Globo', + 'uploader_id': '196', + } + }, { + 'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html', + 'only_matching': True, + }, { + 'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html', + 'only_matching': True, + }] + + @classmethod + def suitable(cls, url): + return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url) + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id') + return self.url_result('globo:%s' % video_id, 'Globo') diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py index fcefe54cd..731bacd67 100644 --- a/youtube_dl/extractor/googleplus.py +++ b/youtube_dl/extractor/googleplus.py @@ -61,7 +61,7 @@ class GooglePlusIE(InfoExtractor): 'width': int(width), 'height': int(height), } for width, height, video_url in re.findall( - r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)] + r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent\.com.*?)"', webpage)] self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/gputechconf.py b/youtube_dl/extractor/gputechconf.py new file mode 100644 index 000000000..145b55bf3 --- /dev/null +++ b/youtube_dl/extractor/gputechconf.py @@ -0,0 +1,55 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + xpath_element, + xpath_text, + int_or_none, + parse_duration, +) + + +class GPUTechConfIE(InfoExtractor): + _VALID_URL = r'https?://on-demand\.gputechconf\.com/gtc/2015/video/S(?P<id>\d+)\.html' + _TEST = { + 'url': 'http://on-demand.gputechconf.com/gtc/2015/video/S5156.html', + 'md5': 'a8862a00a0fd65b8b43acc5b8e33f798', + 'info_dict': { + 'id': '5156', + 'ext': 'mp4', + 'title': 'Coordinating More Than 3 Million CUDA Threads for Social Network Analysis', + 'duration': 1219, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + root_path = self._search_regex(r'var\s+rootPath\s*=\s*"([^"]+)', webpage, 'root path', 'http://evt.dispeak.com/nvidia/events/gtc15/') + xml_file_id = self._search_regex(r'var\s+xmlFileId\s*=\s*"([^"]+)', webpage, 
'xml file id') + + doc = self._download_xml('%sxml/%s.xml' % (root_path, xml_file_id), video_id) + + metadata = xpath_element(doc, 'metadata') + http_host = xpath_text(metadata, 'httpHost', 'http host', True) + mbr_videos = xpath_element(metadata, 'MBRVideos') + + formats = [] + for mbr_video in mbr_videos.findall('MBRVideo'): + stream_name = xpath_text(mbr_video, 'streamName') + if stream_name: + formats.append({ + 'url': 'http://%s/%s' % (http_host, stream_name.replace('mp4:', '')), + 'tbr': int_or_none(xpath_text(mbr_video, 'bitrate')), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': xpath_text(metadata, 'title'), + 'duration': parse_duration(xpath_text(metadata, 'endTime')), + 'creator': xpath_text(metadata, 'speaker'), + 'formats': formats, + } diff --git a/youtube_dl/extractor/groupon.py b/youtube_dl/extractor/groupon.py index 8b9e0e2f8..63c05b6a6 100644 --- a/youtube_dl/extractor/groupon.py +++ b/youtube_dl/extractor/groupon.py @@ -18,6 +18,8 @@ class GrouponIE(InfoExtractor): 'id': 'tubGNycTo_9Uxg82uESj4i61EYX8nyuf', 'ext': 'mp4', 'title': 'Bikram Yoga Huntington Beach | Orange County', + 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', + 'duration': 44.961, }, }], 'params': { diff --git a/youtube_dl/extractor/hearthisat.py b/youtube_dl/extractor/hearthisat.py index a19b31ac0..7d8698655 100644 --- a/youtube_dl/extractor/hearthisat.py +++ b/youtube_dl/extractor/hearthisat.py @@ -4,12 +4,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( HEADRequest, + sanitized_Request, str_to_int, urlencode_postdata, urlhandle_detect_ext, @@ -47,7 +45,7 @@ class HearThisAtIE(InfoExtractor): r'intTrackId\s*=\s*(\d+)', webpage, 'track ID') payload = urlencode_postdata({'tracks[]': track_id}) - req = compat_urllib_request.Request(self._PLAYLIST_URL, payload) + req = sanitized_Request(self._PLAYLIST_URL, payload) req.add_header('Content-type', 'application/x-www-form-urlencoded') track = self._download_json(req, track_id, 'Downloading playlist')[0] diff --git a/youtube_dl/extractor/hostingbulk.py b/youtube_dl/extractor/hostingbulk.py deleted file mode 100644 index a3154cfde..000000000 --- a/youtube_dl/extractor/hostingbulk.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) -from ..utils import ( - ExtractorError, - int_or_none, - urlencode_postdata, -) - - -class HostingBulkIE(InfoExtractor): - _VALID_URL = r'''(?x) - https?://(?:www\.)?hostingbulk\.com/ - (?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html''' - _FILE_DELETED_REGEX = r'<b>File Not Found</b>' - _TEST = { - 'url': 'http://hostingbulk.com/n0ulw1hv20fm.html', - 'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f', - 'info_dict': { - 'id': 'n0ulw1hv20fm', - 'ext': 'mp4', - 'title': 'md5:5afeba33f48ec87219c269e054afd622', - 'filesize': 6816081, - 'thumbnail': 're:^http://.*\.jpg$', - } - } - - def _real_extract(self, url): - video_id = self._match_id(url) - url = 'http://hostingbulk.com/{0:}.html'.format(video_id) - - # Custom request with cookie to set language to English, so our file - # deleted regex would work. 
- request = compat_urllib_request.Request( - url, headers={'Cookie': 'lang=english'}) - webpage = self._download_webpage(request, video_id) - - if re.search(self._FILE_DELETED_REGEX, webpage) is not None: - raise ExtractorError('Video %s does not exist' % video_id, - expected=True) - - title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title') - filesize = int_or_none( - self._search_regex( - r'<small>\((\d+)\sbytes?\)</small>', - webpage, - 'filesize', - fatal=False - ) - ) - thumbnail = self._search_regex( - r'<img src="([^"]+)".+?class="pic"', - webpage, 'thumbnail', fatal=False) - - fields = self._hidden_inputs(webpage) - - request = compat_urllib_request.Request(url, urlencode_postdata(fields)) - request.add_header('Content-type', 'application/x-www-form-urlencoded') - response = self._request_webpage(request, video_id, - 'Submiting download request') - video_url = response.geturl() - - formats = [{ - 'format_id': 'sd', - 'filesize': filesize, - 'url': video_url, - }] - - return { - 'id': video_id, - 'title': title, - 'thumbnail': thumbnail, - 'formats': formats, - } diff --git a/youtube_dl/extractor/hotnewhiphop.py b/youtube_dl/extractor/hotnewhiphop.py index 651784b73..31e219945 100644 --- a/youtube_dl/extractor/hotnewhiphop.py +++ b/youtube_dl/extractor/hotnewhiphop.py @@ -3,13 +3,11 @@ from __future__ import unicode_literals import base64 from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, HEADRequest, + sanitized_Request, ) @@ -41,7 +39,7 @@ class HotNewHipHopIE(InfoExtractor): ('mediaType', 's'), ('mediaId', video_id), ]) - r = compat_urllib_request.Request( + r = sanitized_Request( 'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata) r.add_header('Content-Type', 'application/x-www-form-urlencoded') mkd = self._download_json( diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py index 16677f179..e8f51e545 100644 --- a/youtube_dl/extractor/howcast.py +++ b/youtube_dl/extractor/howcast.py @@ -16,6 +16,7 @@ class HowcastIE(InfoExtractor): 'description': 'md5:dbe792e5f6f1489027027bf2eba188a3', 'timestamp': 1276081287, 'upload_date': '20100609', + 'duration': 56.823, }, 'params': { # m3u8 download diff --git a/youtube_dl/extractor/hypem.py b/youtube_dl/extractor/hypem.py index aa0724a02..b3706fe6d 100644 --- a/youtube_dl/extractor/hypem.py +++ b/youtube_dl/extractor/hypem.py @@ -4,12 +4,10 @@ import json import time from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -30,15 +28,12 @@ class HypemIE(InfoExtractor): track_id = self._match_id(url) data = {'ax': 1, 'ts': time.time()} - data_encoded = compat_urllib_parse.urlencode(data) - complete_url = url + "?" + data_encoded - request = compat_urllib_request.Request(complete_url) + request = sanitized_Request(url + '?' 
+ compat_urllib_parse.urlencode(data)) response, urlh = self._download_webpage_handle( request, track_id, 'Downloading webpage with the url') - cookie = urlh.headers.get('Set-Cookie', '') html_tracks = self._html_search_regex( - r'(?ms)<script type="application/json" id="displayList-data">\s*(.*?)\s*</script>', + r'(?ms)<script type="application/json" id="displayList-data">(.+?)</script>', response, 'tracks') try: track_list = json.loads(html_tracks) @@ -48,15 +43,14 @@ class HypemIE(InfoExtractor): key = track['key'] track_id = track['id'] - artist = track['artist'] title = track['song'] - serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key) - request = compat_urllib_request.Request( - serve_url, '', {'Content-Type': 'application/json'}) - request.add_header('cookie', cookie) + request = sanitized_Request( + 'http://hypem.com/serve/source/%s/%s' % (track_id, key), + '', {'Content-Type': 'application/json'}) song_data = self._download_json(request, track_id, 'Downloading metadata') - final_url = song_data["url"] + final_url = song_data['url'] + artist = track.get('artist') return { 'id': track_id, diff --git a/youtube_dl/extractor/iconosquare.py b/youtube_dl/extractor/iconosquare.py index 70e4c0d41..a39f422e9 100644 --- a/youtube_dl/extractor/iconosquare.py +++ b/youtube_dl/extractor/iconosquare.py @@ -1,7 +1,11 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import int_or_none +from ..utils import ( + int_or_none, + get_element_by_id, + remove_end, +) class IconosquareIE(InfoExtractor): @@ -12,7 +16,7 @@ class IconosquareIE(InfoExtractor): 'info_dict': { 'id': '522207370455279102_24101272', 'ext': 'mp4', - 'title': 'Instagram media by @aguynamedpatrick (Patrick Janelle)', + 'title': 'Instagram photo by @aguynamedpatrick (Patrick Janelle)', 'description': 'md5:644406a9ec27457ed7aa7a9ebcd4ce3d', 'timestamp': 1376471991, 'upload_date': '20130814', @@ -29,8 +33,7 @@ class IconosquareIE(InfoExtractor): webpage = self._download_webpage(url, video_id) media = self._parse_json( - self._search_regex( - r'window\.media\s*=\s*({.+?});\n', webpage, 'media'), + get_element_by_id('mediaJson', webpage), video_id) formats = [{ @@ -41,9 +44,7 @@ class IconosquareIE(InfoExtractor): } for format_id, f in media['videos'].items()] self._sort_formats(formats) - title = self._html_search_regex( - r'<title>(.+?)(?: *\(Videos?\))? 
\| (?:Iconosquare|Statigram)</title>', - webpage, 'title') + title = remove_end(self._og_search_title(webpage), ' - via Iconosquare') timestamp = int_or_none(media.get('created_time') or media.get('caption', {}).get('created_time')) description = media.get('caption', {}).get('text') @@ -61,6 +62,14 @@ class IconosquareIE(InfoExtractor): 'height': int_or_none(t.get('height')) } for thumbnail_id, t in media.get('images', {}).items()] + comments = [{ + 'id': comment.get('id'), + 'text': comment['text'], + 'timestamp': int_or_none(comment.get('created_time')), + 'author': comment.get('from', {}).get('full_name'), + 'author_id': comment.get('from', {}).get('username'), + } for comment in media.get('comments', {}).get('data', []) if 'text' in comment] + return { 'id': video_id, 'title': title, @@ -72,4 +81,5 @@ class IconosquareIE(InfoExtractor): 'comment_count': comment_count, 'like_count': like_count, 'formats': formats, + 'comments': comments, } diff --git a/youtube_dl/extractor/imdb.py b/youtube_dl/extractor/imdb.py index 4bb574cf3..02e1e428e 100644 --- a/youtube_dl/extractor/imdb.py +++ b/youtube_dl/extractor/imdb.py @@ -4,8 +4,8 @@ import re import json from .common import InfoExtractor -from ..compat import ( - compat_urlparse, +from ..utils import ( + qualities, ) @@ -30,24 +30,33 @@ class ImdbIE(InfoExtractor): descr = self._html_search_regex( r'(?s)<span itemprop="description">(.*?)</span>', webpage, 'description', fatal=False) - available_formats = re.findall( - r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage, - flags=re.MULTILINE) + player_url = 'http://www.imdb.com/video/imdb/vi%s/imdb/single' % video_id + player_page = self._download_webpage( + player_url, video_id, 'Downloading player page') + # the player page contains the info for the default format, we have to + # fetch other pages for the rest of the formats + extra_formats = re.findall(r'href="(?P<url>%s.*?)".*?>(?P<name>.*?)<' % re.escape(player_url), player_page) + format_pages = [ + self._download_webpage( + f_url, video_id, 'Downloading info for %s format' % f_name) + for f_url, f_name in extra_formats] + format_pages.append(player_page) + + quality = qualities(['SD', '480p', '720p']) formats = [] - for f_id, f_path in available_formats: - f_path = f_path.strip() - format_page = self._download_webpage( - compat_urlparse.urljoin(url, f_path), - 'Downloading info for %s format' % f_id) + for format_page in format_pages: json_data = self._search_regex( r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>', format_page, 'json data', flags=re.DOTALL) info = json.loads(json_data) format_info = info['videoPlayerObject']['video'] + f_id = format_info['ffname'] formats.append({ 'format_id': f_id, 'url': format_info['videoInfoList'][0]['videoUrl'], + 'quality': quality(f_id), }) + self._sort_formats(formats) return { 'id': video_id, diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py index d692ea79a..70c8ca64e 100644 --- a/youtube_dl/extractor/imgur.py +++ b/youtube_dl/extractor/imgur.py @@ -13,7 +13,7 @@ from ..utils import ( class ImgurIE(InfoExtractor): - _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)' + _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!gallery)(?P<id>[a-zA-Z0-9]+)' _TESTS = [{ 'url': 'https://i.imgur.com/A61SaA1.gifv', @@ -97,3 +97,28 @@ class ImgurIE(InfoExtractor): 'description': self._og_search_description(webpage), 'title': self._og_search_title(webpage), } + + +class ImgurAlbumIE(InfoExtractor): + _VALID_URL = 
r'https?://(?:i\.)?imgur\.com/gallery/(?P<id>[a-zA-Z0-9]+)' + + _TEST = { + 'url': 'http://imgur.com/gallery/Q95ko', + 'info_dict': { + 'id': 'Q95ko', + }, + 'playlist_count': 25, + } + + def _real_extract(self, url): + album_id = self._match_id(url) + + album_images = self._download_json( + 'http://imgur.com/gallery/%s/album_images/hit.json?all=true' % album_id, + album_id)['data']['images'] + + entries = [ + self.url_result('http://imgur.com/%s' % image['hash']) + for image in album_images if image.get('hash')] + + return self.playlist_result(entries, album_id) diff --git a/youtube_dl/extractor/indavideo.py b/youtube_dl/extractor/indavideo.py new file mode 100644 index 000000000..12fb5e8e1 --- /dev/null +++ b/youtube_dl/extractor/indavideo.py @@ -0,0 +1,142 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_age_limit, + parse_iso8601, +) + + +class IndavideoEmbedIE(InfoExtractor): + _VALID_URL = r'https?://(?:(?:embed\.)?indavideo\.hu/player/video/|assets\.indavideo\.hu/swf/player\.swf\?.*\b(?:v(?:ID|id))=)(?P<id>[\da-f]+)' + _TESTS = [{ + 'url': 'http://indavideo.hu/player/video/1bdc3c6d80/', + 'md5': 'f79b009c66194acacd40712a6778acfa', + 'info_dict': { + 'id': '1837039', + 'ext': 'mp4', + 'title': 'Cicatánc', + 'description': '', + 'thumbnail': 're:^https?://.*\.jpg$', + 'uploader': 'cukiajanlo', + 'uploader_id': '83729', + 'timestamp': 1439193826, + 'upload_date': '20150810', + 'duration': 72, + 'age_limit': 0, + 'tags': ['tánc', 'cica', 'cuki', 'cukiajanlo', 'newsroom'], + }, + }, { + 'url': 'http://embed.indavideo.hu/player/video/1bdc3c6d80?autostart=1&hide=1', + 'only_matching': True, + }, { + 'url': 'http://assets.indavideo.hu/swf/player.swf?v=fe25e500&vID=1bdc3c6d80&autostart=1&hide=1&i=1', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + video = self._download_json( + 'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id, + video_id)['data'] + + title = video['title'] + + video_urls = video.get('video_files', []) + video_file = video.get('video_file') + if video_file: + video_urls.append(video_file) + video_urls = list(set(video_urls)) + + video_prefix = video_urls[0].rsplit('/', 1)[0] + + for flv_file in video.get('flv_files', []): + flv_url = '%s/%s' % (video_prefix, flv_file) + if flv_url not in video_urls: + video_urls.append(flv_url) + + formats = [{ + 'url': video_url, + 'height': int_or_none(self._search_regex(r'\.(\d{3,4})\.mp4$', video_url, 'height', default=None)), + } for video_url in video_urls] + self._sort_formats(formats) + + timestamp = video.get('date') + if timestamp: + # upload date is in CEST + timestamp = parse_iso8601(timestamp + ' +0200', ' ') + + thumbnails = [{ + 'url': self._proto_relative_url(thumbnail) + } for thumbnail in video.get('thumbnails', [])] + + tags = [tag['title'] for tag in video.get('tags', [])] + + return { + 'id': video.get('id') or video_id, + 'title': title, + 'description': video.get('description'), + 'thumbnails': thumbnails, + 'uploader': video.get('user_name'), + 'uploader_id': video.get('user_id'), + 'timestamp': timestamp, + 'duration': int_or_none(video.get('length')), + 'age_limit': parse_age_limit(video.get('age_limit')), + 'tags': tags, + 'formats': formats, + } + + +class IndavideoIE(InfoExtractor): + _VALID_URL = r'https?://(?:.+?\.)?indavideo\.hu/video/(?P<id>[^/#?]+)' + _TESTS = [{ + 'url': 'http://indavideo.hu/video/Vicces_cica_1', + 'md5': 
'8c82244ba85d2a2310275b318eb51eac', + 'info_dict': { + 'id': '1335611', + 'display_id': 'Vicces_cica_1', + 'ext': 'mp4', + 'title': 'Vicces cica', + 'description': 'Játszik a tablettel. :D', + 'thumbnail': 're:^https?://.*\.jpg$', + 'uploader': 'Jet_Pack', + 'uploader_id': '491217', + 'timestamp': 1390821212, + 'upload_date': '20140127', + 'duration': 7, + 'age_limit': 0, + 'tags': ['vicces', 'macska', 'cica', 'ügyes', 'nevetés', 'játszik', 'Cukiság', 'Jet_Pack'], + }, + }, { + 'url': 'http://index.indavideo.hu/video/2015_0728_beregszasz', + 'only_matching': True, + }, { + 'url': 'http://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko', + 'only_matching': True, + }, { + 'url': 'http://erotika.indavideo.hu/video/Amator_tini_punci', + 'only_matching': True, + }, { + 'url': 'http://film.indavideo.hu/video/f_hrom_nagymamm_volt', + 'only_matching': True, + }, { + 'url': 'http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes', + 'only_matching': True, + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + embed_url = self._search_regex( + r'<link[^>]+rel="video_src"[^>]+href="(.+?)"', webpage, 'embed url') + + return { + '_type': 'url_transparent', + 'ie_key': 'IndavideoEmbed', + 'url': embed_url, + 'display_id': display_id, + } diff --git a/youtube_dl/extractor/infoq.py b/youtube_dl/extractor/infoq.py index 71cfd12c5..016af2084 100644 --- a/youtube_dl/extractor/infoq.py +++ b/youtube_dl/extractor/infoq.py @@ -1,3 +1,5 @@ +# coding: utf-8 + from __future__ import unicode_literals import base64 @@ -5,8 +7,9 @@ import base64 from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, - compat_urlparse, + compat_parse_qs, ) +from ..utils import determine_ext class InfoQIE(InfoExtractor): @@ -16,7 +19,7 @@ class InfoQIE(InfoExtractor): 'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things', 'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2', 'info_dict': { - 'id': '12-jan-pythonthings', + 'id': 'A-Few-of-My-Favorite-Python-Things', 'ext': 'mp4', 'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.', 'title': 'A Few of My Favorite [Python] Things', @@ -24,40 +27,84 @@ class InfoQIE(InfoExtractor): }, { 'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript', 'only_matching': True, + }, { + 'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery', + 'md5': '4918d0cca1497f2244572caf626687ef', + 'info_dict': { + 'id': 'openstack-continued-delivery', + 'title': 'OpenStack持续交付之路', + 'ext': 'flv', + 'description': 'md5:308d981fb28fa42f49f9568322c683ff', + }, }] - def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) + def _extract_bokecc_videos(self, webpage, video_id): + # TODO: bokecc.com is a Chinese video cloud platform + # It should have an independent extractor but I don't have other + # examples using bokecc + player_params_str = self._html_search_regex( + r'<script[^>]+src="http://p\.bokecc\.com/player\?([^"]+)', + webpage, 'player params', default=None) - video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title') - video_description = self._html_search_meta('description', webpage, 'description') + player_params = compat_parse_qs(player_params_str) + + info_xml = self._download_xml( + 'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % ( + 
player_params['siteid'][0], player_params['vid'][0]), video_id) + + return [{ + 'format_id': 'bokecc', + 'url': quality.find('./copy').attrib['playurl'], + 'preference': int(quality.attrib['value']), + } for quality in info_xml.findall('./video/quality')] + def _extract_rtmp_videos(self, webpage): # The server URL is hardcoded video_url = 'rtmpe://video.infoq.com/cfx/st/' # Extract video URL encoded_id = self._search_regex( - r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id') + r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None) + real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8')) playpath = 'mp4:' + real_id - video_filename = playpath.split('/')[-1] - video_id, extension = video_filename.split('.') - - http_base = self._search_regex( - r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage, - 'HTTP base URL') - - formats = [{ + return [{ 'format_id': 'rtmp', 'url': video_url, - 'ext': extension, + 'ext': determine_ext(playpath), 'play_path': playpath, - }, { + }] + + def _extract_http_videos(self, webpage): + http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL') + + policy = self._search_regex(r'InfoQConstants.scp\s*=\s*\'([^\']+)\'', webpage, 'policy') + signature = self._search_regex(r'InfoQConstants.scs\s*=\s*\'([^\']+)\'', webpage, 'signature') + key_pair_id = self._search_regex(r'InfoQConstants.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id') + + return [{ 'format_id': 'http', - 'url': compat_urlparse.urljoin(url, http_base) + real_id, + 'url': http_video_url, + 'http_headers': { + 'Cookie': 'CloudFront-Policy=%s; CloudFront-Signature=%s; CloudFront-Key-Pair-Id=%s' % ( + policy, signature, key_pair_id), + }, }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title') + video_description = self._html_search_meta('description', webpage, 'description') + + if '/cn/' in url: + # for China videos, HTTP video URL exists but always fails with 403 + formats = self._extract_bokecc_videos(webpage, video_id) + else: + formats = self._extract_rtmp_videos(webpage) + self._extract_http_videos(webpage) + self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py index 3d78f78c4..c158f2064 100644 --- a/youtube_dl/extractor/instagram.py +++ b/youtube_dl/extractor/instagram.py @@ -10,8 +10,8 @@ from ..utils import ( class InstagramIE(InfoExtractor): - _VALID_URL = r'https://instagram\.com/p/(?P<id>[\da-zA-Z]+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+)' + _TESTS = [{ 'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc', 'md5': '0d2da106a9d2631273e192b372806516', 'info_dict': { @@ -21,7 +21,10 @@ class InstagramIE(InfoExtractor): 'title': 'Video by naomipq', 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8', } - } + }, { + 'url': 'https://instagram.com/p/-Cmh1cukG2/', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py index 821c8ec10..36baf3245 100644 --- a/youtube_dl/extractor/iprima.py +++ b/youtube_dl/extractor/iprima.py @@ -6,12 +6,10 @@ from random import random from math import floor from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( ExtractorError, remove_end, + 
sanitized_Request, ) @@ -61,7 +59,7 @@ class IPrimaIE(InfoExtractor): (floor(random() * 1073741824), floor(random() * 1073741824)) ) - req = compat_urllib_request.Request(player_url) + req = sanitized_Request(player_url) req.add_header('Referer', url) playerpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py index afb7f4e61..c3731a110 100644 --- a/youtube_dl/extractor/iqiyi.py +++ b/youtube_dl/extractor/iqiyi.py @@ -95,6 +95,10 @@ class IqiyiIE(InfoExtractor): ('10', 'h1'), ] + @staticmethod + def md5_text(text): + return hashlib.md5(text.encode('utf-8')).hexdigest() + def construct_video_urls(self, data, video_id, _uuid): def do_xor(x, y): a = y % 3 @@ -121,7 +125,7 @@ class IqiyiIE(InfoExtractor): note='Download path key of segment %d for format %s' % (segment_index + 1, format_id) )['t'] t = str(int(math.floor(int(tm) / (600.0)))) - return hashlib.md5((t + mg + x).encode('utf8')).hexdigest() + return self.md5_text(t + mg + x) video_urls_dict = {} for format_item in data['vp']['tkl'][0]['vs']: @@ -179,20 +183,19 @@ class IqiyiIE(InfoExtractor): def get_raw_data(self, tvid, video_id, enc_key, _uuid): tm = str(int(time.time())) + tail = tm + tvid param = { 'key': 'fvip', - 'src': hashlib.md5(b'youtube-dl').hexdigest(), + 'src': self.md5_text('youtube-dl'), 'tvId': tvid, 'vid': video_id, 'vinfo': 1, 'tm': tm, - 'enc': hashlib.md5( - (enc_key + tm + tvid).encode('utf8')).hexdigest(), + 'enc': self.md5_text(enc_key + tail), 'qyid': _uuid, 'tn': random.random(), 'um': 0, - 'authkey': hashlib.md5( - (tm + tvid).encode('utf8')).hexdigest() + 'authkey': self.md5_text(self.md5_text('') + tail), } api_url = 'http://cache.video.qiyi.com/vms' + '?' + \ @@ -201,7 +204,9 @@ class IqiyiIE(InfoExtractor): return raw_data def get_enc_key(self, swf_url, video_id): - enc_key = '8e29ab5666d041c3a1ea76e06dabdffb' + # TODO: automatic key extraction + # last update at 2015-12-18 for Zombie::bite + enc_key = '8b6b683780897eb8d9a48a02ccc4817d'[::-1] return enc_key def _real_extract(self, url): diff --git a/youtube_dl/extractor/ir90tv.py b/youtube_dl/extractor/ir90tv.py new file mode 100644 index 000000000..214bcd5b5 --- /dev/null +++ b/youtube_dl/extractor/ir90tv.py @@ -0,0 +1,42 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import remove_start + + +class Ir90TvIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?90tv\.ir/video/(?P<id>[0-9]+)/.*' + _TESTS = [{ + 'url': 'http://90tv.ir/video/95719/%D8%B4%D8%A7%DB%8C%D8%B9%D8%A7%D8%AA-%D9%86%D9%82%D9%84-%D9%88-%D8%A7%D9%86%D8%AA%D9%82%D8%A7%D9%84%D8%A7%D8%AA-%D9%85%D9%87%D9%85-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7-940218', + 'md5': '411dbd94891381960cb9e13daa47a869', + 'info_dict': { + 'id': '95719', + 'ext': 'mp4', + 'title': 'شایعات نقل و انتقالات مهم فوتبال اروپا 94/02/18', + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, { + 'url': 'http://www.90tv.ir/video/95719/%D8%B4%D8%A7%DB%8C%D8%B9%D8%A7%D8%AA-%D9%86%D9%82%D9%84-%D9%88-%D8%A7%D9%86%D8%AA%D9%82%D8%A7%D9%84%D8%A7%D8%AA-%D9%85%D9%87%D9%85-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7-940218', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = remove_start(self._html_search_regex( + r'<title>([^<]+)</title>', webpage, 'title'), '90tv.ir :: ') + + video_url = self._search_regex( + r'<source[^>]+src="([^"]+)"', webpage, 
'video url') + + thumbnail = self._search_regex(r'poster="([^"]+)"', webpage, 'thumbnail url', fatal=False) + + return { + 'url': video_url, + 'id': video_id, + 'title': title, + 'video_url': video_url, + 'thumbnail': thumbnail, + } diff --git a/youtube_dl/extractor/ivi.py b/youtube_dl/extractor/ivi.py index e82594444..029878d24 100644 --- a/youtube_dl/extractor/ivi.py +++ b/youtube_dl/extractor/ivi.py @@ -5,11 +5,9 @@ import re import json from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -78,7 +76,7 @@ class IviIE(InfoExtractor): ] } - request = compat_urllib_request.Request(api_url, json.dumps(data)) + request = sanitized_Request(api_url, json.dumps(data)) video_json_page = self._download_webpage( request, video_id, 'Downloading video JSON') diff --git a/youtube_dl/extractor/jeuxvideo.py b/youtube_dl/extractor/jeuxvideo.py index 1df084d87..eef7daa29 100644 --- a/youtube_dl/extractor/jeuxvideo.py +++ b/youtube_dl/extractor/jeuxvideo.py @@ -28,7 +28,7 @@ class JeuxVideoIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) title = mobj.group(1) webpage = self._download_webpage(url, title) - title = self._html_search_meta('name', webpage) + title = self._html_search_meta('name', webpage) or self._og_search_title(webpage) config_url = self._html_search_regex( r'data-src="(/contenu/medias/video.php.*?)"', webpage, 'config URL') diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py index d28730492..583b1a5ad 100644 --- a/youtube_dl/extractor/kaltura.py +++ b/youtube_dl/extractor/kaltura.py @@ -2,23 +2,41 @@ from __future__ import unicode_literals import re +import base64 from .common import InfoExtractor -from ..compat import compat_urllib_parse +from ..compat import ( + compat_urllib_parse, + compat_urlparse, +) from ..utils import ( + clean_html, ExtractorError, int_or_none, + unsmuggle_url, ) class KalturaIE(InfoExtractor): _VALID_URL = r'''(?x) - (?:kaltura:| - https?://(:?(?:www|cdnapisec)\.)?kaltura\.com/index\.php/kwidget/(?:[^/]+/)*?wid/_ - )(?P<partner_id>\d+) - (?::| - /(?:[^/]+/)*?entry_id/ - )(?P<id>[0-9a-z_]+)''' + (?: + kaltura:(?P<partner_id_s>\d+):(?P<id_s>[0-9a-z_]+)| + https?:// + (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/ + (?: + (?: + # flash player + index\.php/kwidget/ + (?:[^/]+/)*?wid/_(?P<partner_id>\d+)/ + (?:[^/]+/)*?entry_id/(?P<id>[0-9a-z_]+)| + # html5 player + html5/html5lib/ + (?:[^/]+/)*?entry_id/(?P<id_html5>[0-9a-z_]+) + .*\?.*\bwid=_(?P<partner_id_html5>\d+) + ) + ) + ) + ''' _API_BASE = 'http://cdnapi.kaltura.com/api_v3/index.php?' 
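A note on the smuggled `source_url` that ties the Kaltura hunks together: the generic extractor hunk earlier in this diff wraps the bare `kaltura:PARTNER_ID:ENTRY_ID` URL with `smuggle_url(..., {'source_url': url})`, and `_real_extract` below recovers it with `unsmuggle_url`. A minimal sketch of that round trip, using the partner/entry IDs from the html5-player test URL below; `http://example.com/page` is a placeholder, not a URL from this diff:
```
from youtube_dl.utils import smuggle_url, unsmuggle_url

# GenericIE packs the embedding page's URL alongside the kaltura: URL
smuggled = smuggle_url(
    'kaltura:243342:1_sf5ovm7u', {'source_url': 'http://example.com/page'})

# KalturaIE._real_extract unpacks it; the second argument is the default
# returned when nothing was smuggled
url, data = unsmuggle_url(smuggled, {})
assert url == 'kaltura:243342:1_sf5ovm7u'
assert data['source_url'] == 'http://example.com/page'
```
As the hunk below shows, the recovered `source_url` is then cut down to its `scheme://host` part, base64-encoded, and appended to each flavor URL as a `referrer` query parameter, so referrer-locked embeds remain downloadable outside their original page.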
_TESTS = [ { @@ -43,6 +61,10 @@ class KalturaIE(InfoExtractor): 'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3', 'only_matching': True, }, + { + 'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342', + 'only_matching': True, + } ] def _kaltura_api_call(self, video_id, actions, *args, **kwargs): @@ -105,31 +127,47 @@ class KalturaIE(InfoExtractor): video_id, actions, note='Downloading video info JSON') def _real_extract(self, url): - video_id = self._match_id(url) + url, smuggled_data = unsmuggle_url(url, {}) + mobj = re.match(self._VALID_URL, url) - partner_id, entry_id = mobj.group('partner_id'), mobj.group('id') + partner_id = mobj.group('partner_id_s') or mobj.group('partner_id') or mobj.group('partner_id_html5') + entry_id = mobj.group('id_s') or mobj.group('id') or mobj.group('id_html5') info, source_data = self._get_video_info(entry_id, partner_id) - formats = [{ - 'format_id': '%(fileExt)s-%(bitrate)s' % f, - 'ext': f['fileExt'], - 'tbr': f['bitrate'], - 'fps': f.get('frameRate'), - 'filesize_approx': int_or_none(f.get('size'), invscale=1024), - 'container': f.get('containerFormat'), - 'vcodec': f.get('videoCodecId'), - 'height': f.get('height'), - 'width': f.get('width'), - 'url': '%s/flavorId/%s' % (info['dataUrl'], f['id']), - } for f in source_data['flavorAssets']] + source_url = smuggled_data.get('source_url') + if source_url: + referrer = base64.b64encode( + '://'.join(compat_urlparse.urlparse(source_url)[:2]) + .encode('utf-8')).decode('utf-8') + else: + referrer = None + + formats = [] + for f in source_data['flavorAssets']: + video_url = '%s/flavorId/%s' % (info['dataUrl'], f['id']) + if referrer: + video_url += '?referrer=%s' % referrer + formats.append({ + 'format_id': '%(fileExt)s-%(bitrate)s' % f, + 'ext': f.get('fileExt'), + 'tbr': int_or_none(f['bitrate']), + 'fps': int_or_none(f.get('frameRate')), + 'filesize_approx': int_or_none(f.get('size'), invscale=1024), + 'container': f.get('containerFormat'), + 'vcodec': f.get('videoCodecId'), + 'height': int_or_none(f.get('height')), + 'width': int_or_none(f.get('width')), + 'url': video_url, + }) + self._check_formats(formats, entry_id) self._sort_formats(formats) return { - 'id': video_id, + 'id': entry_id, 'title': info['name'], 'formats': formats, - 'description': info.get('description'), + 'description': clean_html(info.get('description')), 'thumbnail': info.get('thumbnailUrl'), 'duration': info.get('duration'), 'timestamp': info.get('createdAt'), diff --git a/youtube_dl/extractor/keek.py b/youtube_dl/extractor/keek.py index c0956ba09..94a03d277 100644 --- a/youtube_dl/extractor/keek.py +++ b/youtube_dl/extractor/keek.py @@ -1,46 +1,39 @@ +# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class KeekIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)' + _VALID_URL = r'https?://(?:www\.)?keek\.com/keek/(?P<id>\w+)' IE_NAME = 'keek' _TEST = { - 'url': 'https://www.keek.com/ytdl/keeks/NODfbab', - 'md5': '09c5c109067536c1cec8bac8c21fea05', + 'url': 'https://www.keek.com/keek/NODfbab', + 'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83', 'info_dict': { 'id': 'NODfbab', 'ext': 'mp4', - 'uploader': 'youtube-dl project', - 'uploader_id': 'ytdl', - 'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .', + 'title': 
'md5:35d42050a3ece241d5ddd7fdcc6fd896', + 'uploader': 'ytdl', + 'uploader_id': 'eGT5bab', }, } def _real_extract(self, url): video_id = self._match_id(url) - video_url = 'http://cdn.keek.com/keek/video/%s' % video_id - thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id webpage = self._download_webpage(url, video_id) - raw_desc = self._html_search_meta('description', webpage) - if raw_desc: - uploader = self._html_search_regex( - r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False) - uploader_id = self._html_search_regex( - r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False) - else: - uploader = None - uploader_id = None - return { 'id': video_id, - 'url': video_url, + 'url': self._og_search_video_url(webpage), 'ext': 'mp4', - 'title': self._og_search_title(webpage), - 'thumbnail': thumbnail, - 'uploader': uploader, - 'uploader_id': uploader_id, + 'title': self._og_search_description(webpage).strip(), + 'thumbnail': self._og_search_thumbnail(webpage), + 'uploader': self._search_regex( + r'data-username=(["\'])(?P<uploader>.+?)\1', webpage, + 'uploader', fatal=False, group='uploader'), + 'uploader_id': self._search_regex( + r'data-user-id=(["\'])(?P<uploader_id>.+?)\1', webpage, + 'uploader id', fatal=False, group='uploader_id'), } diff --git a/youtube_dl/extractor/keezmovies.py b/youtube_dl/extractor/keezmovies.py index 82eddec51..126ca13df 100644 --- a/youtube_dl/extractor/keezmovies.py +++ b/youtube_dl/extractor/keezmovies.py @@ -1,12 +1,11 @@ from __future__ import unicode_literals -import os import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_urlparse, - compat_urllib_request, +from ..utils import ( + sanitized_Request, + url_basename, ) @@ -14,19 +13,20 @@ class KeezMoviesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/.+?(?P<id>[0-9]+)(?:[/?&]|$)' _TEST = { 'url': 'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711', - 'md5': '6e297b7e789329923fcf83abb67c9289', + 'md5': '1c1e75d22ffa53320f45eeb07bc4cdc0', 'info_dict': { 'id': '1214711', 'ext': 'mp4', 'title': 'Petite Asian Lady Mai Playing In Bathtub', 'age_limit': 18, + 'thumbnail': 're:^https?://.*\.jpg$', } } def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) @@ -38,21 +38,29 @@ class KeezMoviesIE(InfoExtractor): video_title = self._html_search_regex( r'<h1 [^>]*>([^<]+)', webpage, 'title') - video_url = self._html_search_regex( - r'(?s)html5VideoPlayer = .*?src="([^"]+)"', webpage, 'video URL') - path = compat_urllib_parse_urlparse(video_url).path - extension = os.path.splitext(path)[1][1:] - format = path.split('/')[4].split('_')[:2] - format = "-".join(format) + flashvars = self._parse_json(self._search_regex( + r'var\s+flashvars\s*=\s*([^;]+);', webpage, 'flashvars'), video_id) + + formats = [] + for height in (180, 240, 480): + if flashvars.get('quality_%dp' % height): + video_url = flashvars['quality_%dp' % height] + a_format = { + 'url': video_url, + 'height': height, + 'format_id': '%dp' % height, + } + filename_parts = url_basename(video_url).split('_') + if len(filename_parts) >= 2 and re.match(r'\d+[Kk]', filename_parts[1]): + a_format['tbr'] = int(filename_parts[1][:-1]) + formats.append(a_format) age_limit = self._rta_search(webpage) return { 'id': video_id, 'title': video_title, - 'url': video_url, - 'ext': extension, - 
'format': format, - 'format_id': format, + 'formats': formats, 'age_limit': age_limit, + 'thumbnail': flashvars.get('image_url') } diff --git a/youtube_dl/extractor/kontrtube.py b/youtube_dl/extractor/kontrtube.py index 720bc939b..a59c529f4 100644 --- a/youtube_dl/extractor/kontrtube.py +++ b/youtube_dl/extractor/kontrtube.py @@ -4,7 +4,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..utils import int_or_none +from ..utils import ( + int_or_none, + parse_duration, +) class KontrTubeIE(InfoExtractor): @@ -34,33 +37,28 @@ class KontrTubeIE(InfoExtractor): webpage = self._download_webpage( url, display_id, 'Downloading page') - video_url = self._html_search_regex( + video_url = self._search_regex( r"video_url\s*:\s*'(.+?)/?',", webpage, 'video URL') - thumbnail = self._html_search_regex( - r"preview_url\s*:\s*'(.+?)/?',", webpage, 'video thumbnail', fatal=False) + thumbnail = self._search_regex( + r"preview_url\s*:\s*'(.+?)/?',", webpage, 'thumbnail', fatal=False) title = self._html_search_regex( - r'<title>(.+?)</title>', webpage, 'video title') + r'(?s)<h2>(.+?)</h2>', webpage, 'title') description = self._html_search_meta( - 'description', webpage, 'video description') + 'description', webpage, 'description') - mobj = re.search( - r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>', - webpage) - duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None + duration = self._search_regex( + r'Длительность: <em>([^<]+)</em>', webpage, 'duration', fatal=False) + if duration: + duration = parse_duration(duration.replace('мин', 'min').replace('сек', 'sec')) - view_count = self._html_search_regex( - r'<div class="col_2">Просмотров: <span>(\d+)</span></div>', + view_count = self._search_regex( + r'Просмотров: <em>([^<]+)</em>', webpage, 'view count', fatal=False) + if view_count: + view_count = int_or_none(view_count.replace(' ', '')) - comment_count = None - comment_str = self._html_search_regex( - r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count', fatal=False) - if comment_str.startswith('комментариев нет'): - comment_count = 0 - else: - mobj = re.search(r'\d+ из (?P<total>\d+) комментариев', comment_str) - if mobj: - comment_count = mobj.group('total') + comment_count = int_or_none(self._search_regex( + r'Комментарии \((\d+)\)<', webpage, ' comment count', fatal=False)) return { 'id': video_id, diff --git a/youtube_dl/extractor/krasview.py b/youtube_dl/extractor/krasview.py index 96f95979a..0ae8ebd68 100644 --- a/youtube_dl/extractor/krasview.py +++ b/youtube_dl/extractor/krasview.py @@ -25,6 +25,9 @@ class KrasViewIE(InfoExtractor): 'duration': 27, 'thumbnail': 're:^https?://.*\.jpg', }, + 'params': { + 'skip_download': 'Not accessible from Travis CI server', + }, } def _real_extract(self, url): diff --git a/youtube_dl/extractor/kuwo.py b/youtube_dl/extractor/kuwo.py index 1077846f2..0c8ed5d07 100644 --- a/youtube_dl/extractor/kuwo.py +++ b/youtube_dl/extractor/kuwo.py @@ -57,6 +57,7 @@ class KuwoIE(KuwoBaseIE): 'upload_date': '20080122', 'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c' }, + 'skip': 'this song has been offline because of copyright issues', }, { 'url': 'http://www.kuwo.cn/yinyue/6446136/', 'info_dict': { @@ -76,9 +77,11 @@ class KuwoIE(KuwoBaseIE): webpage = self._download_webpage( url, song_id, note='Download song detail info', errnote='Unable to get song detail info') + if '对不起,该歌曲由于版权问题已被下线,将返回网站首页' in webpage: + raise ExtractorError('this 
song has been offline because of copyright issues', expected=True) song_name = self._html_search_regex( - r'<h1[^>]+title="([^"]+)">', webpage, 'song name') + r'(?s)class="(?:[^"\s]+\s+)*title(?:\s+[^"\s]+)*".*?<h1[^>]+title="([^"]+)"', webpage, 'song name') singer_name = self._html_search_regex( r'<div[^>]+class="s_img">\s*<a[^>]+title="([^>]+)"', webpage, 'singer name', fatal=False) @@ -202,6 +205,7 @@ class KuwoSingerIE(InfoExtractor): 'title': 'Ali', }, 'playlist_mincount': 95, + 'skip': 'Regularly stalls travis build', # See https://travis-ci.org/rg3/youtube-dl/jobs/78878540 }] def _real_extract(self, url): diff --git a/youtube_dl/extractor/letv.py b/youtube_dl/extractor/letv.py index ba2ae8085..be648000e 100644 --- a/youtube_dl/extractor/letv.py +++ b/youtube_dl/extractor/letv.py @@ -8,13 +8,15 @@ import time from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, - compat_urlparse, + compat_ord, ) from ..utils import ( determine_ext, ExtractorError, parse_iso8601, + sanitized_Request, + int_or_none, + encode_data_uri, ) @@ -24,15 +26,16 @@ class LetvIE(InfoExtractor): _TESTS = [{ 'url': 'http://www.letv.com/ptv/vplay/22005890.html', - 'md5': 'cab23bd68d5a8db9be31c9a222c1e8df', + 'md5': 'edadcfe5406976f42f9f266057ee5e40', 'info_dict': { 'id': '22005890', 'ext': 'mp4', 'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家', - 'timestamp': 1424747397, - 'upload_date': '20150224', 'description': 'md5:a9cb175fd753e2962176b7beca21a47c', - } + }, + 'params': { + 'hls_prefer_native': True, + }, }, { 'url': 'http://www.letv.com/ptv/vplay/1415246.html', 'info_dict': { @@ -41,16 +44,22 @@ class LetvIE(InfoExtractor): 'title': '美人天下01', 'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda', }, + 'params': { + 'hls_prefer_native': True, + }, }, { 'note': 'This video is available only in Mainland China, thus a proxy is needed', 'url': 'http://www.letv.com/ptv/vplay/1118082.html', - 'md5': 'f80936fbe20fb2f58648e81386ff7927', + 'md5': '2424c74948a62e5f31988438979c5ad1', 'info_dict': { 'id': '1118082', 'ext': 'mp4', 'title': '与龙共舞 完整版', 'description': 'md5:7506a5eeb1722bb9d4068f85024e3986', }, + 'params': { + 'hls_prefer_native': True, + }, 'skip': 'Only available in China', }] @@ -73,6 +82,27 @@ class LetvIE(InfoExtractor): _loc3_ = self.ror(_loc3_, _loc2_ % 17) return _loc3_ + # see M3U8Encryption class in KLetvPlayer.swf + @staticmethod + def decrypt_m3u8(encrypted_data): + if encrypted_data[:5].decode('utf-8').lower() != 'vc_01': + return encrypted_data + encrypted_data = encrypted_data[5:] + + _loc4_ = bytearray() + while encrypted_data: + b = compat_ord(encrypted_data[0]) + _loc4_.extend([b // 16, b & 0x0f]) + encrypted_data = encrypted_data[1:] + idx = len(_loc4_) - 11 + _loc4_ = _loc4_[idx:] + _loc4_[:idx] + _loc7_ = bytearray() + while _loc4_: + _loc7_.append(_loc4_[0] * 16 + _loc4_[1]) + _loc4_ = _loc4_[2:] + + return bytes(_loc7_) + def _real_extract(self, url): media_id = self._match_id(url) page = self._download_webpage(url, media_id) @@ -84,7 +114,7 @@ class LetvIE(InfoExtractor): 'tkey': self.calc_time_key(int(time.time())), 'domain': 'www.letv.com' } - play_json_req = compat_urllib_request.Request( + play_json_req = sanitized_Request( 'http://api.letv.com/mms/out/video/playJson?' 
+ compat_urllib_parse.urlencode(params) ) cn_verification_proxy = self._downloader.params.get('cn_verification_proxy') @@ -114,27 +144,32 @@ class LetvIE(InfoExtractor): for format_id in formats: if format_id in dispatch: media_url = playurl['domain'][0] + dispatch[format_id][0] - - # Mimic what flvxz.com do - url_parts = list(compat_urlparse.urlparse(media_url)) - qs = dict(compat_urlparse.parse_qs(url_parts[4])) - qs.update({ - 'platid': '14', - 'splatid': '1401', - 'tss': 'no', - 'retry': 1 + media_url += '&' + compat_urllib_parse.urlencode({ + 'm3v': 1, + 'format': 1, + 'expect': 3, + 'rateid': format_id, }) - url_parts[4] = compat_urllib_parse.urlencode(qs) - media_url = compat_urlparse.urlunparse(url_parts) + + nodes_data = self._download_json( + media_url, media_id, + 'Download JSON metadata for format %s' % format_id) + + req = self._request_webpage( + nodes_data['nodelist'][0]['location'], media_id, + note='Downloading m3u8 information for format %s' % format_id) + + m3u8_data = self.decrypt_m3u8(req.read()) url_info_dict = { - 'url': media_url, + 'url': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'), 'ext': determine_ext(dispatch[format_id][1]), 'format_id': format_id, + 'protocol': 'm3u8', } if format_id[-1:] == 'p': - url_info_dict['height'] = format_id[:-1] + url_info_dict['height'] = int_or_none(format_id[:-1]) urls.append(url_info_dict) diff --git a/youtube_dl/extractor/libsyn.py b/youtube_dl/extractor/libsyn.py index 9ab1416f5..d375695f5 100644 --- a/youtube_dl/extractor/libsyn.py +++ b/youtube_dl/extractor/libsyn.py @@ -8,9 +8,9 @@ from ..utils import unified_strdate class LibsynIE(InfoExtractor): - _VALID_URL = r'https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+)' + _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))' - _TEST = { + _TESTS = [{ 'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/', 'md5': '443360ee1b58007bc3dcf09b41d093bb', 'info_dict': { @@ -19,12 +19,24 @@ class LibsynIE(InfoExtractor): 'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart", 'description': 'md5:601cb790edd05908957dae8aaa866465', 'upload_date': '20150220', + 'thumbnail': 're:^https?://.*', }, - } + }, { + 'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/', + 'md5': '6c5cb21acd622d754d3b1a92b582ce42', + 'info_dict': { + 'id': '3727166', + 'ext': 'mp3', + 'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career', + 'upload_date': '20150818', + 'thumbnail': 're:^https?://.*', + } + }] def _real_extract(self, url): - video_id = self._match_id(url) - + m = re.match(self._VALID_URL, url) + video_id = m.group('id') + url = m.group('mainurl') webpage = self._download_webpage(url, video_id) formats = [{ @@ -32,20 +44,18 @@ class LibsynIE(InfoExtractor): } for media_url in set(re.findall('var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))] podcast_title = self._search_regex( - r'<h2>([^<]+)</h2>', webpage, 'title') + r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None) episode_title = self._search_regex( - r'<h3>([^<]+)</h3>', webpage, 'title', default=None) + r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title') title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title description = self._html_search_regex( r'<div id="info_text_body">(.+?)</div>', 
webpage, - 'description', fatal=False) - + 'description', default=None) thumbnail = self._search_regex( r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"', webpage, 'thumbnail', fatal=False) - release_date = unified_strdate(self._search_regex( r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False)) diff --git a/youtube_dl/extractor/limelight.py b/youtube_dl/extractor/limelight.py new file mode 100644 index 000000000..fb03dd527 --- /dev/null +++ b/youtube_dl/extractor/limelight.py @@ -0,0 +1,229 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + float_or_none, + int_or_none, +) + + +class LimelightBaseIE(InfoExtractor): + _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s' + _API_URL = 'http://api.video.limelight.com/rest/organizations/%s/%s/%s/%s.json' + + def _call_playlist_service(self, item_id, method, fatal=True): + return self._download_json( + self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), + item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal) + + def _call_api(self, organization_id, item_id, method): + return self._download_json( + self._API_URL % (organization_id, self._API_PATH, item_id, method), + item_id, 'Downloading API %s JSON' % method) + + def _extract(self, item_id, pc_method, mobile_method, meta_method): + pc = self._call_playlist_service(item_id, pc_method) + metadata = self._call_api(pc['orgId'], item_id, meta_method) + mobile = self._call_playlist_service(item_id, mobile_method, fatal=False) + return pc, mobile, metadata + + def _extract_info(self, streams, mobile_urls, properties): + video_id = properties['media_id'] + formats = [] + + for stream in streams: + stream_url = stream.get('url') + if not stream_url: + continue + if '.f4m' in stream_url: + formats.extend(self._extract_f4m_formats(stream_url, video_id)) + else: + fmt = { + 'url': stream_url, + 'abr': float_or_none(stream.get('audioBitRate')), + 'vbr': float_or_none(stream.get('videoBitRate')), + 'fps': float_or_none(stream.get('videoFrameRate')), + 'width': int_or_none(stream.get('videoWidthInPixels')), + 'height': int_or_none(stream.get('videoHeightInPixels')), + 'ext': determine_ext(stream_url) + } + rtmp = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', stream_url) + if rtmp: + format_id = 'rtmp' + if stream.get('videoBitRate'): + format_id += '-%d' % int_or_none(stream['videoBitRate']) + fmt.update({ + 'url': rtmp.group('url'), + 'play_path': rtmp.group('playpath'), + 'app': rtmp.group('app'), + 'ext': 'flv', + 'format_id': format_id, + }) + formats.append(fmt) + + for mobile_url in mobile_urls: + media_url = mobile_url.get('mobileUrl') + if not media_url: + continue + format_id = mobile_url.get('targetMediaPlatform') + if determine_ext(media_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + media_url, video_id, 'mp4', entry_protocol='m3u8_native', + preference=-1, m3u8_id=format_id)) + else: + formats.append({ + 'url': media_url, + 'format_id': format_id, + 'preference': -1, + }) + + self._sort_formats(formats) + + title = properties['title'] + description = properties.get('description') + timestamp = int_or_none(properties.get('publish_date') or properties.get('create_date')) + duration = float_or_none(properties.get('duration_in_milliseconds'), 1000) + filesize = int_or_none(properties.get('total_storage_in_bytes')) + categories = 
[properties.get('category')] + tags = properties.get('tags', []) + thumbnails = [{ + 'url': thumbnail['url'], + 'width': int_or_none(thumbnail.get('width')), + 'height': int_or_none(thumbnail.get('height')), + } for thumbnail in properties.get('thumbnails', []) if thumbnail.get('url')] + + subtitles = {} + for caption in properties.get('captions', {}): + lang = caption.get('language_code') + subtitles_url = caption.get('url') + if lang and subtitles_url: + subtitles[lang] = [{ + 'url': subtitles_url, + }] + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'formats': formats, + 'timestamp': timestamp, + 'duration': duration, + 'filesize': filesize, + 'categories': categories, + 'tags': tags, + 'thumbnails': thumbnails, + 'subtitles': subtitles, + } + + +class LimelightMediaIE(LimelightBaseIE): + IE_NAME = 'limelight' + _VALID_URL = r'(?:limelight:media:|http://link\.videoplatform\.limelight\.com/media/\??\bmediaId=)(?P<id>[a-z0-9]{32})' + _TESTS = [{ + 'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86', + 'info_dict': { + 'id': '3ffd040b522b4485b6d84effc750cd86', + 'ext': 'flv', + 'title': 'HaP and the HB Prince Trailer', + 'description': 'md5:8005b944181778e313d95c1237ddb640', + 'thumbnail': 're:^https?://.*\.jpeg$', + 'duration': 144.23, + 'timestamp': 1244136834, + 'upload_date': '20090604', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + # video with subtitles + 'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335', + 'info_dict': { + 'id': 'a3e00274d4564ec4a9b29b9466432335', + 'ext': 'flv', + 'title': '3Play Media Overview Video', + 'description': '', + 'thumbnail': 're:^https?://.*\.jpeg$', + 'duration': 78.101, + 'timestamp': 1338929955, + 'upload_date': '20120605', + 'subtitles': 'mincount:9', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }] + _PLAYLIST_SERVICE_PATH = 'media' + _API_PATH = 'media' + + def _real_extract(self, url): + video_id = self._match_id(url) + + pc, mobile, metadata = self._extract( + video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', 'properties') + + return self._extract_info( + pc['playlistItems'][0].get('streams', []), + mobile['mediaList'][0].get('mobileUrls', []) if mobile else [], + metadata) + + +class LimelightChannelIE(LimelightBaseIE): + IE_NAME = 'limelight:channel' + _VALID_URL = r'(?:limelight:channel:|http://link\.videoplatform\.limelight\.com/media/\??\bchannelId=)(?P<id>[a-z0-9]{32})' + _TEST = { + 'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082', + 'info_dict': { + 'id': 'ab6a524c379342f9b23642917020c082', + 'title': 'Javascript Sample Code', + }, + 'playlist_mincount': 3, + } + _PLAYLIST_SERVICE_PATH = 'channel' + _API_PATH = 'channels' + + def _real_extract(self, url): + channel_id = self._match_id(url) + + pc, mobile, medias = self._extract( + channel_id, 'getPlaylistByChannelId', + 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', 'media') + + entries = [ + self._extract_info( + pc['playlistItems'][i].get('streams', []), + mobile['mediaList'][i].get('mobileUrls', []) if mobile else [], + medias['media_list'][i]) + for i in range(len(medias['media_list']))] + + return self.playlist_result(entries, channel_id, pc['title']) + + +class LimelightChannelListIE(LimelightBaseIE): + IE_NAME = 'limelight:channel_list' + _VALID_URL = 
r'(?:limelight:channel_list:|http://link\.videoplatform\.limelight\.com/media/\?.*?\bchannelListId=)(?P<id>[a-z0-9]{32})' + _TEST = { + 'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b', + 'info_dict': { + 'id': '301b117890c4465c8179ede21fd92e2b', + 'title': 'Website - Hero Player', + }, + 'playlist_mincount': 2, + } + _PLAYLIST_SERVICE_PATH = 'channel_list' + + def _real_extract(self, url): + channel_list_id = self._match_id(url) + + channel_list = self._call_playlist_service(channel_list_id, 'getMobileChannelListById') + + entries = [ + self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel') + for channel in channel_list['channelList']] + + return self.playlist_result(entries, channel_list_id, channel_list['title']) diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py index a00f6e5e5..d4e1ae99d 100644 --- a/youtube_dl/extractor/lynda.py +++ b/youtube_dl/extractor/lynda.py @@ -7,17 +7,17 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, ) from ..utils import ( ExtractorError, + clean_html, int_or_none, + sanitized_Request, ) class LyndaBaseIE(InfoExtractor): _LOGIN_URL = 'https://www.lynda.com/login/login.aspx' - _SUCCESSFUL_LOGIN_REGEX = r'isLoggedIn: true' _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.' _NETRC_MACHINE = 'lynda' @@ -25,7 +25,7 @@ class LyndaBaseIE(InfoExtractor): self._login() def _login(self): - (username, password) = self._get_login_info() + username, password = self._get_login_info() if username is None: return @@ -35,13 +35,13 @@ class LyndaBaseIE(InfoExtractor): 'remember': 'false', 'stayPut': 'false' } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) login_page = self._download_webpage( request, None, 'Logging in as %s' % username) # Not (yet) logged in - m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page) + m = re.search(r'loginResultJson\s*=\s*\'(?P<json>[^\']+)\';', login_page) if m is not None: response = m.group('json') response_json = json.loads(response) @@ -64,15 +64,33 @@ class LyndaBaseIE(InfoExtractor): 'remember': 'false', 'stayPut': 'false', } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8')) login_page = self._download_webpage( request, None, 'Confirming log in and log out from another device') - if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None: + if all(not re.search(p, login_page) for p in ('isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')): + if 'login error' in login_page: + mobj = re.search( + r'(?s)<h1[^>]+class="topmost">(?P<title>[^<]+)</h1>\s*<div>(?P<description>.+?)</div>', + login_page) + if mobj: + raise ExtractorError( + 'lynda returned error: %s - %s' + % (mobj.group('title'), clean_html(mobj.group('description'))), + expected=True) raise ExtractorError('Unable to log in') + def _logout(self): + username, _ = self._get_login_info() + if username is None: + return + + self._download_webpage( + 'http://www.lynda.com/ajax/logout.aspx', None, + 'Logging out', 'Unable to log out', fatal=False) + class LyndaIE(LyndaBaseIE): IE_NAME = 'lynda' @@ -99,52 +117,47 @@ class LyndaIE(LyndaBaseIE): def _real_extract(self, url): video_id = self._match_id(url) - page = self._download_webpage( + 
video = self._download_json( 'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id, 'Downloading video JSON') - video_json = json.loads(page) - if 'Status' in video_json: + if 'Status' in video: raise ExtractorError( - 'lynda returned error: %s' % video_json['Message'], expected=True) + 'lynda returned error: %s' % video['Message'], expected=True) - if video_json['HasAccess'] is False: - raise ExtractorError( - 'Video %s is only available for members. ' - % video_id + self._ACCOUNT_CREDENTIALS_HINT, expected=True) + if video.get('HasAccess') is False: + self.raise_login_required('Video %s is only available for members' % video_id) - video_id = compat_str(video_json['ID']) - duration = video_json['DurationInSeconds'] - title = video_json['Title'] + video_id = compat_str(video.get('ID') or video_id) + duration = int_or_none(video.get('DurationInSeconds')) + title = video['Title'] formats = [] - fmts = video_json.get('Formats') + fmts = video.get('Formats') if fmts: - formats.extend([ - { - 'url': fmt['Url'], - 'ext': fmt['Extension'], - 'width': fmt['Width'], - 'height': fmt['Height'], - 'filesize': fmt['FileSize'], - 'format_id': str(fmt['Resolution']) - } for fmt in fmts]) - - prioritized_streams = video_json.get('PrioritizedStreams') + formats.extend([{ + 'url': f['Url'], + 'ext': f.get('Extension'), + 'width': int_or_none(f.get('Width')), + 'height': int_or_none(f.get('Height')), + 'filesize': int_or_none(f.get('FileSize')), + 'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None, + } for f in fmts if f.get('Url')]) + + prioritized_streams = video.get('PrioritizedStreams') if prioritized_streams: - formats.extend([ - { + for prioritized_stream_id, prioritized_stream in prioritized_streams.items(): + formats.extend([{ 'url': video_url, 'width': int_or_none(format_id), - 'format_id': format_id, - } for format_id, video_url in prioritized_streams['0'].items() - ]) + 'format_id': '%s-%s' % (prioritized_stream_id, format_id), + } for format_id, video_url in prioritized_stream.items()]) self._check_formats(formats, video_id) self._sort_formats(formats) - subtitles = self.extract_subtitles(video_id, page) + subtitles = self.extract_subtitles(video_id) return { 'id': video_id, @@ -175,7 +188,7 @@ class LyndaIE(LyndaBaseIE): if srt: return srt - def _get_subtitles(self, video_id, webpage): + def _get_subtitles(self, video_id): url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id subs = self._download_json(url, None, False) if subs: @@ -197,12 +210,13 @@ class LyndaCourseIE(LyndaBaseIE): course_path = mobj.group('coursepath') course_id = mobj.group('courseid') - page = self._download_webpage( + course = self._download_json( 'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id, course_id, 'Downloading course JSON') - course_json = json.loads(page) - if 'Status' in course_json and course_json['Status'] == 'NotFound': + self._logout() + + if course.get('Status') == 'NotFound': raise ExtractorError( 'Course %s does not exist' % course_id, expected=True) @@ -212,12 +226,13 @@ class LyndaCourseIE(LyndaBaseIE): # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided # by single video API anymore - for chapter in course_json['Chapters']: - for video in chapter['Videos']: - if video['HasAccess'] is False: + for chapter in course['Chapters']: + for video in chapter.get('Videos', []): + if video.get('HasAccess') is False: unaccessible_videos += 1 continue - 
videos.append(video['ID']) + if video.get('ID'): + videos.append(video['ID']) if unaccessible_videos > 0: self._downloader.report_warning( @@ -230,6 +245,6 @@ class LyndaCourseIE(LyndaBaseIE): 'Lynda') for video_id in videos] - course_title = course_json['Title'] + course_title = course.get('Title') return self.playlist_result(entries, course_id, course_title) diff --git a/youtube_dl/extractor/mailru.py b/youtube_dl/extractor/mailru.py index 54a14cb94..ab1300185 100644 --- a/youtube_dl/extractor/mailru.py +++ b/youtube_dl/extractor/mailru.py @@ -25,6 +25,7 @@ class MailRuIE(InfoExtractor): 'uploader_id': 'sonypicturesrus@mail.ru', 'duration': 184, }, + 'skip': 'Not accessible from Travis CI server', }, { 'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html', @@ -39,6 +40,7 @@ class MailRuIE(InfoExtractor): 'uploader_id': 'hitech@corp.mail.ru', 'duration': 245, }, + 'skip': 'Not accessible from Travis CI server', }, ] diff --git a/youtube_dl/extractor/mdr.py b/youtube_dl/extractor/mdr.py index 5fdd19027..88334889e 100644 --- a/youtube_dl/extractor/mdr.py +++ b/youtube_dl/extractor/mdr.py @@ -1,64 +1,169 @@ +# coding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + determine_ext, + int_or_none, + parse_duration, + parse_iso8601, + xpath_text, +) class MDRIE(InfoExtractor): - _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)' + IE_DESC = 'MDR.DE and KiKA' + _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html' - # No tests, MDR regularily deletes its videos - _TEST = { + _TESTS = [{ + # MDR regularily deletes its videos 'url': 'http://www.mdr.de/fakt/video189002.html', 'only_matching': True, - } + }, { + # audio + 'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html', + 'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa', + 'info_dict': { + 'id': '1312272', + 'ext': 'mp3', + 'title': 'Feuilleton vom 30. Oktober 2015', + 'duration': 250, + 'uploader': 'MITTELDEUTSCHER RUNDFUNK', + }, + }, { + 'url': 'http://www.kika.de/baumhaus/videos/video19636.html', + 'md5': '4930515e36b06c111213e80d1e4aad0e', + 'info_dict': { + 'id': '19636', + 'ext': 'mp4', + 'title': 'Baumhaus vom 30. 
Oktober 2015', + 'duration': 134, + 'uploader': 'KIKA', + }, + }, { + 'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html', + 'md5': '5fe9c4dd7d71e3b238f04b8fdd588357', + 'info_dict': { + 'id': '8182', + 'ext': 'mp4', + 'title': 'Beutolomäus und der geheime Weihnachtswunsch', + 'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd', + 'timestamp': 1419047100, + 'upload_date': '20141220', + 'duration': 4628, + 'uploader': 'KIKA', + }, + }, { + 'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html', + 'only_matching': True, + }, { + 'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html', + 'only_matching': True, + }] def _real_extract(self, url): - m = re.match(self._VALID_URL, url) - video_id = m.group('video_id') - domain = m.group('domain') + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + data_url = self._search_regex( + r'dataURL\s*:\s*(["\'])(?P<url>/.+/(?:video|audio)[0-9]+-avCustom\.xml)\1', + webpage, 'data url', group='url') - # determine title and media streams from webpage - html = self._download_webpage(url, video_id) + doc = self._download_xml( + compat_urlparse.urljoin(url, data_url), video_id) - title = self._html_search_regex(r'<h[12]>(.*?)</h[12]>', html, 'title') - xmlurl = self._search_regex( - r'dataURL:\'(/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, 'XML URL') + title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True) - doc = self._download_xml(domain + xmlurl, video_id) formats = [] - for a in doc.findall('./assets/asset'): - url_el = a.find('.//progressiveDownloadUrl') - if url_el is None: - continue - abr = int(a.find('bitrateAudio').text) // 1000 - media_type = a.find('mediaType').text - format = { - 'abr': abr, - 'filesize': int(a.find('fileSize').text), - 'url': url_el.text, - } - - vbr_el = a.find('bitrateVideo') - if vbr_el is None: - format.update({ - 'vcodec': 'none', - 'format_id': '%s-%d' % (media_type, abr), - }) - else: - vbr = int(vbr_el.text) // 1000 - format.update({ - 'vbr': vbr, - 'width': int(a.find('frameWidth').text), - 'height': int(a.find('frameHeight').text), - 'format_id': '%s-%d' % (media_type, vbr), - }) - formats.append(format) + processed_urls = [] + for asset in doc.findall('./assets/asset'): + for source in ( + 'progressiveDownload', + 'dynamicHttpStreamingRedirector', + 'adaptiveHttpStreamingRedirector'): + url_el = asset.find('./%sUrl' % source) + if url_el is None: + continue + + video_url = url_el.text + if video_url in processed_urls: + continue + + processed_urls.append(video_url) + + vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000) + abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000) + + ext = determine_ext(url_el.text) + if ext == 'm3u8': + url_formats = self._extract_m3u8_formats( + video_url, video_id, 'mp4', entry_protocol='m3u8_native', + preference=0, m3u8_id='HLS', fatal=False) + elif ext == 'f4m': + url_formats = self._extract_f4m_formats( + video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, + preference=0, f4m_id='HDS', fatal=False) + else: + media_type = xpath_text(asset, './mediaType', 'media type', default='MP4') + vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000) + abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000) + filesize = int_or_none(xpath_text(asset, './fileSize', 'file size')) + + f = { + 'url': video_url, + 'format_id': '%s-%d' % (media_type, 
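+                        # label the format by video bitrate when present; audio-only assets carry no bitrateVideo, so fall back to the audio bitrate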
vbr or abr), + 'filesize': filesize, + 'abr': abr, + 'preference': 1, + } + + if vbr: + width = int_or_none(xpath_text(asset, './frameWidth', 'width')) + height = int_or_none(xpath_text(asset, './frameHeight', 'height')) + f.update({ + 'vbr': vbr, + 'width': width, + 'height': height, + }) + + url_formats = [f] + + if not url_formats: + continue + + if not vbr: + for f in url_formats: + abr = f.get('tbr') or abr + if 'tbr' in f: + del f['tbr'] + f.update({ + 'abr': abr, + 'vcodec': 'none', + }) + + formats.extend(url_formats) + self._sort_formats(formats) + description = xpath_text(doc, './broadcast/broadcastDescription', 'description') + timestamp = parse_iso8601( + xpath_text( + doc, [ + './broadcast/broadcastDate', + './broadcast/broadcastStartDate', + './broadcast/broadcastEndDate'], + 'timestamp', default=None)) + duration = parse_duration(xpath_text(doc, './duration', 'duration')) + uploader = xpath_text(doc, './rights', 'uploader') + return { 'id': video_id, 'title': title, + 'description': description, + 'timestamp': timestamp, + 'duration': duration, + 'uploader': uploader, 'formats': formats, } diff --git a/youtube_dl/extractor/megavideoz.py b/youtube_dl/extractor/megavideoz.py deleted file mode 100644 index af7ff07ea..000000000 --- a/youtube_dl/extractor/megavideoz.py +++ /dev/null @@ -1,56 +0,0 @@ -# encoding: utf-8 -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..utils import ( - ExtractorError, - float_or_none, - xpath_text, -) - - -class MegaVideozIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?megavideoz\.eu/video/(?P<id>[^/]+)(?:/(?P<display_id>[^/]+))?' - _TEST = { - 'url': 'http://megavideoz.eu/video/WM6UB919XMXH/SMPTE-Universal-Film-Leader', - 'info_dict': { - 'id': '48723', - 'display_id': 'SMPTE-Universal-Film-Leader', - 'ext': 'mp4', - 'title': 'SMPTE Universal Film Leader', - 'thumbnail': 're:https?://.*?\.jpg', - 'duration': 10.93, - } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - display_id = mobj.group('display_id') or video_id - - webpage = self._download_webpage(url, display_id) - - if any(p in webpage for p in ('>Video Not Found<', '>404 Error<')): - raise ExtractorError('Video %s does not exist' % video_id, expected=True) - - config = self._download_xml( - self._search_regex( - r"var\s+cnf\s*=\s*'([^']+)'", webpage, 'cnf url'), - display_id) - - video_url = xpath_text(config, './file', 'video url', fatal=True) - title = xpath_text(config, './title', 'title', fatal=True) - thumbnail = xpath_text(config, './image', 'thumbnail') - duration = float_or_none(xpath_text(config, './duration', 'duration')) - video_id = xpath_text(config, './mediaid', 'video id') or video_id - - return { - 'id': video_id, - 'display_id': display_id, - 'url': video_url, - 'title': title, - 'thumbnail': thumbnail, - 'duration': duration - } diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index 6e2e73a51..67d6271e1 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -7,12 +7,12 @@ from ..compat import ( compat_parse_qs, compat_urllib_parse, compat_urllib_parse_unquote, - compat_urllib_request, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, + sanitized_Request, ) @@ -117,7 +117,7 @@ class MetacafeIE(InfoExtractor): 'filters': '0', 'submit': "Continue - I'm over 18", } - request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) + 
request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') self.report_age_confirmation() self._download_webpage(request, None, False, 'Unable to confirm age') @@ -142,7 +142,7 @@ class MetacafeIE(InfoExtractor): return self.url_result('theplatform:%s' % ext_id, 'ThePlatform') # Retrieve video webpage to extract further information - req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id) + req = sanitized_Request('http://www.metacafe.com/watch/%s/' % video_id) # AnyClip videos require the flashversion cookie so that we get the link # to the mp4 file @@ -154,10 +154,10 @@ class MetacafeIE(InfoExtractor): # Extract URL, uploader and title from webpage self.report_extraction(video_id) video_url = None - mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage) + mobj = re.search(r'(?m)&(?:media|video)URL=([^&]+)', webpage) if mobj is not None: mediaURL = compat_urllib_parse_unquote(mobj.group(1)) - video_ext = mediaURL[-3:] + video_ext = determine_ext(mediaURL) # Extract gdaKey if available mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) @@ -229,7 +229,7 @@ class MetacafeIE(InfoExtractor): age_limit = ( 18 - if re.search(r'"contentRating":"restricted"', webpage) + if re.search(r'(?:"contentRating":|"rating",)"restricted"', webpage) else 0) if isinstance(video_url, list): diff --git a/youtube_dl/extractor/minhateca.py b/youtube_dl/extractor/minhateca.py index 14934b7ec..e46b23a6f 100644 --- a/youtube_dl/extractor/minhateca.py +++ b/youtube_dl/extractor/minhateca.py @@ -2,14 +2,12 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( int_or_none, parse_duration, parse_filesize, + sanitized_Request, ) @@ -39,7 +37,7 @@ class MinhatecaIE(InfoExtractor): ('fileId', video_id), ('__RequestVerificationToken', token), ] - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://minhateca.com.br/action/License/Download', data=compat_urllib_parse.urlencode(token_data)) req.add_header('Content-Type', 'application/x-www-form-urlencoded') diff --git a/youtube_dl/extractor/miomio.py b/youtube_dl/extractor/miomio.py index a784fc5fb..170ebd9eb 100644 --- a/youtube_dl/extractor/miomio.py +++ b/youtube_dl/extractor/miomio.py @@ -8,6 +8,7 @@ from ..utils import ( xpath_text, int_or_none, ExtractorError, + sanitized_Request, ) @@ -51,6 +52,8 @@ class MioMioIE(InfoExtractor): mioplayer_path = self._search_regex( r'src="(/mioplayer/[^"]+)"', webpage, 'ref_path') + http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path} + xml_config = self._search_regex( r'flashvars="type=(?:sina|video)&(.+?)&', webpage, 'xml config') @@ -60,14 +63,12 @@ class MioMioIE(InfoExtractor): 'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)), video_id) - # the following xml contains the actual configuration information on the video file(s) - vid_config = self._download_xml( + vid_config_request = sanitized_Request( 'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config), - video_id) + headers=http_headers) - http_headers = { - 'Referer': 'http://www.miomio.tv%s' % mioplayer_path, - } + # the following xml contains the actual configuration information on the video file(s) + vid_config = self._download_xml(vid_config_request, video_id) if 
not int_or_none(xpath_text(vid_config, 'timelength')): raise ExtractorError('Unable to load videos!', expected=True) diff --git a/youtube_dl/extractor/mit.py b/youtube_dl/extractor/mit.py index d7ab6a9ae..29ca45778 100644 --- a/youtube_dl/extractor/mit.py +++ b/youtube_dl/extractor/mit.py @@ -18,12 +18,12 @@ class TechTVMITIE(InfoExtractor): _TEST = { 'url': 'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set', - 'md5': '1f8cb3e170d41fd74add04d3c9330e5f', + 'md5': '00a3a27ee20d44bcaa0933ccec4a2cf7', 'info_dict': { 'id': '25418', 'ext': 'mp4', - 'title': 'MIT DNA Learning Center Set', - 'description': 'md5:82313335e8a8a3f243351ba55bc1b474', + 'title': 'MIT DNA and Protein Sets', + 'description': 'md5:46f5c69ce434f0a97e7c628cc142802d', }, } @@ -33,8 +33,8 @@ class TechTVMITIE(InfoExtractor): 'http://techtv.mit.edu/videos/%s' % video_id, video_id) clean_page = re.compile(r'<!--.*?-->', re.S).sub('', raw_page) - base_url = self._search_regex( - r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url') + base_url = self._proto_relative_url(self._search_regex( + r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url'), 'http:') formats_json = self._search_regex( r'bitrates: (\[.+?\])', raw_page, 'video formats') formats_mit = json.loads(formats_json) @@ -86,7 +86,7 @@ class MITIE(TechTVMITIE): webpage = self._download_webpage(url, page_title) embed_url = self._search_regex( r'<iframe .*?src="(.+?)"', webpage, 'embed url') - return self.url_result(embed_url, ie='TechTVMIT') + return self.url_result(embed_url) class OCWMITIE(InfoExtractor): diff --git a/youtube_dl/extractor/mitele.py b/youtube_dl/extractor/mitele.py index 852d72266..c595f2077 100644 --- a/youtube_dl/extractor/mitele.py +++ b/youtube_dl/extractor/mitele.py @@ -1,74 +1,89 @@ from __future__ import unicode_literals -import json - from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_parse_unquote, compat_urlparse, ) from ..utils import ( + encode_dict, get_element_by_attribute, - parse_duration, - strip_jsonp, + int_or_none, ) class MiTeleIE(InfoExtractor): - IE_NAME = 'mitele.es' + IE_DESC = 'mitele.es' _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/' _TESTS = [{ 'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/', + 'md5': '0ff1a13aebb35d9bc14081ff633dd324', 'info_dict': { - 'id': '0fce117d', - 'ext': 'mp4', - 'title': 'Programa 144 - Tor, la web invisible', - 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f', + 'id': '0NF1jJnxS1Wu3pHrmvFyw2', 'display_id': 'programa-144', + 'ext': 'flv', + 'title': 'Tor, la web invisible', + 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f', + 'thumbnail': 're:(?i)^https?://.*\.jpg$', 'duration': 2913, }, - 'params': { - # m3u8 download - 'skip_download': True, - }, }] def _real_extract(self, url): - episode = self._match_id(url) - webpage = self._download_webpage(url, episode) - embed_data_json = self._search_regex( - r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data', - ).replace('\'', '"') - embed_data = json.loads(embed_data_json) + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + + config_url = self._search_regex( + r'data-config\s*=\s*"([^"]+)"', webpage, 'data config url') + config_url = compat_urlparse.urljoin(url, config_url) - domain = embed_data['mediaUrl'] - if not domain.startswith('http'): - # only happens in telecinco.es videos - domain = 'http://' + domain - info_url = compat_urlparse.urljoin( - domain, - 
compat_urllib_parse_unquote(embed_data['flashvars']['host']) - ) - info_el = self._download_xml(info_url, episode).find('./video/info') + config = self._download_json( + config_url, display_id, 'Downloading config JSON') - video_link = info_el.find('videoUrl/link').text - token_query = compat_urllib_parse.urlencode({'id': video_link}) - token_info = self._download_json( - embed_data['flashvars']['ov_tk'] + '?' + token_query, - episode, - transform_source=strip_jsonp - ) - formats = self._extract_m3u8_formats( - token_info['tokenizedUrl'], episode, ext='mp4') + mmc = self._download_json( + config['services']['mmc'], display_id, 'Downloading mmc JSON') + + formats = [] + for location in mmc['locations']: + gat = self._proto_relative_url(location.get('gat'), 'http:') + bas = location.get('bas') + loc = location.get('loc') + ogn = location.get('ogn') + if None in (gat, bas, loc, ogn): + continue + token_data = { + 'bas': bas, + 'icd': loc, + 'ogn': ogn, + 'sta': '0', + } + media = self._download_json( + '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))), + display_id, 'Downloading %s JSON' % location['loc']) + file_ = media.get('file') + if not file_: + continue + formats.extend(self._extract_f4m_formats( + file_ + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18', + display_id, f4m_id=loc)) + + title = self._search_regex( + r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>', webpage, 'title') + + video_id = self._search_regex( + r'data-media-id\s*=\s*"([^"]+)"', webpage, + 'data media id', default=None) or display_id + thumbnail = config.get('poster', {}).get('imageUrl') + duration = int_or_none(mmc.get('duration')) return { - 'id': embed_data['videoId'], - 'display_id': episode, - 'title': info_el.find('title').text, - 'formats': formats, + 'id': video_id, + 'display_id': display_id, + 'title': title, 'description': get_element_by_attribute('class', 'text', webpage), - 'thumbnail': info_el.find('thumb').text, - 'duration': parse_duration(info_el.find('duration').text), + 'thumbnail': thumbnail, + 'duration': duration, + 'formats': formats, } diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py index d47aeceda..c2b7ed9ab 100644 --- a/youtube_dl/extractor/mixcloud.py +++ b/youtube_dl/extractor/mixcloud.py @@ -64,7 +64,8 @@ class MixcloudIE(InfoExtractor): preview_url = self._search_regex( r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url') - song_url = preview_url.replace('/previews/', '/c/originals/') + song_url = re.sub(r'audiocdn(\d+)', r'stream\1', preview_url) + song_url = song_url.replace('/previews/', '/c/originals/') if not self._check_url(song_url, track_id, 'mp3'): song_url = song_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/') if not self._check_url(song_url, track_id, 'm4a'): diff --git a/youtube_dl/extractor/moevideo.py b/youtube_dl/extractor/moevideo.py index 5a66302f6..d930b9634 100644 --- a/youtube_dl/extractor/moevideo.py +++ b/youtube_dl/extractor/moevideo.py @@ -5,13 +5,11 @@ import json import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) @@ -80,7 +78,7 @@ class MoeVideoIE(InfoExtractor): ] r_json = json.dumps(r) post = compat_urllib_parse.urlencode({'r': r_json}) - req = compat_urllib_request.Request(self._API_URL, post) + req = sanitized_Request(self._API_URL, post) req.add_header('Content-type', 
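# the request body built above is the JSON payload form-encoded under the single field 'r', hence the form content type on this header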
'application/x-www-form-urlencoded') response = self._download_json(req, video_id) diff --git a/youtube_dl/extractor/mofosex.py b/youtube_dl/extractor/mofosex.py index 9bf99a54a..f8226cbb2 100644 --- a/youtube_dl/extractor/mofosex.py +++ b/youtube_dl/extractor/mofosex.py @@ -7,8 +7,8 @@ from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_urlparse, - compat_urllib_request, ) +from ..utils import sanitized_Request class MofosexIE(InfoExtractor): @@ -29,7 +29,7 @@ class MofosexIE(InfoExtractor): video_id = mobj.group('id') url = 'http://www.' + mobj.group('url') - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py index 88dcd4f73..f6bf94f2f 100644 --- a/youtube_dl/extractor/moniker.py +++ b/youtube_dl/extractor/moniker.py @@ -5,16 +5,17 @@ import os.path import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, +from ..compat import compat_urllib_parse +from ..utils import ( + ExtractorError, + remove_start, + sanitized_Request, ) -from ..utils import ExtractorError class MonikerIE(InfoExtractor): IE_DESC = 'allmyvideos.net and vidspot.net' - _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)' + _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?:(?:2|v)/v-)?(?P<id>[a-zA-Z0-9_-]+)' _TESTS = [{ 'url': 'http://allmyvideos.net/jih3nce3x6wn', @@ -25,6 +26,14 @@ class MonikerIE(InfoExtractor): 'title': 'youtube-dl test video', }, }, { + 'url': 'http://allmyvideos.net/embed-jih3nce3x6wn', + 'md5': '710883dee1bfc370ecf9fa6a89307c88', + 'info_dict': { + 'id': 'jih3nce3x6wn', + 'ext': 'mp4', + 'title': 'youtube-dl test video', + }, + }, { 'url': 'http://vidspot.net/l2ngsmhs8ci5', 'md5': '710883dee1bfc370ecf9fa6a89307c88', 'info_dict': { @@ -35,10 +44,25 @@ class MonikerIE(InfoExtractor): }, { 'url': 'https://www.vidspot.net/l2ngsmhs8ci5', 'only_matching': True, + }, { + 'url': 'http://vidspot.net/2/v-ywDf99', + 'md5': '5f8254ce12df30479428b0152fb8e7ba', + 'info_dict': { + 'id': 'ywDf99', + 'ext': 'mp4', + 'title': 'IL FAIT LE MALIN EN PORSHE CAYENNE ( mais pas pour longtemps)', + 'description': 'IL FAIT LE MALIN EN PORSHE CAYENNE.', + }, + }, { + 'url': 'http://allmyvideos.net/v/v-HXZm5t', + 'only_matching': True, }] def _real_extract(self, url): - video_id = self._match_id(url) + orig_video_id = self._match_id(url) + video_id = remove_start(orig_video_id, 'embed-') + url = url.replace(orig_video_id, video_id) + assert re.match(self._VALID_URL, url) is not None orig_webpage = self._download_webpage(url, video_id) if '>File Not Found<' in orig_webpage: @@ -50,18 +74,30 @@ class MonikerIE(InfoExtractor): raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error), expected=True) - fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage) - data = dict(fields) + builtin_url = self._search_regex( + r'<iframe[^>]+src=(["\'])(?P<url>.+?/builtin-.+?)\1', + orig_webpage, 'builtin URL', default=None, group='url') - post = compat_urllib_parse.urlencode(data) - headers = { - b'Content-Type': b'application/x-www-form-urlencoded', - } - req = compat_urllib_request.Request(url, post, headers) - webpage = self._download_webpage( - req, video_id, note='Downloading video page ...') + if builtin_url: + req = sanitized_Request(builtin_url) 
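+            # the embedded "builtin" player page presumably validates the Referer, so replay the original page URL with this request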
+ req.add_header('Referer', url) + webpage = self._download_webpage(req, video_id, 'Downloading builtin page') + title = self._og_search_title(orig_webpage).strip() + description = self._og_search_description(orig_webpage).strip() + else: + fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage) + data = dict(fields) + + post = compat_urllib_parse.urlencode(data) + headers = { + b'Content-Type': b'application/x-www-form-urlencoded', + } + req = sanitized_Request(url, post, headers) + webpage = self._download_webpage( + req, video_id, note='Downloading video page ...') - title = os.path.splitext(data['fname'])[0] + title = os.path.splitext(data['fname'])[0] + description = None # Could be several links with different quality links = re.findall(r'"file" : "?(.+?)",', webpage) @@ -75,5 +111,6 @@ class MonikerIE(InfoExtractor): return { 'id': video_id, 'title': title, + 'description': description, 'formats': formats, } diff --git a/youtube_dl/extractor/mooshare.py b/youtube_dl/extractor/mooshare.py index 7603af5e2..7cc7f054f 100644 --- a/youtube_dl/extractor/mooshare.py +++ b/youtube_dl/extractor/mooshare.py @@ -3,12 +3,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -59,7 +57,7 @@ class MooshareIE(InfoExtractor): 'hash': hash_key, } - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') diff --git a/youtube_dl/extractor/movieclips.py b/youtube_dl/extractor/movieclips.py index 04e17d055..1564cb71f 100644 --- a/youtube_dl/extractor/movieclips.py +++ b/youtube_dl/extractor/movieclips.py @@ -1,80 +1,40 @@ +# coding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor -from ..compat import ( - compat_str, -) -from ..utils import ( - ExtractorError, - clean_html, -) +from ..utils import sanitized_Request class MovieClipsIE(InfoExtractor): - _VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?' + _VALID_URL = r'https?://(?:www.)?movieclips\.com/videos/(?P<id>[^/?#]+)' _TEST = { - 'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/', + 'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597?autoPlay=true&playlistId=5', 'info_dict': { - 'id': 'Wy7ZU', - 'display_id': 'my-week-with-marilyn-movie-do-you-love-me', + 'id': 'pKIGmG83AqD9', + 'display_id': 'warcraft-trailer-1-561180739597', 'ext': 'mp4', - 'title': 'My Week with Marilyn - Do You Love Me?', - 'description': 'md5:e86795bd332fe3cff461e7c8dc542acb', + 'title': 'Warcraft Trailer 1', + 'description': 'Watch Trailer 1 from Warcraft (2016). 
Legendary’s WARCRAFT is a 3D epic adventure of world-colliding conflict based.', 'thumbnail': 're:^https?://.*\.jpg$', }, - 'params': { - # rtmp download - 'skip_download': True, - } + 'add_ie': ['ThePlatform'], } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - display_id = mobj.group('display_id') - show_id = display_id or video_id - - config = self._download_xml( - 'http://config.movieclips.com/player/config/%s' % video_id, - show_id, 'Downloading player config') - - if config.find('./country-region').text == 'false': - raise ExtractorError( - '%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True) - - properties = config.find('./video/properties') - smil_file = properties.attrib['smil_file'] + display_id = self._match_id(url) - smil = self._download_xml(smil_file, show_id, 'Downloading SMIL') - base_url = smil.find('./head/meta').attrib['base'] - - formats = [] - for video in smil.findall('./body/switch/video'): - vbr = int(video.attrib['system-bitrate']) / 1000 - src = video.attrib['src'] - formats.append({ - 'url': base_url, - 'play_path': src, - 'ext': src.split(':')[0], - 'vbr': vbr, - 'format_id': '%dk' % vbr, - }) - - self._sort_formats(formats) - - title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title']) - description = clean_html(compat_str(properties.attrib['clip_description'])) - thumbnail = properties.attrib['image'] - categories = properties.attrib['clip_categories'].split(',') + req = sanitized_Request(url) + # it doesn't work if it thinks the browser is too old + req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/43.0 (Chrome)') + webpage = self._download_webpage(req, display_id) + theplatform_link = self._html_search_regex(r'src="(http://player.theplatform.com/p/.*?)"', webpage, 'theplatform link') + title = self._html_search_regex(r'<title[^>]*>([^>]+)-\s*\d+\s*|\s*Movieclips.com</title>', webpage, 'title') + description = self._html_search_meta('description', webpage) return { - 'id': video_id, - 'display_id': display_id, + '_type': 'url_transparent', + 'url': theplatform_link, 'title': title, + 'display_id': display_id, 'description': description, - 'thumbnail': thumbnail, - 'categories': categories, - 'formats': formats, } diff --git a/youtube_dl/extractor/movshare.py b/youtube_dl/extractor/movshare.py deleted file mode 100644 index 6101063f2..000000000 --- a/youtube_dl/extractor/movshare.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import unicode_literals - -from .novamov import NovaMovIE - - -class MovShareIE(NovaMovIE): - IE_NAME = 'movshare' - IE_DESC = 'MovShare' - - _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'movshare\.(?:net|sx|ag)'} - - _HOST = 'www.movshare.net' - - _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' - _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>' - _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>' - - _TEST = { - 'url': 'http://www.movshare.net/video/559e28be54d96', - 'md5': 'abd31a2132947262c50429e1d16c1bfd', - 'info_dict': { - 'id': '559e28be54d96', - 'ext': 'flv', - 'title': 'dissapeared image', - 'description': 'optical illusion dissapeared image magic illusion', - } - } diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py index b48fac5e3..d887583e6 100644 --- a/youtube_dl/extractor/mtv.py +++ b/youtube_dl/extractor/mtv.py @@ -5,7 +5,6 @@ import re from .common import InfoExtractor from ..compat import
( compat_urllib_parse, - compat_urllib_request, compat_str, ) from ..utils import ( @@ -13,6 +12,7 @@ from ..utils import ( find_xpath_attr, fix_xml_ampersands, HEADRequest, + sanitized_Request, unescapeHTML, url_basename, RegexNotFoundError, @@ -53,7 +53,7 @@ class MTVServicesInfoExtractor(InfoExtractor): def _extract_mobile_video_formats(self, mtvn_id): webpage_url = self._MOBILE_TEMPLATE % mtvn_id - req = compat_urllib_request.Request(webpage_url) + req = sanitized_Request(webpage_url) # Otherwise we get a webpage that would execute some javascript req.add_header('User-Agent', 'curl/7') webpage = self._download_webpage(req, mtvn_id, @@ -67,7 +67,7 @@ class MTVServicesInfoExtractor(InfoExtractor): return [{'url': url, 'ext': 'mp4'}] def _extract_video_formats(self, mdoc, mtvn_id): - if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None: + if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None: if mtvn_id is not None and self._MOBILE_TEMPLATE is not None: self.to_screen('The normal version is not available from your ' 'country, trying with the mobile version') @@ -114,7 +114,8 @@ class MTVServicesInfoExtractor(InfoExtractor): # Remove the templates, like &device={device} mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url) if 'acceptMethods' not in mediagen_url: - mediagen_url += '&acceptMethods=fms' + mediagen_url += '&' if '?' in mediagen_url else '?' + mediagen_url += 'acceptMethods=fms' mediagen_doc = self._download_xml(mediagen_url, video_id, 'Downloading video urls') @@ -141,7 +142,7 @@ class MTVServicesInfoExtractor(InfoExtractor): if title_el is None: title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title') if title_el is None: - title_el = itemdoc.find('.//title') + title_el = itemdoc.find('.//title') or itemdoc.find('./title') if title_el.text is None: title_el = None @@ -174,8 +175,11 @@ class MTVServicesInfoExtractor(InfoExtractor): if self._LANG: info_url += 'lang=%s&' % self._LANG info_url += data + return self._get_videos_info_from_url(info_url, video_id) + + def _get_videos_info_from_url(self, url, video_id): idoc = self._download_xml( - info_url, video_id, + url, video_id, 'Downloading info', transform_source=fix_xml_ampersands) return self.playlist_result( [self._get_video_info(item) for item in idoc.findall('.//item')]) @@ -196,7 +200,13 @@ class MTVServicesInfoExtractor(InfoExtractor): if mgid is None or ':' not in mgid: mgid = self._search_regex( [r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'], - webpage, 'mgid') + webpage, 'mgid', default=None) + + if not mgid: + sm4_embed = self._html_search_meta( + 'sm4:video:embed', webpage, 'sm4 embed', default='') + mgid = self._search_regex( + r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid') videos_info = self._get_videos_info(mgid) return videos_info @@ -218,6 +228,13 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor): }, } + @staticmethod + def _extract_url(webpage): + mobj = re.search( + r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage) + if mobj: + return mobj.group('url') + def _get_feed_url(self, uri): video_id = self._id_from_uri(uri) site_id = uri.replace(video_id, '') @@ -288,3 +305,65 @@ class MTVIggyIE(MTVServicesInfoExtractor): } } _FEED_URL = 'http://all.mtvworldverticals.com/feed-xml/' + + +class MTVDEIE(MTVServicesInfoExtractor): + IE_NAME = 'mtv.de' + _VALID_URL = 
r'https?://(?:www\.)?mtv\.de/(?:artists|shows|news)/(?:[^/]+/)*(?P<id>\d+)-[^/#?]+/*(?:[#?].*)?$' + _TESTS = [{ + 'url': 'http://www.mtv.de/artists/10571-cro/videos/61131-traum', + 'info_dict': { + 'id': 'music_video-a50bc5f0b3aa4b3190aa', + 'ext': 'mp4', + 'title': 'MusicVideo_cro-traum', + 'description': 'Cro - Traum', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + # mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97) + 'url': 'http://www.mtv.de/shows/933-teen-mom-2/staffeln/5353/folgen/63565-enthullungen', + 'info_dict': { + 'id': 'local_playlist-f5ae778b9832cc837189', + 'ext': 'mp4', + 'title': 'Episode_teen-mom-2_shows_season-5_episode-1_full-episode_part1', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + # single video in pagePlaylist with different id + 'url': 'http://www.mtv.de/news/77491-mtv-movies-spotlight-pixels-teil-3', + 'info_dict': { + 'id': 'local_playlist-4e760566473c4c8c5344', + 'ext': 'mp4', + 'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1', + 'description': 'MTV Movies Supercut', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + playlist = self._parse_json( + self._search_regex( + r'window\.pagePlaylist\s*=\s*(\[.+?\]);\n', webpage, 'page playlist'), + video_id) + + # news pages contain single video in playlist with different id + if len(playlist) == 1: + return self._get_videos_info_from_url(playlist[0]['mrss'], video_id) + + for item in playlist: + item_id = item.get('id') + if item_id and compat_str(item_id) == video_id: + return self._get_videos_info_from_url(item['mrss'], video_id) diff --git a/youtube_dl/extractor/musicvault.py b/youtube_dl/extractor/musicvault.py deleted file mode 100644 index 0e46ac7c1..000000000 --- a/youtube_dl/extractor/musicvault.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor - - -class MusicVaultIE(InfoExtractor): - _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html' - _TEST = { - 'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html', - 'md5': '3adcbdb3dcc02d647539e53f284ba171', - 'info_dict': { - 'id': '1010863', - 'ext': 'mp4', - 'uploader_id': 'the-allman-brothers-band', - 'title': 'Straight from the Heart', - 'duration': 244, - 'uploader': 'The Allman Brothers Band', - 'thumbnail': 're:^https?://.*/thumbnail/.*', - 'upload_date': '20131219', - 'location': 'Capitol Theatre (Passaic, NJ)', - 'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981', - 'timestamp': int, - } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - display_id = mobj.group('display_id') - webpage = self._download_webpage(url, display_id) - - thumbnail = self._search_regex( - r'<meta itemprop="thumbnail" content="([^"]+)"', - webpage, 'thumbnail', fatal=False) - - data_div = self._search_regex( - r'(?s)<div class="data">(.*?)</div>', webpage, 'data fields') - uploader = self._html_search_regex( - r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False) - title = self._html_search_regex( - r'<h2.*?>(.*?)</h2>', data_div, 'title') - location = self._html_search_regex( - r'<h4.*?>(.*?)</h4>', data_div, 
'location', fatal=False) - - kaltura_id = self._search_regex( - r'<div id="video-detail-player" data-kaltura-id="([^"]+)"', - webpage, 'kaltura ID') - wid = self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid') - - return { - 'id': mobj.group('id'), - '_type': 'url_transparent', - 'url': 'kaltura:%s:%s' % (wid, kaltura_id), - 'ie_key': 'Kaltura', - 'display_id': display_id, - 'uploader_id': mobj.group('uploader_id'), - 'thumbnail': thumbnail, - 'description': self._html_search_meta('description', webpage), - 'location': location, - 'title': title, - 'uploader': uploader, - } diff --git a/youtube_dl/extractor/mwave.py b/youtube_dl/extractor/mwave.py new file mode 100644 index 000000000..66b523197 --- /dev/null +++ b/youtube_dl/extractor/mwave.py @@ -0,0 +1,58 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + int_or_none, + parse_duration, +) + + +class MwaveIE(InfoExtractor): + _VALID_URL = r'https?://mwave\.interest\.me/mnettv/videodetail\.m\?searchVideoDetailVO\.clip_id=(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=168859', + 'md5': 'c930e27b7720aaa3c9d0018dfc8ff6cc', + 'info_dict': { + 'id': '168859', + 'ext': 'flv', + 'title': '[M COUNTDOWN] SISTAR - SHAKE IT', + 'thumbnail': 're:^https?://.*\.jpg$', + 'uploader': 'M COUNTDOWN', + 'duration': 206, + 'view_count': int, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + vod_info = self._download_json( + 'http://mwave.interest.me/onair/vod_info.m?vodtype=CL&sectorid=&endinfo=Y&id=%s' % video_id, + video_id, 'Download vod JSON') + + formats = [] + for num, cdn_info in enumerate(vod_info['cdn']): + stream_url = cdn_info.get('url') + if not stream_url: + continue + stream_name = cdn_info.get('name') or compat_str(num) + f4m_stream = self._download_json( + stream_url, video_id, + 'Download %s stream JSON' % stream_name) + f4m_url = f4m_stream.get('fileurl') + if not f4m_url: + continue + formats.extend( + self._extract_f4m_formats(f4m_url + '&hdcore=3.0.3', video_id, f4m_id=stream_name)) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': vod_info['title'], + 'thumbnail': vod_info.get('cover'), + 'uploader': vod_info.get('program_title'), + 'duration': parse_duration(vod_info.get('time')), + 'view_count': int_or_none(vod_info.get('hit')), + 'formats': formats, + } diff --git a/youtube_dl/extractor/myvideo.py b/youtube_dl/extractor/myvideo.py index c96f472a3..36ab388b2 100644 --- a/youtube_dl/extractor/myvideo.py +++ b/youtube_dl/extractor/myvideo.py @@ -11,10 +11,10 @@ from ..compat import ( compat_ord, compat_urllib_parse, compat_urllib_parse_unquote, - compat_urllib_request, ) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -83,7 +83,7 @@ class MyVideoIE(InfoExtractor): mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage) if mobj is not None: - request = compat_urllib_request.Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '') + request = sanitized_Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '') response = self._download_webpage(request, video_id, 'Downloading video info') info = json.loads(base64.b64decode(response).decode('utf-8')) diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py index 925967753..1f5fc2145 100644 --- a/youtube_dl/extractor/naver.py +++ b/youtube_dl/extractor/naver.py @@ -10,7 +10,6 @@ from
..compat import ( ) from ..utils import ( ExtractorError, - clean_html, ) @@ -46,11 +45,11 @@ class NaverIE(InfoExtractor): m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"', webpage) if m_id is None: - m_error = re.search( - r'(?s)<div class="(?:nation_error|nation_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>', - webpage) - if m_error: - raise ExtractorError(clean_html(m_error.group('msg')), expected=True) + error = self._html_search_regex( + r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>', + webpage, 'error', default=None) + if error: + raise ExtractorError(error, expected=True) raise ExtractorError('couldn\'t extract vid and key') vid = m_id.group(1) key = m_id.group(2) diff --git a/youtube_dl/extractor/nba.py b/youtube_dl/extractor/nba.py index 944096e1c..7c6b7841d 100644 --- a/youtube_dl/extractor/nba.py +++ b/youtube_dl/extractor/nba.py @@ -1,63 +1,102 @@ from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..utils import ( - remove_end, parse_duration, + int_or_none, + xpath_text, + xpath_attr, ) class NBAIE(InfoExtractor): - _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$' + _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)?video/(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$' _TESTS = [{ 'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html', - 'md5': 'c0edcfc37607344e2ff8f13c378c88a4', + 'md5': '9e7729d3010a9c71506fd1248f74e4f4', 'info_dict': { - 'id': '0021200253-okc-bkn-recap.nba', - 'ext': 'mp4', + 'id': '0021200253-okc-bkn-recap', + 'ext': 'flv', 'title': 'Thunder vs. Nets', 'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.', 'duration': 181, + 'timestamp': 1354638466, + 'upload_date': '20121204', }, }, { 'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/', 'only_matching': True, }, { - 'url': 'http://watch.nba.com/nba/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba', + 'url': 'http://watch.nba.com/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba', + 'md5': 'b2b39b81cf28615ae0c3360a3f9668c4', 'info_dict': { - 'id': '0041400301-cle-atl-recap.nba', + 'id': '0041400301-cle-atl-recap', 'ext': 'mp4', - 'title': 'NBA GAME TIME | Video: Hawks vs. Cavaliers Game 1', + 'title': 'Hawks vs. 
Cavaliers Game 1', 'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d', 'duration': 228, - }, - 'params': { - 'skip_download': True, + 'timestamp': 1432134543, + 'upload_date': '20150520', } }] def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4' + path, video_id = re.match(self._VALID_URL, url).groups() + if path.startswith('nba/'): + path = path[3:] + video_info = self._download_xml('http://www.nba.com/%s.xml' % path, video_id) + video_id = xpath_text(video_info, 'slug') + title = xpath_text(video_info, 'headline') + description = xpath_text(video_info, 'description') + duration = parse_duration(xpath_text(video_info, 'length')) + timestamp = int_or_none(xpath_attr(video_info, 'dateCreated', 'uts')) - shortened_video_id = video_id.rpartition('/')[2] - title = remove_end( - self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com') + thumbnails = [] + for image in video_info.find('images'): + thumbnails.append({ + 'id': image.attrib.get('cut'), + 'url': image.text, + 'width': int_or_none(image.attrib.get('width')), + 'height': int_or_none(image.attrib.get('height')), + }) - description = self._og_search_description(webpage) - duration_str = self._html_search_meta( - 'duration', webpage, 'duration', default=None) - if not duration_str: - duration_str = self._html_search_regex( - r'Duration:</b>\s*(\d+:\d+)', webpage, 'duration', fatal=False) - duration = parse_duration(duration_str) + formats = [] + for video_file in video_info.findall('.//file'): + video_url = video_file.text + if video_url.startswith('/'): + continue + if video_url.endswith('.m3u8'): + m3u8_formats = self._extract_m3u8_formats(video_url, video_id, m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + elif video_url.endswith('.f4m'): + f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.4.1.1', video_id, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) + else: + key = video_file.attrib.get('bitrate') + format_info = { + 'format_id': key, + 'url': video_url, + } + mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', key) + if mobj: + format_info.update({ + 'width': int(mobj.group(1)), + 'height': int(mobj.group(2)), + 'tbr': int_or_none(mobj.group(3)), + }) + formats.append(format_info) + self._sort_formats(formats) return { - 'id': shortened_video_id, - 'url': video_url, + 'id': video_id, 'title': title, 'description': description, 'duration': duration, + 'timestamp': timestamp, + 'thumbnails': thumbnails, + 'formats': formats, } diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py index dc2091be0..340c922bd 100644 --- a/youtube_dl/extractor/nbc.py +++ b/youtube_dl/extractor/nbc.py @@ -3,14 +3,12 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_str, - compat_HTTPError, -) +from ..compat import compat_HTTPError from ..utils import ( ExtractorError, find_xpath_attr, lowercase_escape, + smuggle_url, unescapeHTML, ) @@ -62,12 +60,13 @@ class NBCIE(InfoExtractor): theplatform_url = unescapeHTML(lowercase_escape(self._html_search_regex( [ r'(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"', + r'<iframe[^>]+src="((?:https?:)?//player\.theplatform\.com/[^"]+)"', r'"embedURL"\s*:\s*"([^"]+)"' ], webpage, 'theplatform url').replace('_no_endcard', '').replace('\\/', '/'))) if 
theplatform_url.startswith('//'): theplatform_url = 'http:' + theplatform_url - return self.url_result(theplatform_url) + return self.url_result(smuggle_url(theplatform_url, {'source_url': url})) class NBCSportsVPlayerIE(InfoExtractor): @@ -124,7 +123,7 @@ class NBCSportsIE(InfoExtractor): class NBCNewsIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)?nbcnews\.com/ (?:video/.+?/(?P<id>\d+)| - (?:feature|nightly-news)/[^/]+/(?P<title>.+)) + (?:watch|feature|nightly-news)/[^/]+/(?P<title>.+)) ''' _TESTS = [ @@ -169,6 +168,10 @@ class NBCNewsIE(InfoExtractor): 'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5', }, }, + { + 'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952', + 'only_matching': True, + }, ] def _real_extract(self, url): @@ -183,7 +186,7 @@ class NBCNewsIE(InfoExtractor): 'title': info.find('headline').text, 'ext': 'flv', 'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text, - 'description': compat_str(info.find('caption').text), + 'description': info.find('caption').text, 'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text, } else: @@ -232,3 +235,28 @@ class NBCNewsIE(InfoExtractor): 'url': info['videoAssets'][-1]['publicUrl'], 'ie_key': 'ThePlatform', } + + +class MSNBCIE(InfoExtractor): + # https URLs redirect to corresponding http ones + _VALID_URL = r'http://www\.msnbc\.com/[^/]+/watch/(?P<id>[^/]+)' + _TEST = { + 'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924', + 'md5': '6d236bf4f3dddc226633ce6e2c3f814d', + 'info_dict': { + 'id': 'n_hayes_Aimm_140801_272214', + 'ext': 'mp4', + 'title': 'The chaotic GOP immigration vote', + 'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.', + 'thumbnail': 're:^https?://.*\.jpg$', + 'timestamp': 1406937606, + 'upload_date': '20140802', + 'categories': ['MSNBC/Topics/Franchise/Best of last night', 'MSNBC/Topics/General/Congress'], + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + embed_url = self._html_search_meta('embedURL', webpage) + return self.url_result(embed_url) diff --git a/youtube_dl/extractor/ndr.py b/youtube_dl/extractor/ndr.py index 79a13958b..894c51399 100644 --- a/youtube_dl/extractor/ndr.py +++ b/youtube_dl/extractor/ndr.py @@ -1,130 +1,387 @@ -# encoding: utf-8 +# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( - ExtractorError, + determine_ext, int_or_none, + parse_iso8601, qualities, - parse_duration, ) class NDRBaseIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + display_id = next(group for group in mobj.groups() if group) + webpage = self._download_webpage(url, display_id) + return self._extract_embed(webpage, display_id) - page = self._download_webpage(url, video_id, 'Downloading page') - title = self._og_search_title(page).strip() - description = self._og_search_description(page) - if description: - description = description.strip() +class NDRIE(NDRBaseIE): + IE_NAME = 'ndr' + IE_DESC = 'NDR.de - Norddeutscher Rundfunk' + _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html' + _TESTS = [{ + # httpVideo, same content id + 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html', + 'md5': 
'6515bc255dc5c5f8c85bbc38e035a659', + 'info_dict': { + 'id': 'hafengeburtstag988', + 'display_id': 'Party-Poette-und-Parade', + 'ext': 'mp4', + 'title': 'Party, Pötte und Parade', + 'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c', + 'uploader': 'ndrtv', + 'timestamp': 1431108900, + 'upload_date': '20150510', + 'duration': 3498, + }, + 'params': { + 'skip_download': True, + }, + }, { + # httpVideo, different content id + 'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html', + 'md5': '1043ff203eab307f0c51702ec49e9a71', + 'info_dict': { + 'id': 'osna272', + 'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch', + 'ext': 'mp4', + 'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights', + 'description': 'md5:32e9b800b3d2d4008103752682d5dc01', + 'uploader': 'ndrtv', + 'timestamp': 1442059200, + 'upload_date': '20150912', + 'duration': 510, + }, + 'params': { + 'skip_download': True, + }, + }, { + # httpAudio, same content id + 'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html', + 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', + 'info_dict': { + 'id': 'audio51535', + 'display_id': 'La-Valette-entgeht-der-Hinrichtung', + 'ext': 'mp3', + 'title': 'La Valette entgeht der Hinrichtung', + 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536', + 'uploader': 'ndrinfo', + 'timestamp': 1290626100, + 'upload_date': '20140729', + 'duration': 884, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html', + 'only_matching': True, + }] + + def _extract_embed(self, webpage, display_id): + embed_url = self._html_search_meta( + 'embedURL', webpage, 'embed URL', fatal=True) + description = self._search_regex( + r'<p[^>]+itemprop="description">([^<]+)</p>', + webpage, 'description', default=None) or self._og_search_description(webpage) + timestamp = parse_iso8601( + self._search_regex( + r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"', + webpage, 'upload date', fatal=False)) + return { + '_type': 'url_transparent', + 'url': embed_url, + 'display_id': display_id, + 'description': description, + 'timestamp': timestamp, + } - duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', default=None)) - if not duration: - duration = parse_duration(self._html_search_regex( - r'(<span class="min">\d+</span>:<span class="sec">\d+</span>)', - page, 'duration', default=None)) - formats = [] +class NJoyIE(NDRBaseIE): + IE_NAME = 'njoy' + IE_DESC = 'N-JOY' + _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html' + _TESTS = [{ + # httpVideo, same content id + 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html', + 'md5': 'cb63be60cd6f9dd75218803146d8dc67', + 'info_dict': { + 'id': 'comedycontest2480', + 'display_id': 'Benaissa-beim-NDR-Comedy-Contest', + 'ext': 'mp4', + 'title': 'Benaissa beim NDR Comedy Contest', + 'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39', + 'uploader': 'ndrtv', + 'upload_date': '20141129', + 'duration': 654, + }, + 'params': { + 'skip_download': True, + }, + }, { + # httpVideo, different content id + 'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html', + 'md5': '417660fffa90e6df2fda19f1b40a64d8', + 'info_dict': { + 'id': 'dockville882', + 'display_id': 
'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-', + 'ext': 'mp4', + 'title': '"Ich hab noch nie" mit Felix Jaehn', + 'description': 'md5:85dd312d53be1b99e1f998a16452a2f3', + 'uploader': 'njoy', + 'upload_date': '20150822', + 'duration': 211, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html', + 'only_matching': True, + }] + + def _extract_embed(self, webpage, display_id): + video_id = self._search_regex( + r'<iframe[^>]+id="pp_([\da-z]+)"', webpage, 'embed id') + description = self._search_regex( + r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>', + webpage, 'description', fatal=False) + return { + '_type': 'url_transparent', + 'ie_key': 'NDREmbedBase', + 'url': 'ndr:%s' % video_id, + 'display_id': display_id, + 'description': description, + } + + +class NDREmbedBaseIE(InfoExtractor): + IE_NAME = 'ndr:embed:base' + _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)' + _TESTS = [{ + 'url': 'ndr:soundcheck3366', + 'only_matching': True, + }, { + 'url': 'http://www.ndr.de/soundcheck3366-ppjson.json', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') or mobj.group('id_s') + + ppjson = self._download_json( + 'http://www.ndr.de/%s-ppjson.json' % video_id, video_id) - mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page) - if mp3_url: - formats.append({ - 'url': mp3_url.group('audio'), - 'format_id': 'mp3', - }) + playlist = ppjson['playlist'] - thumbnail = None + formats = [] + quality_key = qualities(('xs', 's', 'm', 'l', 'xl')) - video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page) - if video_url: - thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page) - if thumbnails: - quality_key = qualities(['xs', 's', 'm', 'l', 'xl']) - largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1])) - thumbnail = 'http://www.ndr.de' + largest[0] + for format_id, f in playlist.items(): + src = f.get('src') + if not src: + continue + ext = determine_ext(src, None) + if ext == 'f4m': + formats.extend(self._extract_f4m_formats( + src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds')) + elif ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + src, video_id, m3u8_id='hls', entry_protocol='m3u8_native')) + else: + quality = f.get('quality') + ff = { + 'url': src, + 'format_id': quality or format_id, + 'quality': quality_key(quality), + } + type_ = f.get('type') + if type_ and type_.split('/')[0] == 'audio': + ff['vcodec'] = 'none' + ff['ext'] = ext or 'mp3' + formats.append(ff) + self._sort_formats(formats) - for format_id in 'lo', 'hi', 'hq': - formats.append({ - 'url': '%s.%s.mp4' % (video_url.group('video'), format_id), - 'format_id': format_id, - }) + config = playlist['config'] - if not formats: - raise ExtractorError('No media links available for %s' % video_id) + live = playlist.get('config', {}).get('streamType') in ['httpVideoLive', 'httpAudioLive'] + title = config['title'] + if live: + title = self._live_title(title) + uploader = ppjson.get('config', {}).get('branding') + upload_date = ppjson.get('config', {}).get('publicationDate') + duration = int_or_none(config.get('duration')) + + thumbnails = [{ + 'id': thumbnail.get('quality') or thumbnail_id, + 'url': thumbnail['src'], + 'preference': quality_key(thumbnail.get('quality')), + } for 
thumbnail_id, thumbnail in config.get('poster', {}).items() if thumbnail.get('src')] return { 'id': video_id, 'title': title, - 'description': description, - 'thumbnail': thumbnail, + 'is_live': live, + 'uploader': uploader if uploader != '-' else None, + 'upload_date': upload_date[0:8] if upload_date else None, 'duration': duration, + 'thumbnails': thumbnails, 'formats': formats, } -class NDRIE(NDRBaseIE): - IE_NAME = 'ndr' - IE_DESC = 'NDR.de - Mediathek' - _VALID_URL = r'https?://www\.ndr\.de/.+?(?P<id>\d+)\.html' - - _TESTS = [ - { - 'url': 'http://www.ndr.de/fernsehen/sendungen/nordmagazin/Kartoffeltage-in-der-Lewitz,nordmagazin25866.html', - 'md5': '5bc5f5b92c82c0f8b26cddca34f8bb2c', - 'note': 'Video file', - 'info_dict': { - 'id': '25866', - 'ext': 'mp4', - 'title': 'Kartoffeltage in der Lewitz', - 'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8', - 'duration': 166, - }, - 'skip': '404 Not found', - }, - { - 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html', - 'md5': 'dadc003c55ae12a5d2f6bd436cd73f59', - 'info_dict': { - 'id': '988', - 'ext': 'mp4', - 'title': 'Party, Pötte und Parade', - 'description': 'Hunderttausende feiern zwischen Speicherstadt und St. Pauli den 826. Hafengeburtstag. Die NDR Sondersendung zeigt die schönsten und spektakulärsten Bilder vom Auftakt.', - 'duration': 3498, - }, - }, - { - 'url': 'http://www.ndr.de/info/audio51535.html', - 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', - 'note': 'Audio file', - 'info_dict': { - 'id': '51535', - 'ext': 'mp3', - 'title': 'La Valette entgeht der Hinrichtung', - 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536', - 'duration': 884, - } - } - ] - +class NDREmbedIE(NDREmbedBaseIE): + IE_NAME = 'ndr:embed' + _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html' + _TESTS = [{ + 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html', + 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9', + 'info_dict': { + 'id': 'ndraktuell28488', + 'ext': 'mp4', + 'title': 'Norddeutschland begrüßt Flüchtlinge', + 'is_live': False, + 'uploader': 'ndrtv', + 'upload_date': '20150907', + 'duration': 132, + }, + }, { + 'url': 'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html', + 'md5': '002085c44bae38802d94ae5802a36e78', + 'info_dict': { + 'id': 'soundcheck3366', + 'ext': 'mp4', + 'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen', + 'is_live': False, + 'uploader': 'ndr2', + 'upload_date': '20150912', + 'duration': 3554, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'http://www.ndr.de/info/audio51535-player.html', + 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', + 'info_dict': { + 'id': 'audio51535', + 'ext': 'mp3', + 'title': 'La Valette entgeht der Hinrichtung', + 'is_live': False, + 'uploader': 'ndrinfo', + 'upload_date': '20140729', + 'duration': 884, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html', + 'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c', + 'info_dict': { + 'id': 'visite11010', + 'ext': 'mp4', + 'title': 'Visite - die ganze Sendung', + 'is_live': False, + 'uploader': 'ndrtv', + 'upload_date': '20150902', + 'duration': 3525, + }, + 'params': { + 'skip_download': True, + }, + }, { + # httpVideoLive + 'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html', + 'info_dict': { + 'id': 'livestream217', + 'ext': 'flv', + 'title': 're:^NDR Fernsehen Niedersachsen 
\d{4}-\d{2}-\d{2} \d{2}:\d{2}$', + 'is_live': True, + 'upload_date': '20150910', + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html', + 'only_matching': True, + }, { + 'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html', + 'only_matching': True, + }, { + 'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html', + 'only_matching': True, + }, { + 'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html', + 'only_matching': True, + }, { + 'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html', + 'only_matching': True, + }, { + 'url': 'http://www.ndr.de/fernsehen/doku952-player.html', + 'only_matching': True, + }] -class NJoyIE(NDRBaseIE): - IE_NAME = 'N-JOY' - _VALID_URL = r'https?://www\.n-joy\.de/.+?(?P<id>\d+)\.html' - _TEST = { - 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html', - 'md5': 'cb63be60cd6f9dd75218803146d8dc67', +class NJoyEmbedIE(NDREmbedBaseIE): + IE_NAME = 'njoy:embed' + _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html' + _TESTS = [{ + # httpVideo + 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html', + 'md5': '8483cbfe2320bd4d28a349d62d88bd74', 'info_dict': { - 'id': '2480', + 'id': 'doku948', 'ext': 'mp4', - 'title': 'Benaissa beim NDR Comedy Contest', - 'description': 'Von seinem sehr "behaarten" Leben lässt sich Benaissa trotz aller Schwierigkeiten nicht unterkriegen.', - 'duration': 654, - } - } + 'title': 'Zehn Jahre Reeperbahn Festival - die Doku', + 'is_live': False, + 'upload_date': '20150807', + 'duration': 1011, + }, + }, { + # httpAudio + 'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html', + 'md5': 'd989f80f28ac954430f7b8a48197188a', + 'info_dict': { + 'id': 'stefanrichter100', + 'ext': 'mp3', + 'title': 'Interview mit einem Augenzeugen', + 'is_live': False, + 'uploader': 'njoy', + 'upload_date': '20150909', + 'duration': 140, + }, + 'params': { + 'skip_download': True, + }, + }, { + # httpAudioLive, no explicit ext + 'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html', + 'info_dict': { + 'id': 'webradioweltweit100', + 'ext': 'mp3', + 'title': 're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', + 'is_live': True, + 'uploader': 'njoy', + 'upload_date': '20150810', + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html', + 'only_matching': True, + }, { + 'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html', + 'only_matching': True, + }, { + 'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html', + 'only_matching': True, + }] diff --git a/youtube_dl/extractor/neteasemusic.py b/youtube_dl/extractor/neteasemusic.py index a8e0a64ed..15eca825a 100644 --- a/youtube_dl/extractor/neteasemusic.py +++ b/youtube_dl/extractor/neteasemusic.py @@ -8,11 +8,11 @@ import re from .common import InfoExtractor from ..compat import ( - compat_urllib_request, compat_urllib_parse, compat_str, 
compat_itertools_count, ) +from ..utils import sanitized_Request class NetEaseMusicBaseIE(InfoExtractor): @@ -40,7 +40,7 @@ class NetEaseMusicBaseIE(InfoExtractor): if not details: continue formats.append({ - 'url': 'http://m1.music.126.net/%s/%s.%s' % + 'url': 'http://m5.music.126.net/%s/%s.%s' % (cls._encrypt(details['dfsId']), details['dfsId'], details['extension']), 'ext': details.get('extension'), @@ -56,7 +56,7 @@ class NetEaseMusicBaseIE(InfoExtractor): return int(round(ms / 1000.0)) def query_api(self, endpoint, video_id, note): - req = compat_urllib_request.Request('%s%s' % (self._API_BASE, endpoint)) + req = sanitized_Request('%s%s' % (self._API_BASE, endpoint)) req.add_header('Referer', self._API_BASE) return self._download_json(req, video_id, note) diff --git a/youtube_dl/extractor/nextmedia.py b/youtube_dl/extractor/nextmedia.py index c10784f6b..d1688457f 100644 --- a/youtube_dl/extractor/nextmedia.py +++ b/youtube_dl/extractor/nextmedia.py @@ -126,7 +126,8 @@ class AppleDailyIE(NextMediaIE): 'thumbnail': 're:^https?://.*\.jpg$', 'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd', 'upload_date': '20150128', - } + }, + 'skip': 'redirect to http://www.appledaily.com.tw/animation/', }, { # No thumbnail 'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/', @@ -140,10 +141,19 @@ class AppleDailyIE(NextMediaIE): }, 'expected_warnings': [ 'video thumbnail', - ] + ], + 'skip': 'redirect to http://www.appledaily.com.tw/animation/', }, { 'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/', - 'only_matching': True, + 'md5': 'eaa20e6b9df418c912d7f5dec2ba734d', + 'info_dict': { + 'id': '35770334', + 'ext': 'mp4', + 'title': '咖啡占卜測 XU裝熟指數', + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748', + 'upload_date': '20140417', + }, }] _URL_PATTERN = r'\{url: \'(.+)\'\}' diff --git a/youtube_dl/extractor/nfb.py b/youtube_dl/extractor/nfb.py index ea077254b..5bd15f7a7 100644 --- a/youtube_dl/extractor/nfb.py +++ b/youtube_dl/extractor/nfb.py @@ -1,10 +1,8 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse, -) +from ..compat import compat_urllib_parse +from ..utils import sanitized_Request class NFBIE(InfoExtractor): @@ -40,8 +38,9 @@ class NFBIE(InfoExtractor): uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>', page, 'director name', fatal=False) - request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id, - compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii')) + request = sanitized_Request( + 'https://www.nfb.ca/film/%s/player_config' % video_id, + compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii')) request.add_header('Content-Type', 'application/x-www-form-urlencoded') request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf') diff --git a/youtube_dl/extractor/nfl.py b/youtube_dl/extractor/nfl.py index dc54634a5..200874d68 100644 --- a/youtube_dl/extractor/nfl.py +++ b/youtube_dl/extractor/nfl.py @@ -16,53 +16,118 @@ from ..utils import ( class NFLIE(InfoExtractor): IE_NAME = 'nfl.com' - _VALID_URL = r'''(?x)https?:// - (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/ - (?:.+?/)* - (?P<id>(?:[a-z0-9]{16}|\w{8}\-(?:\w{4}\-){3}\w{12}))''' - _TESTS = [ - { - 'url': 
'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights', - 'md5': '394ef771ddcd1354f665b471d78ec4c6', - 'info_dict': { - 'id': '0ap3000000398478', - 'ext': 'mp4', - 'title': 'Week 3: Redskins vs. Eagles highlights', - 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478', - 'upload_date': '20140921', - 'timestamp': 1411337580, - 'thumbnail': 're:^https?://.*\.jpg$', - } + _VALID_URL = r'''(?x) + https?:// + (?P<host> + (?:www\.)? + (?: + (?: + nfl| + buffalobills| + miamidolphins| + patriots| + newyorkjets| + baltimoreravens| + bengals| + clevelandbrowns| + steelers| + houstontexans| + colts| + jaguars| + titansonline| + denverbroncos| + kcchiefs| + raiders| + chargers| + dallascowboys| + giants| + philadelphiaeagles| + redskins| + chicagobears| + detroitlions| + packers| + vikings| + atlantafalcons| + panthers| + neworleanssaints| + buccaneers| + azcardinals| + stlouisrams| + 49ers| + seahawks + )\.com| + .+?\.clubs\.nfl\.com + ) + )/ + (?:.+?/)* + (?P<id>[^/#?&]+) + ''' + _TESTS = [{ + 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights', + 'md5': '394ef771ddcd1354f665b471d78ec4c6', + 'info_dict': { + 'id': '0ap3000000398478', + 'ext': 'mp4', + 'title': 'Week 3: Redskins vs. Eagles highlights', + 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478', + 'upload_date': '20140921', + 'timestamp': 1411337580, + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, { + 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266', + 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c', + 'info_dict': { + 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266', + 'ext': 'mp4', + 'title': 'LIVE: Post Game vs. Browns', + 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8', + 'upload_date': '20131229', + 'timestamp': 1388354455, + 'thumbnail': 're:^https?://.*\.jpg$', + } + }, { + 'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish', + 'info_dict': { + 'id': '0ap3000000467607', + 'ext': 'mp4', + 'title': 'Frustrations flare on the field', + 'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.', + 'timestamp': 1422850320, + 'upload_date': '20150202', }, - { - 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266', - 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c', - 'info_dict': { - 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266', - 'ext': 'mp4', - 'title': 'LIVE: Post Game vs. 
Browns', - 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8', - 'upload_date': '20131229', - 'timestamp': 1388354455, - 'thumbnail': 're:^https?://.*\.jpg$', - } + }, { + 'url': 'http://www.patriots.com/video/2015/09/18/10-days-gillette', + 'md5': '4c319e2f625ffd0b481b4382c6fc124c', + 'info_dict': { + 'id': 'n-238346', + 'ext': 'mp4', + 'title': '10 Days at Gillette', + 'description': 'md5:8cd9cd48fac16de596eadc0b24add951', + 'timestamp': 1442618809, + 'upload_date': '20150918', }, - { - 'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish', - 'info_dict': { - 'id': '0ap3000000467607', - 'ext': 'mp4', - 'title': 'Frustrations flare on the field', - 'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.', - 'timestamp': 1422850320, - 'upload_date': '20150202', - }, + }, { + # lowercase data-contentid + 'url': 'http://www.steelers.com/news/article-1/Tomlin-on-Ben-getting-Vick-ready/56399c96-4160-48cf-a7ad-1d17d4a3aef7', + 'info_dict': { + 'id': '12693586-6ea9-4743-9c1c-02c59e4a5ef2', + 'ext': 'mp4', + 'title': 'Tomlin looks ahead to Ravens on a short week', + 'description': 'md5:32f3f7b139f43913181d5cbb24ecad75', + 'timestamp': 1443459651, + 'upload_date': '20150928', }, - { - 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood', - 'only_matching': True, - } - ] + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood', + 'only_matching': True, + }, { + 'url': 'http://www.buffalobills.com/video/videos/Rex_Ryan_Show_World_Wide_Rex/b1dcfab2-3190-4bb1-bfc0-d6e603d6601a', + 'only_matching': True, + }] @staticmethod def prepend_host(host, url): @@ -95,13 +160,14 @@ class NFLIE(InfoExtractor): webpage = self._download_webpage(url, video_id) config_url = NFLIE.prepend_host(host, self._search_regex( - r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL', - default='static/content/static/config/video/config.json')) + r'(?:(?:config|configURL)\s*:\s*|<nflcs:avplayer[^>]+data-config\s*=\s*)(["\'])(?P<config>.+?)\1', + webpage, 'config URL', default='static/content/static/config/video/config.json', + group='config')) # For articles, the id in the url is not the video id video_id = self._search_regex( - r'contentId\s*:\s*"([^"]+)"', webpage, 'video id', default=video_id) - config = self._download_json(config_url, video_id, - note='Downloading player config') + r'(?:<nflcs:avplayer[^>]+data-content[Ii]d\s*=\s*|content[Ii]d\s*:\s*)(["\'])(?P<id>.+?)\1', + webpage, 'video id', default=video_id, group='id') + config = self._download_json(config_url, video_id, 'Downloading player config') url_template = NFLIE.prepend_host( host, '{contentURLTemplate:}'.format(**config)) video_data = self._download_json( diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py index 279b18386..e98a5ef89 100644 --- a/youtube_dl/extractor/nhl.py +++ b/youtube_dl/extractor/nhl.py @@ -72,7 +72,7 @@ class NHLBaseInfoExtractor(InfoExtractor): class NHLIE(NHLBaseInfoExtractor): IE_NAME = 'nhl.com' - _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console)?(?:\?(?:.*?[?&])?)(?:id|hlg)=(?P<id>[-0-9a-zA-Z,]+)' + _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console|embed)?(?:\?(?:.*?[?&])?)(?:id|hlg|playlist)=(?P<id>[-0-9a-zA-Z,]+)' _TESTS = [{ 'url': 
'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614', @@ -136,6 +136,9 @@ class NHLIE(NHLBaseInfoExtractor): 'params': { 'skip_download': True, # Requires rtmpdump } + }, { + 'url': 'http://video.nhl.com/videocenter/embed?playlist=836127', + 'only_matching': True, }] def _real_extract(self, url): @@ -146,9 +149,9 @@ class NHLIE(NHLBaseInfoExtractor): class NHLNewsIE(NHLBaseInfoExtractor): IE_NAME = 'nhl.com:news' IE_DESC = 'NHL news' - _VALID_URL = r'https?://(?:www\.)?nhl\.com/ice/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)' + _VALID_URL = r'https?://(?:.+?\.)?nhl\.com/(?:ice|club)/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)' - _TEST = { + _TESTS = [{ 'url': 'http://www.nhl.com/ice/news.htm?id=750727', 'md5': '4b3d1262e177687a3009937bd9ec0be8', 'info_dict': { @@ -159,13 +162,26 @@ class NHLNewsIE(NHLBaseInfoExtractor): 'duration': 37, 'upload_date': '20150128', }, - } + }, { + # iframe embed + 'url': 'http://sabres.nhl.com/club/news.htm?id=780189', + 'md5': '9f663d1c006c90ac9fb82777d4294e12', + 'info_dict': { + 'id': '836127', + 'ext': 'mp4', + 'title': 'Morning Skate: OTT vs. BUF (9/23/15)', + 'description': "Brian Duff chats with Tyler Ennis prior to Buffalo's first preseason home game.", + 'duration': 93, + 'upload_date': '20150923', + }, + }] def _real_extract(self, url): news_id = self._match_id(url) webpage = self._download_webpage(url, news_id) video_id = self._search_regex( - [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'"], + [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'", + r'<iframe[^>]+src=["\']https?://video.*?\.nhl\.com/videocenter/embed\?.*\bplaylist=(\d+)'], webpage, 'video id') return self._real_extract_video(video_id) diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py index 0f8aa5ada..586e52a4a 100644 --- a/youtube_dl/extractor/niconico.py +++ b/youtube_dl/extractor/niconico.py @@ -8,14 +8,15 @@ import datetime from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( + encode_dict, ExtractorError, int_or_none, parse_duration, parse_iso8601, + sanitized_Request, xpath_text, determine_ext, ) @@ -100,11 +101,8 @@ class NiconicoIE(InfoExtractor): 'mail': username, 'password': password, } - # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode - # chokes on unicode - login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items()) - login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8') - request = compat_urllib_request.Request( + login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8') + request = sanitized_Request( 'https://secure.nicovideo.jp/secure/login', login_data) login_results = self._download_webpage( request, None, note='Logging in', errnote='Unable to log in') @@ -147,7 +145,7 @@ class NiconicoIE(InfoExtractor): 'k': thumb_play_key, 'v': video_id }) - flv_info_request = compat_urllib_request.Request( + flv_info_request = sanitized_Request( 'http://ext.nicovideo.jp/thumb_watch', flv_info_data, {'Content-Type': 'application/x-www-form-urlencoded'}) flv_info_webpage = self._download_webpage( diff --git a/youtube_dl/extractor/ninegag.py b/youtube_dl/extractor/ninegag.py index 7f842b5c2..a06d38afd 100644 --- a/youtube_dl/extractor/ninegag.py +++ b/youtube_dl/extractor/ninegag.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals import re -import json from .common import InfoExtractor from ..utils import str_to_int @@ -9,61 +8,93 @@ from 
..utils import str_to_int class NineGagIE(InfoExtractor): IE_NAME = '9gag' - _VALID_URL = r'''(?x)^https?://(?:www\.)?9gag\.tv/ - (?: - v/(?P<numid>[0-9]+)| - p/(?P<id>[a-zA-Z0-9]+)/(?P<display_id>[^?#/]+) - ) - ''' + _VALID_URL = r'https?://(?:www\.)?9gag(?:\.com/tv|\.tv)/(?:p|embed)/(?P<id>[a-zA-Z0-9]+)(?:/(?P<display_id>[^?#/]+))?' _TESTS = [{ - "url": "http://9gag.tv/v/1912", - "info_dict": { - "id": "1912", - "ext": "mp4", - "description": "This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)", - "title": "\"People Are Awesome 2013\" Is Absolutely Awesome", + 'url': 'http://9gag.com/tv/p/Kk2X5/people-are-awesome-2013-is-absolutely-awesome', + 'info_dict': { + 'id': 'Kk2X5', + 'ext': 'mp4', + 'description': 'This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)', + 'title': '\"People Are Awesome 2013\" Is Absolutely Awesome', 'uploader_id': 'UCdEH6EjDKwtTe-sO2f0_1XA', 'uploader': 'CompilationChannel', 'upload_date': '20131110', - "view_count": int, - "thumbnail": "re:^https?://", + 'view_count': int, }, - 'add_ie': ['Youtube'] + 'add_ie': ['Youtube'], }, { - 'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar', + 'url': 'http://9gag.com/tv/p/aKolP3', 'info_dict': { - 'id': 'KklwM', + 'id': 'aKolP3', 'ext': 'mp4', - 'display_id': 'alternate-banned-opening-scene-of-gravity', - "description": "While Gravity was a pretty awesome movie already, YouTuber Krishna Shenoi came up with a way to improve upon it, introducing a much better solution to Sandra Bullock's seemingly endless tumble in space. The ending is priceless.", - 'title': "Banned Opening Scene Of \"Gravity\" That Changes The Whole Movie", - 'uploader': 'Krishna Shenoi', - 'upload_date': '20140401', - 'uploader_id': 'krishnashenoi93', + 'title': 'This Guy Travelled 11 countries In 44 days Just To Make This Amazing Video', + 'description': "I just saw more in 1 minute than I've seen in 1 year. 
This guy's video is epic!!", + 'uploader_id': 'rickmereki', + 'uploader': 'Rick Mereki', + 'upload_date': '20110803', + 'view_count': int, }, + 'add_ie': ['Vimeo'], + }, { + 'url': 'http://9gag.com/tv/p/KklwM', + 'only_matching': True, + }, { + 'url': 'http://9gag.tv/p/Kk2X5', + 'only_matching': True, + }, { + 'url': 'http://9gag.com/tv/embed/a5Dmvl', + 'only_matching': True, }] + _EXTERNAL_VIDEO_PROVIDER = { + '1': { + 'url': '%s', + 'ie_key': 'Youtube', + }, + '2': { + 'url': 'http://player.vimeo.com/video/%s', + 'ie_key': 'Vimeo', + }, + '3': { + 'url': 'http://instagram.com/p/%s', + 'ie_key': 'Instagram', + }, + '4': { + 'url': 'http://vine.co/v/%s', + 'ie_key': 'Vine', + }, + } + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('numid') or mobj.group('id') + video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id webpage = self._download_webpage(url, display_id) - post_view = json.loads(self._html_search_regex( - r'var postView = new app\.PostView\({\s*post:\s*({.+?}),\s*posts:\s*prefetchedCurrentPost', webpage, 'post view')) + post_view = self._parse_json( + self._search_regex( + r'var\s+postView\s*=\s*new\s+app\.PostView\({\s*post:\s*({.+?})\s*,\s*posts:\s*prefetchedCurrentPost', + webpage, 'post view'), + display_id) - youtube_id = post_view['videoExternalId'] + ie_key = None + source_url = post_view.get('sourceUrl') + if not source_url: + external_video_id = post_view['videoExternalId'] + external_video_provider = post_view['videoExternalProvider'] + source_url = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['url'] % external_video_id + ie_key = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['ie_key'] title = post_view['title'] - description = post_view['description'] - view_count = str_to_int(post_view['externalView']) + description = post_view.get('description') + view_count = str_to_int(post_view.get('externalView')) thumbnail = post_view.get('thumbnail_700w') or post_view.get('ogImageUrl') or post_view.get('thumbnail_300w') return { '_type': 'url_transparent', - 'url': youtube_id, - 'ie_key': 'Youtube', + 'url': source_url, + 'ie_key': ie_key, 'id': video_id, 'display_id': display_id, 'title': title, diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py index a53e27b27..d440313d5 100644 --- a/youtube_dl/extractor/noco.py +++ b/youtube_dl/extractor/noco.py @@ -9,7 +9,7 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, + compat_urlparse, ) from ..utils import ( clean_html, @@ -17,6 +17,7 @@ from ..utils import ( int_or_none, float_or_none, parse_iso8601, + sanitized_Request, ) @@ -74,7 +75,7 @@ class NocoIE(InfoExtractor): 'username': username, 'password': password, } - request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) + request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8') login = self._download_json(request, None, 'Logging in as %s' % username) @@ -82,14 +83,21 @@ class NocoIE(InfoExtractor): if 'erreur' in login: raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True) + @staticmethod + def _ts(): + return int(time.time() * 1000) + def _call_api(self, path, video_id, note, sub_lang=None): - ts = compat_str(int(time.time() * 1000)) + ts = compat_str(self._ts() + self._ts_offset) tk = 
hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest() url = self._API_URL_TEMPLATE % (path, ts, tk) if sub_lang: url += self._SUB_LANG_TEMPLATE % sub_lang - resp = self._download_json(url, video_id, note) + request = sanitized_Request(url) + request.add_header('Referer', self._referer) + + resp = self._download_json(request, video_id, note) if isinstance(resp, dict) and resp.get('error'): self._raise_error(resp['error'], resp['description']) @@ -102,8 +110,22 @@ class NocoIE(InfoExtractor): expected=True) def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) + + # Timestamp adjustment offset between server time and local time + # must be calculated in order to use timestamps closest to server's + # in all API requests (see https://github.com/rg3/youtube-dl/issues/7864) + webpage = self._download_webpage(url, video_id) + + player_url = self._search_regex( + r'(["\'])(?P<player>https?://noco\.tv/(?:[^/]+/)+NocoPlayer.+?\.swf.*?)\1', + webpage, 'noco player', group='player', + default='http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf') + + qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query) + ts = int_or_none(qs.get('ts', [None])[0]) + self._ts_offset = ts - self._ts() if ts else 0 + self._referer = player_url medias = self._call_api( 'shows/%s/medias' % video_id, @@ -155,8 +177,8 @@ class NocoIE(InfoExtractor): 'format_id': format_id_extended, 'width': int_or_none(fmt.get('res_width')), 'height': int_or_none(fmt.get('res_lines')), - 'abr': int_or_none(fmt.get('audiobitrate')), - 'vbr': int_or_none(fmt.get('videobitrate')), + 'abr': int_or_none(fmt.get('audiobitrate'), 1000), + 'vbr': int_or_none(fmt.get('videobitrate'), 1000), 'filesize': int_or_none(fmt.get('filesize')), 'format_note': qualities[format_id].get('quality_name'), 'quality': qualities[format_id].get('priority'), diff --git a/youtube_dl/extractor/nosvideo.py b/youtube_dl/extractor/nosvideo.py index f5ef856db..eab816e49 100644 --- a/youtube_dl/extractor/nosvideo.py +++ b/youtube_dl/extractor/nosvideo.py @@ -4,11 +4,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( ExtractorError, + sanitized_Request, urlencode_postdata, xpath_text, xpath_with_ns, @@ -41,7 +39,7 @@ class NosVideoIE(InfoExtractor): 'op': 'download1', 'method_free': 'Continue to Video', } - req = compat_urllib_request.Request(url, urlencode_postdata(fields)) + req = sanitized_Request(url, urlencode_postdata(fields)) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage(req, video_id, 'Downloading download page') diff --git a/youtube_dl/extractor/novamov.py b/youtube_dl/extractor/novamov.py index 04d779890..d68c1ad79 100644 --- a/youtube_dl/extractor/novamov.py +++ b/youtube_dl/extractor/novamov.py @@ -3,11 +3,13 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( ExtractorError, + NO_DEFAULT, + encode_dict, + sanitized_Request, + urlencode_postdata, ) @@ -15,15 +17,16 @@ class NovaMovIE(InfoExtractor): IE_NAME = 'novamov' IE_DESC = 'NovaMov' - _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})' + _VALID_URL_TEMPLATE = 
r'http://(?:(?:www\.)?%(host)s/(?:file|video|mobile/#/videos)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})' _VALID_URL = _VALID_URL_TEMPLATE % {'host': 'novamov\.com'} _HOST = 'www.novamov.com' _FILE_DELETED_REGEX = r'This file no longer exists on our servers!</h2>' - _FILEKEY_REGEX = r'flashvars\.filekey="(?P<filekey>[^"]+)";' + _FILEKEY_REGEX = r'flashvars\.filekey=(?P<filekey>"?[^"]+"?);' _TITLE_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>' _DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>' + _URL_TEMPLATE = 'http://%s/video/%s' _TEST = { 'url': 'http://www.novamov.com/video/4rurhn9x446jj', @@ -37,20 +40,50 @@ class NovaMovIE(InfoExtractor): 'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)' } - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - page = self._download_webpage( - 'http://%s/video/%s' % (self._HOST, video_id), video_id, 'Downloading video page') - - if re.search(self._FILE_DELETED_REGEX, page) is not None: + def _check_existence(self, webpage, video_id): + if re.search(self._FILE_DELETED_REGEX, webpage) is not None: raise ExtractorError('Video %s does not exist' % video_id, expected=True) - filekey = self._search_regex(self._FILEKEY_REGEX, page, 'filekey') - - title = self._html_search_regex(self._TITLE_REGEX, page, 'title', fatal=False) - description = self._html_search_regex(self._DESCRIPTION_REGEX, page, 'description', default='', fatal=False) + def _real_extract(self, url): + video_id = self._match_id(url) + + url = self._URL_TEMPLATE % (self._HOST, video_id) + + webpage = self._download_webpage( + url, video_id, 'Downloading video page') + + self._check_existence(webpage, video_id) + + def extract_filekey(default=NO_DEFAULT): + filekey = self._search_regex( + self._FILEKEY_REGEX, webpage, 'filekey', default=default) + if filekey is not default and (filekey[0] != '"' or filekey[-1] != '"'): + return self._search_regex( + r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(filekey), webpage, 'filekey', default=default) + else: + return filekey + + filekey = extract_filekey(default=None) + + if not filekey: + fields = self._hidden_inputs(webpage) + post_url = self._search_regex( + r'<form[^>]+action=(["\'])(?P<url>.+?)\1', webpage, + 'post url', default=url, group='url') + if not post_url.startswith('http'): + post_url = compat_urlparse.urljoin(url, post_url) + request = sanitized_Request( + post_url, urlencode_postdata(encode_dict(fields))) + request.add_header('Content-Type', 'application/x-www-form-urlencoded') + request.add_header('Referer', post_url) + webpage = self._download_webpage( + request, video_id, 'Downloading continue to the video page') + self._check_existence(webpage, video_id) + + filekey = extract_filekey() + + title = self._html_search_regex(self._TITLE_REGEX, webpage, 'title', fatal=False) + description = self._html_search_regex(self._DESCRIPTION_REGEX, webpage, 'description', default='', fatal=False) api_response = self._download_webpage( 'http://%s/api/player.api.php?key=%s&file=%s' % (self._HOST, filekey, video_id), video_id, @@ -69,3 +102,89 @@ class NovaMovIE(InfoExtractor): 'title': title, 'description': description } + + +class WholeCloudIE(NovaMovIE): + IE_NAME = 'wholecloud' + IE_DESC = 'WholeCloud' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': '(?:wholecloud\.net|movshare\.(?:net|sx|ag))'} + + 
_HOST = 'www.wholecloud.net' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>' + _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>' + + _TEST = { + 'url': 'http://www.wholecloud.net/video/559e28be54d96', + 'md5': 'abd31a2132947262c50429e1d16c1bfd', + 'info_dict': { + 'id': '559e28be54d96', + 'ext': 'flv', + 'title': 'dissapeared image', + 'description': 'optical illusion dissapeared image magic illusion', + } + } + + +class NowVideoIE(NovaMovIE): + IE_NAME = 'nowvideo' + IE_DESC = 'NowVideo' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:to|ch|ec|sx|eu|at|ag|co|li)'} + + _HOST = 'www.nowvideo.to' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<h4>([^<]+)</h4>' + _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>' + + _TEST = { + 'url': 'http://www.nowvideo.sx/video/f1d6fce9a968b', + 'md5': '12c82cad4f2084881d8bc60ee29df092', + 'info_dict': { + 'id': 'f1d6fce9a968b', + 'ext': 'flv', + 'title': 'youtubedl test video BaWjenozKc', + 'description': 'Description', + }, + } + + +class VideoWeedIE(NovaMovIE): + IE_NAME = 'videoweed' + IE_DESC = 'VideoWeed' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'} + + _HOST = 'www.videoweed.es' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>' + _URL_TEMPLATE = 'http://%s/file/%s' + + _TEST = { + 'url': 'http://www.videoweed.es/file/b42178afbea14', + 'md5': 'abd31a2132947262c50429e1d16c1bfd', + 'info_dict': { + 'id': 'b42178afbea14', + 'ext': 'flv', + 'title': 'optical illusion dissapeared image magic illusion', + 'description': '' + }, + } + + +class CloudTimeIE(NovaMovIE): + IE_NAME = 'cloudtime' + IE_DESC = 'CloudTime' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'cloudtime\.to'} + + _HOST = 'www.cloudtime.to' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<div[^>]+class=["\']video_det["\'][^>]*>\s*<strong>([^<]+)</strong>' + + _TEST = None diff --git a/youtube_dl/extractor/nowness.py b/youtube_dl/extractor/nowness.py index 6b2f3f55a..446f5901c 100644 --- a/youtube_dl/extractor/nowness.py +++ b/youtube_dl/extractor/nowness.py @@ -1,64 +1,140 @@ # encoding: utf-8 from __future__ import unicode_literals -import re - -from .brightcove import BrightcoveIE +from .brightcove import ( + BrightcoveLegacyIE, + BrightcoveNewIE, +) from .common import InfoExtractor -from ..utils import ExtractorError +from ..compat import compat_str +from ..utils import ( + ExtractorError, + sanitized_Request, +) + + +class NownessBaseIE(InfoExtractor): + def _extract_url_result(self, post): + if post['type'] == 'video': + for media in post['media']: + if media['type'] == 'video': + video_id = media['content'] + source = media['source'] + if source == 'brightcove': + player_code = self._download_webpage( + 'http://www.nowness.com/iframe?id=%s' % video_id, video_id, + note='Downloading player JavaScript', + errnote='Unable to download player JavaScript') + bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code) + if bc_url: + return self.url_result(bc_url, BrightcoveLegacyIE.ie_key()) + bc_url = BrightcoveNewIE._extract_url(player_code) + if bc_url: + return self.url_result(bc_url, BrightcoveNewIE.ie_key()) + raise ExtractorError('Could not find player definition') + elif source == 'vimeo': + return self.url_result('http://vimeo.com/%s' % 
video_id, 'Vimeo') + elif source == 'youtube': + return self.url_result(video_id, 'Youtube') + elif source == 'cinematique': + # youtube-dl currently doesn't support cinematique + # return self.url_result('http://cinematique.com/embed/%s' % video_id, 'Cinematique') + pass + def _api_request(self, url, request_path): + display_id = self._match_id(url) + request = sanitized_Request( + 'http://api.nowness.com/api/' + request_path % display_id, + headers={ + 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us', + }) + return display_id, self._download_json(request, display_id) -class NownessIE(InfoExtractor): - _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])' - _TESTS = [ - { - 'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation', - 'md5': '068bc0202558c2e391924cb8cc470676', - 'info_dict': { - 'id': '2520295746001', - 'ext': 'mp4', - 'title': 'Candor: The Art of Gesticulation', - 'description': 'Candor: The Art of Gesticulation', - 'thumbnail': 're:^https?://.*\.jpg', - 'uploader': 'Nowness', - } +class NownessIE(NownessBaseIE): + IE_NAME = 'nowness' + _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/(?:story|(?:series|category)/[^/]+)/(?P<id>[^/]+?)(?:$|[?#])' + _TESTS = [{ + 'url': 'https://www.nowness.com/story/candor-the-art-of-gesticulation', + 'md5': '068bc0202558c2e391924cb8cc470676', + 'info_dict': { + 'id': '2520295746001', + 'ext': 'mp4', + 'title': 'Candor: The Art of Gesticulation', + 'description': 'Candor: The Art of Gesticulation', + 'thumbnail': 're:^https?://.*\.jpg', + 'uploader': 'Nowness', }, - { - 'url': 'http://cn.nowness.com/day/2014/8/7/4069/kasper-bj-rke-ft-jaakko-eino-kalevi--tnr', - 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3', - 'info_dict': { - 'id': '3716354522001', - 'ext': 'mp4', - 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR', - 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR', - 'thumbnail': 're:^https?://.*\.jpg', - 'uploader': 'Nowness', - } + }, { + 'url': 'https://cn.nowness.com/story/kasper-bjorke-ft-jaakko-eino-kalevi-tnr', + 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3', + 'info_dict': { + 'id': '3716354522001', + 'ext': 'mp4', + 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR', + 'description': 'Kasper Bjørke ft. 
Jaakko Eino Kalevi: TNR', + 'thumbnail': 're:^https?://.*\.jpg', + 'uploader': 'Nowness', }, - ] + }, { + # vimeo + 'url': 'https://www.nowness.com/series/nowness-picks/jean-luc-godard-supercut', + 'md5': '9a5a6a8edf806407e411296ab6bc2a49', + 'info_dict': { + 'id': '130020913', + 'ext': 'mp4', + 'title': 'Bleu, Blanc, Rouge - A Godard Supercut', + 'description': 'md5:f0ea5f1857dffca02dbd37875d742cec', + 'thumbnail': 're:^https?://.*\.jpg', + 'upload_date': '20150607', + 'uploader': 'Cinema Sem Lei', + 'uploader_id': 'cinemasemlei', + }, + }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('slug') + _, post = self._api_request(url, 'post/getBySlug/%s') + return self._extract_url_result(post) - webpage = self._download_webpage(url, video_id) - player_url = self._search_regex( - r'"([^"]+/content/issue-[0-9.]+.js)"', webpage, 'player URL') - real_id = self._search_regex( - r'\sdata-videoId="([0-9]+)"', webpage, 'internal video ID') - player_code = self._download_webpage( - player_url, video_id, - note='Downloading player JavaScript', - errnote='Player download failed') - player_code = player_code.replace("'+d+'", real_id) +class NownessPlaylistIE(NownessBaseIE): + IE_NAME = 'nowness:playlist' + _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/playlist/(?P<id>\d+)' + _TEST = { + 'url': 'https://www.nowness.com/playlist/3286/i-guess-thats-why-they-call-it-the-blues', + 'info_dict': { + 'id': '3286', + }, + 'playlist_mincount': 8, + } - bc_url = BrightcoveIE._extract_brightcove_url(player_code) - if bc_url is None: - raise ExtractorError('Could not find player definition') - return { - '_type': 'url', - 'url': bc_url, - 'ie_key': 'Brightcove', - } + def _real_extract(self, url): + playlist_id, playlist = self._api_request(url, 'post?PlaylistId=%s') + entries = [self._extract_url_result(item) for item in playlist['items']] + return self.playlist_result(entries, playlist_id) + + +class NownessSeriesIE(NownessBaseIE): + IE_NAME = 'nowness:series' + _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/series/(?P<id>[^/]+?)(?:$|[?#])' + _TEST = { + 'url': 'https://www.nowness.com/series/60-seconds', + 'info_dict': { + 'id': '60', + 'title': '60 Seconds', + 'description': 'One-minute wisdom in a new NOWNESS series', + }, + 'playlist_mincount': 4, + } + + def _real_extract(self, url): + display_id, series = self._api_request(url, 'series/getBySlug/%s') + entries = [self._extract_url_result(post) for post in series['posts']] + series_title = None + series_description = None + translations = series.get('translations', []) + if translations: + series_title = translations[0].get('title') or translations[0]['seoTitle'] + series_description = translations[0].get('seoDescription') + return self.playlist_result( + entries, compat_str(series['id']), series_title, series_description) diff --git a/youtube_dl/extractor/nowtv.py b/youtube_dl/extractor/nowtv.py index 0b5ff4760..fd107aca2 100644 --- a/youtube_dl/extractor/nowtv.py +++ b/youtube_dl/extractor/nowtv.py @@ -7,6 +7,7 @@ from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, + determine_ext, int_or_none, parse_iso8601, parse_duration, @@ -14,8 +15,63 @@ from ..utils import ( ) -class NowTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?nowtv\.de/(?P<station>rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/player' +class NowTVBaseIE(InfoExtractor): + _VIDEO_FIELDS = ( + 'id', 'title', 'free', 'geoblocked', 'articleLong', 'articleShort', + 
'broadcastStartDate', 'seoUrl', 'duration', 'files', + 'format.defaultImage169Format', 'format.defaultImage169Logo') + + def _extract_video(self, info, display_id=None): + video_id = compat_str(info['id']) + + files = info['files'] + if not files: + if info.get('geoblocked', False): + raise ExtractorError( + 'Video %s is not available from your location due to geo restriction' % video_id, + expected=True) + if not info.get('free', True): + raise ExtractorError( + 'Video %s is not available for free' % video_id, expected=True) + + formats = [] + for item in files['items']: + if determine_ext(item['path']) != 'f4v': + continue + app, play_path = remove_start(item['path'], '/').split('/', 1) + formats.append({ + 'url': 'rtmpe://fms.rtl.de', + 'app': app, + 'play_path': 'mp4:%s' % play_path, + 'ext': 'flv', + 'page_url': 'http://rtlnow.rtl.de', + 'player_url': 'http://cdn.static-fra.de/now/vodplayer.swf', + 'tbr': int_or_none(item.get('bitrate')), + }) + self._sort_formats(formats) + + title = info['title'] + description = info.get('articleLong') or info.get('articleShort') + timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ') + duration = parse_duration(info.get('duration')) + + f = info.get('format', {}) + thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo') + + return { + 'id': video_id, + 'display_id': display_id or info.get('seoUrl'), + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'timestamp': timestamp, + 'duration': duration, + 'formats': formats, + } + + +class NowTVIE(NowTVBaseIE): + _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/(?:(?:list/[^/]+|jahr/\d{4}/\d{1,2})/)?(?P<id>[^/]+)/(?:player|preview)' _TESTS = [{ # rtl @@ -23,8 +79,8 @@ class NowTVIE(InfoExtractor): 'info_dict': { 'id': '203519', 'display_id': 'bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit', - 'ext': 'mp4', - 'title': 'Die neuen Bauern und eine Hochzeit', + 'ext': 'flv', + 'title': 'Inka Bause stellt die neuen Bauern vor', 'description': 'md5:e234e1ed6d63cf06be5c070442612e7e', 'thumbnail': 're:^https?://.*\.jpg$', 'timestamp': 1432580700, @@ -32,7 +88,7 @@ class NowTVIE(InfoExtractor): 'duration': 2786, }, 'params': { - # m3u8 download + # rtmp download 'skip_download': True, }, }, { @@ -41,7 +97,7 @@ class NowTVIE(InfoExtractor): 'info_dict': { 'id': '203481', 'display_id': 'berlin-tag-nacht/berlin-tag-nacht-folge-934', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'Berlin - Tag & Nacht (Folge 934)', 'description': 'md5:c85e88c2e36c552dfe63433bc9506dd0', 'thumbnail': 're:^https?://.*\.jpg$', @@ -50,7 +106,7 @@ class NowTVIE(InfoExtractor): 'duration': 2641, }, 'params': { - # m3u8 download + # rtmp download 'skip_download': True, }, }, { @@ -59,7 +115,7 @@ class NowTVIE(InfoExtractor): 'info_dict': { 'id': '165780', 'display_id': 'alarm-fuer-cobra-11-die-autobahnpolizei/hals-und-beinbruch-2014-08-23-21-10-00', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'Hals- und Beinbruch', 'description': 'md5:b50d248efffe244e6f56737f0911ca57', 'thumbnail': 're:^https?://.*\.jpg$', @@ -68,7 +124,7 @@ class NowTVIE(InfoExtractor): 'duration': 2742, }, 'params': { - # m3u8 download + # rtmp download 'skip_download': True, }, }, { @@ -77,7 +133,7 @@ class NowTVIE(InfoExtractor): 'info_dict': { 'id': '99205', 'display_id': 'medicopter-117/angst', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'Angst!', 'description': 'md5:30cbc4c0b73ec98bcd73c9f2a8c17c4e', 'thumbnail': 're:^https?://.*\.jpg$', @@ -86,7 +142,7 @@ class 
NowTVIE(InfoExtractor): 'duration': 3025, }, 'params': { - # m3u8 download + # rtmp download 'skip_download': True, }, }, { @@ -95,7 +151,7 @@ class NowTVIE(InfoExtractor): 'info_dict': { 'id': '203521', 'display_id': 'ratgeber-geld/thema-ua-der-erste-blick-die-apple-watch', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'Thema u.a.: Der erste Blick: Die Apple Watch', 'description': 'md5:4312b6c9d839ffe7d8caf03865a531af', 'thumbnail': 're:^https?://.*\.jpg$', @@ -104,7 +160,7 @@ class NowTVIE(InfoExtractor): 'duration': 1083, }, 'params': { - # m3u8 download + # rtmp download 'skip_download': True, }, }, { @@ -113,7 +169,7 @@ class NowTVIE(InfoExtractor): 'info_dict': { 'id': '128953', 'display_id': 'der-hundeprofi/buero-fall-chihuahua-joel', - 'ext': 'mp4', + 'ext': 'flv', 'title': "Büro-Fall / Chihuahua 'Joel'", 'description': 'md5:e62cb6bf7c3cc669179d4f1eb279ad8d', 'thumbnail': 're:^https?://.*\.jpg$', @@ -122,71 +178,83 @@ class NowTVIE(InfoExtractor): 'duration': 3092, }, 'params': { - # m3u8 download + # rtmp download 'skip_download': True, }, + }, { + 'url': 'http://www.nowtv.de/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit/preview', + 'only_matching': True, + }, { + 'url': 'http://www.nowtv.at/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit/preview?return=/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit', + 'only_matching': True, + }, { + 'url': 'http://www.nowtv.de/rtl2/echtzeit/list/aktuell/schnelles-geld-am-ende-der-welt/player', + 'only_matching': True, + }, { + 'url': 'http://www.nowtv.de/rtl2/zuhause-im-glueck/jahr/2015/11/eine-erschuetternde-diagnose/player', + 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - display_id = mobj.group('id') - station = mobj.group('station') + display_id = '%s/%s' % (mobj.group('show_id'), mobj.group('id')) info = self._download_json( - 'https://api.nowtv.de/v3/movies/%s?fields=id,title,free,geoblocked,articleLong,articleShort,broadcastStartDate,seoUrl,duration,format,files' % display_id, - display_id) + 'https://api.nowtv.de/v3/movies/%s?fields=%s' + % (display_id, ','.join(self._VIDEO_FIELDS)), display_id) - video_id = compat_str(info['id']) + return self._extract_video(info, display_id) - files = info['files'] - if not files: - if info.get('geoblocked', False): - raise ExtractorError( - 'Video %s is not available from your location due to geo restriction' % video_id, - expected=True) - if not info.get('free', True): - raise ExtractorError( - 'Video %s is not available for free' % video_id, expected=True) - f = info.get('format', {}) - station = f.get('station') or station - - STATIONS = { - 'rtl': 'rtlnow', - 'rtl2': 'rtl2now', - 'vox': 'voxnow', - 'nitro': 'rtlnitronow', - 'ntv': 'n-tvnow', - 'superrtl': 'superrtlnow' - } +class NowTVListIE(NowTVBaseIE): + _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/list/(?P<id>[^?/#&]+)$' - formats = [] - for item in files['items']: - item_path = remove_start(item['path'], '/') - tbr = int_or_none(item['bitrate']) - m3u8_url = 'http://hls.fra.%s.de/hls-vod-enc/%s.m3u8' % (STATIONS[station], item_path) - m3u8_url = m3u8_url.replace('now/', 'now/videos/') - formats.append({ - 'url': m3u8_url, - 'format_id': '%s-%sk' % (item['id'], tbr), - 'ext': 'mp4', - 'tbr': tbr, - }) - self._sort_formats(formats) + _SHOW_FIELDS = ('title', ) + _SEASON_FIELDS = ('id', 'headline', 'seoheadline', ) - title = info['title'] - description = info.get('articleLong') or info.get('articleShort') - 
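# A minimal sketch (not part of the patch) of the metadata request that
# NowTVIE._real_extract above assembles: display_id joins the show and
# episode slugs from the URL, and _VIDEO_FIELDS is comma-joined into the
# ?fields= query. The field list is abbreviated; the slugs come from the
# tests above.
video_fields = ('id', 'title', 'free', 'geoblocked', 'files')  # abbreviated
display_id = '%s/%s' % ('bauer-sucht-frau', 'die-neuen-bauern-und-eine-hochzeit')
api_url = ('https://api.nowtv.de/v3/movies/%s?fields=%s'
           % (display_id, ','.join(video_fields)))
assert api_url.endswith('fields=id,title,free,geoblocked,files')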
timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ') - duration = parse_duration(info.get('duration')) - thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo') + _TESTS = [{ + 'url': 'http://www.nowtv.at/rtl/stern-tv/list/aktuell', + 'info_dict': { + 'id': '17006', + 'title': 'stern TV - Aktuell', + }, + 'playlist_count': 1, + }, { + 'url': 'http://www.nowtv.at/rtl/das-supertalent/list/free-staffel-8', + 'info_dict': { + 'id': '20716', + 'title': 'Das Supertalent - FREE Staffel 8', + }, + 'playlist_count': 14, + }] - return { - 'id': video_id, - 'display_id': display_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'timestamp': timestamp, - 'duration': duration, - 'formats': formats, - } + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + show_id = mobj.group('show_id') + season_id = mobj.group('id') + + fields = [] + fields.extend(self._SHOW_FIELDS) + fields.extend('formatTabs.%s' % field for field in self._SEASON_FIELDS) + fields.extend( + 'formatTabs.formatTabPages.container.movies.%s' % field + for field in self._VIDEO_FIELDS) + + list_info = self._download_json( + 'https://api.nowtv.de/v3/formats/seo?fields=%s&name=%s.php' + % (','.join(fields), show_id), + season_id) + + season = next( + season for season in list_info['formatTabs']['items'] + if season.get('seoheadline') == season_id) + + title = '%s - %s' % (list_info['title'], season['headline']) + + entries = [] + for container in season['formatTabPages']['items']: + for info in ((container.get('container') or {}).get('movies') or {}).get('items') or []: + entries.append(self._extract_video(info)) + + return self.playlist_result( + entries, compat_str(season.get('id') or season_id), title) diff --git a/youtube_dl/extractor/nowvideo.py b/youtube_dl/extractor/nowvideo.py deleted file mode 100644 index dec09cdfe..000000000 --- a/youtube_dl/extractor/nowvideo.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import unicode_literals - -from .novamov import NovaMovIE - - -class NowVideoIE(NovaMovIE): - IE_NAME = 'nowvideo' - IE_DESC = 'NowVideo' - - _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|sx|eu|at|ag|co|li)'} - - _HOST = 'www.nowvideo.ch' - - _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' - _FILEKEY_REGEX = r'var fkzd="([^"]+)";' - _TITLE_REGEX = r'<h4>([^<]+)</h4>' - _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>' - - _TEST = { - 'url': 'http://www.nowvideo.ch/video/0mw0yow7b6dxa', - 'md5': 'f8fbbc8add72bd95b7850c6a02fc8817', - 'info_dict': { - 'id': '0mw0yow7b6dxa', - 'ext': 'flv', - 'title': 'youtubedl test video _BaW_jenozKc.mp4', - 'description': 'Description', - } - } diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 0c2d02c10..eb12fb810 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -407,6 +407,7 @@ class NPORadioFragmentIE(InfoExtractor): class VPROIE(NPOIE): + IE_NAME = 'vpro' _VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html' _TESTS = [ diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index d066a96db..6ff13050d 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -4,7 +4,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..compat import compat_urlparse from ..utils import ( + determine_ext, ExtractorError, float_or_none, parse_duration, @@ -47,12 +49,22 @@ class NRKIE(InfoExtractor): 
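# Editor's illustration of NowTVListIE's season lookup and the defensive
# traversal above: every nesting level may be absent, so the `or {}` /
# `or []` fallbacks keep the chain from raising. Data below is made up.
list_info = {'formatTabs': {'items': [
    {'id': 17006, 'headline': 'Aktuell', 'seoheadline': 'aktuell',
     'formatTabPages': {'items': [
         {'container': {'movies': {'items': [{'id': 1}, {'id': 2}]}}},
         {'container': None},  # pages without movies are skipped safely
     ]}},
]}}
season = next(
    season for season in list_info['formatTabs']['items']
    if season.get('seoheadline') == 'aktuell')
entries = []
for container in season['formatTabPages']['items']:
    for info in ((container.get('container') or {}).get('movies') or {}).get('items') or []:
        entries.append(info['id'])
assert entries == [1, 2]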
'http://v8.psapi.nrk.no/mediaelement/%s' % video_id, video_id, 'Downloading media JSON') - if data['usageRights']['isGeoBlocked']: - raise ExtractorError( - 'NRK har ikke rettig-heter til å vise dette programmet utenfor Norge', - expected=True) + media_url = data.get('mediaUrl') - video_url = data['mediaUrl'] + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81' + if not media_url: + if data['usageRights']['isGeoBlocked']: + raise ExtractorError( + 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge', + expected=True) + + if determine_ext(media_url) == 'f4m': + formats = self._extract_f4m_formats( + media_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id, f4m_id='hds') + else: + formats = [{ + 'url': media_url, + 'ext': 'flv', + }] duration = parse_duration(data.get('duration')) @@ -66,12 +78,11 @@ class NRKIE(InfoExtractor): return { 'id': video_id, - 'url': video_url, - 'ext': 'flv', 'title': data['title'], 'description': data['description'], 'duration': duration, 'thumbnail': thumbnail, + 'formats': formats, } @@ -196,20 +207,6 @@ class NRKTVIE(InfoExtractor): } ] - def _debug_print(self, txt): - if self._downloader.params.get('verbose', False): - self.to_screen('[debug] %s' % txt) - - def _get_subtitles(self, subtitlesurl, video_id, baseurl): - url = "%s%s" % (baseurl, subtitlesurl) - self._debug_print('%s: Subtitle url: %s' % (video_id, url)) - captions = self._download_xml( - url, video_id, 'Downloading subtitles') - lang = captions.get('lang', 'no') - return {lang: [ - {'ext': 'ttml', 'url': url}, - ]} - def _extract_f4m(self, manifest_url, video_id): return self._extract_f4m_formats( manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id, f4m_id='hds') @@ -218,7 +215,7 @@ class NRKTVIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') part_id = mobj.group('part_id') - baseurl = mobj.group('baseurl') + base_url = mobj.group('baseurl') webpage = self._download_webpage(url, video_id) @@ -278,11 +275,14 @@ class NRKTVIE(InfoExtractor): self._sort_formats(formats) subtitles_url = self._html_search_regex( - r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"', - webpage, 'subtitle URL', default=None) - subtitles = None + r'data-subtitlesurl\s*=\s*(["\'])(?P<url>.+?)\1', + webpage, 'subtitle URL', default=None, group='url') + subtitles = {} if subtitles_url: - subtitles = self.extract_subtitles(subtitles_url, video_id, baseurl) + subtitles['no'] = [{ + 'ext': 'ttml', + 'url': compat_urlparse.urljoin(base_url, subtitles_url), + }] return { 'id': video_id, diff --git a/youtube_dl/extractor/nuvid.py b/youtube_dl/extractor/nuvid.py index 57928f2ae..9fa7cefad 100644 --- a/youtube_dl/extractor/nuvid.py +++ b/youtube_dl/extractor/nuvid.py @@ -3,11 +3,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( parse_duration, + sanitized_Request, unified_strdate, ) @@ -33,7 +31,7 @@ class NuvidIE(InfoExtractor): formats = [] for dwnld_speed, format_id in [(0, '3gp'), (5, 'mp4')]: - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://m.nuvid.com/play/%s' % video_id) request.add_header('Cookie', 'skip_download_page=1; dwnld_speed=%d; adv_show=1' % dwnld_speed) webpage = self._download_webpage( diff --git a/youtube_dl/extractor/odnoklassniki.py b/youtube_dl/extractor/odnoklassniki.py index 215ffe87b..184c7a323 100644 --- a/youtube_dl/extractor/odnoklassniki.py +++ b/youtube_dl/extractor/odnoklassniki.py @@ -4,6 +4,7 @@ from 
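# Editor's sketch of the reworked NRK format selection above: f4m manifests
# go through the HDS extractor, anything else is treated as a direct flv
# URL. The patch itself raises only for geo-blocked videos; the generic
# fallback error below is an assumption added so a missing mediaUrl cannot
# reach the extension check. determine_ext in the patch also handles query
# strings; endswith keeps this sketch dependency-free.
def build_formats(data, extract_f4m_formats):
    media_url = data.get('mediaUrl')
    if not media_url:
        if data['usageRights']['isGeoBlocked']:
            raise Exception('geo-blocked outside Norway')
        raise Exception('no mediaUrl in media JSON')  # assumed fallback
    if media_url.endswith('.f4m'):
        return extract_f4m_formats(
            media_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81')
    return [{'url': media_url, 'ext': 'flv'}]

assert build_formats({'mediaUrl': 'http://x/y.flv'}, None) == [{'url': 'http://x/y.flv', 'ext': 'flv'}]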
__future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urllib_parse_unquote from ..utils import ( + ExtractorError, unified_strdate, int_or_none, qualities, @@ -12,20 +13,23 @@ from ..utils import ( class OdnoklassnikiIE(InfoExtractor): - _VALID_URL = r'https?://(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)' + _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)' _TESTS = [{ # metadata in JSON 'url': 'http://ok.ru/video/20079905452', - 'md5': '8e24ad2da6f387948e7a7d44eb8668fe', + 'md5': '6ba728d85d60aa2e6dd37c9e70fdc6bc', 'info_dict': { 'id': '20079905452', 'ext': 'mp4', 'title': 'Культура меняет нас (прекрасный ролик!))', 'duration': 100, + 'upload_date': '20141207', 'uploader_id': '330537914540', 'uploader': 'Виталий Добровольский', 'like_count': int, + 'age_limit': 0, }, + 'skip': 'Video has been blocked', }, { # metadataUrl 'url': 'http://ok.ru/video/63567059965189-0', @@ -35,13 +39,36 @@ class OdnoklassnikiIE(InfoExtractor): 'ext': 'mp4', 'title': 'Девушка без комплексов ...', 'duration': 191, + 'upload_date': '20150518', 'uploader_id': '534380003155', - 'uploader': 'Андрей Мещанинов', + 'uploader': '☭ Андрей Мещанинов ☭', 'like_count': int, + 'age_limit': 0, + }, + }, { + # YouTube embed (metadataUrl, provider == USER_YOUTUBE) + 'url': 'http://ok.ru/video/64211978996595-1', + 'md5': '5d7475d428845cd2e13bae6f1a992278', + 'info_dict': { + 'id': '64211978996595-1', + 'ext': 'mp4', + 'title': 'Космическая среда от 26 августа 2015', + 'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0', + 'duration': 440, + 'upload_date': '20150826', + 'uploader_id': '750099571', + 'uploader': 'Алина П', + 'age_limit': 0, }, }, { 'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452', 'only_matching': True, + }, { + 'url': 'http://www.ok.ru/video/20648036891', + 'only_matching': True, + }, { + 'url': 'http://www.ok.ru/videoembed/20648036891', + 'only_matching': True, }] def _real_extract(self, url): @@ -50,9 +77,16 @@ class OdnoklassnikiIE(InfoExtractor): webpage = self._download_webpage( 'http://ok.ru/video/%s' % video_id, video_id) + error = self._search_regex( + r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<', + webpage, 'error', default=None) + if error: + raise ExtractorError(error, expected=True) + player = self._parse_json( unescapeHTML(self._search_regex( - r'data-attributes="([^"]+)"', webpage, 'player')), + r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id, + webpage, 'player', group='player')), video_id) flashvars = player['flashvars'] @@ -85,16 +119,7 @@ class OdnoklassnikiIE(InfoExtractor): like_count = int_or_none(metadata.get('likeCount')) - quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd')) - - formats = [{ - 'url': f['url'], - 'ext': 'mp4', - 'format_id': f['name'], - 'quality': quality(f['name']), - } for f in metadata['videos']] - - return { + info = { 'id': video_id, 'title': title, 'thumbnail': thumbnail, @@ -104,5 +129,24 @@ class OdnoklassnikiIE(InfoExtractor): 'uploader_id': uploader_id, 'like_count': like_count, 'age_limit': age_limit, - 'formats': formats, } + + if metadata.get('provider') == 'USER_YOUTUBE': + info.update({ + '_type': 'url_transparent', + 'url': movie['contentId'], + }) + return info + + quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd')) + + formats = [{ + 'url': f['url'], + 'ext': 'mp4', + 'format_id': f['name'], + 'quality': quality(f['name']), + } for f in 
metadata['videos']] + self._sort_formats(formats) + + info['formats'] = formats + return info diff --git a/youtube_dl/extractor/ooyala.py b/youtube_dl/extractor/ooyala.py index a262a9f6d..8603fd692 100644 --- a/youtube_dl/extractor/ooyala.py +++ b/youtube_dl/extractor/ooyala.py @@ -1,108 +1,78 @@ from __future__ import unicode_literals import re -import json import base64 from .common import InfoExtractor from ..utils import ( - unescapeHTML, - ExtractorError, - determine_ext, int_or_none, + float_or_none, + ExtractorError, + unsmuggle_url, ) +from ..compat import compat_urllib_parse class OoyalaBaseIE(InfoExtractor): - def _extract_result(self, info, more_info): - embedCode = info['embedCode'] - video_url = info.get('ipad_url') or info['url'] - - if determine_ext(video_url) == 'm3u8': - formats = self._extract_m3u8_formats(video_url, embedCode, ext='mp4') - else: - formats = [{ - 'url': video_url, - 'ext': 'mp4', - }] - - return { - 'id': embedCode, - 'title': unescapeHTML(info['title']), - 'formats': formats, - 'description': unescapeHTML(more_info['description']), - 'thumbnail': more_info['promo'], + def _extract(self, content_tree_url, video_id, domain='example.org'): + content_tree = self._download_json(content_tree_url, video_id)['content_tree'] + metadata = content_tree[list(content_tree)[0]] + embed_code = metadata['embed_code'] + pcode = metadata.get('asset_pcode') or embed_code + video_info = { + 'id': embed_code, + 'title': metadata['title'], + 'description': metadata.get('description'), + 'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'), + 'duration': float_or_none(metadata.get('duration'), 1000), } - def _extract(self, player_url, video_id): - player = self._download_webpage(player_url, video_id) - mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="', - player, 'mobile player url') - # Looks like some videos are only available for particular devices - # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0 - # is only available for ipad) - # Working around with fetching URLs for all the devices found starting with 'unknown' - # until we succeed or eventually fail for each device. 
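# Editor's note on the `qualities` helper used in the Odnoklassniki formats
# above (and elsewhere in this patch): it maps a format name to its index
# in a preference tuple, so later names sort higher and unknown names sort
# last. Equivalent standalone version:
def qualities(quality_ids):
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
assert quality('hd') > quality('sd') > quality('mobile') > quality('unknown')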
- devices = re.findall(r'device\s*=\s*"([^"]+)";', player) - devices.remove('unknown') - devices.insert(0, 'unknown') - for device in devices: - mobile_player = self._download_webpage( - '%s&device=%s' % (mobile_url, device), video_id, - 'Downloading mobile player JS for %s device' % device) - videos_info = self._search_regex( - r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);', - mobile_player, 'info', fatal=False, default=None) - if videos_info: - break - - if not videos_info: - formats = [] + urls = [] + formats = [] + for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'): auth_data = self._download_json( - 'http://player.ooyala.com/sas/player_api/v1/authorization/embed_code/%s/%s?domain=www.example.org&supportedFormats=mp4,webm' % (video_id, video_id), - video_id) - - cur_auth_data = auth_data['authorization_data'][video_id] - - for stream in cur_auth_data['streams']: - formats.append({ - 'url': base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8'), - 'ext': stream.get('delivery_type'), - 'format': stream.get('video_codec'), - 'format_id': stream.get('profile'), - 'width': int_or_none(stream.get('width')), - 'height': int_or_none(stream.get('height')), - 'abr': int_or_none(stream.get('audio_bitrate')), - 'vbr': int_or_none(stream.get('video_bitrate')), - }) - if formats: - return { - 'id': video_id, - 'formats': formats, - 'title': 'Ooyala video', - } - - if not cur_auth_data['authorized']: - raise ExtractorError(cur_auth_data['message'], expected=True) - - if not videos_info: - raise ExtractorError('Unable to extract info') - videos_info = videos_info.replace('\\"', '"') - videos_more_info = self._search_regex( - r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"') - videos_info = json.loads(videos_info) - videos_more_info = json.loads(videos_more_info) - - if videos_more_info.get('lineup'): - videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])] - return { - '_type': 'playlist', - 'id': video_id, - 'title': unescapeHTML(videos_more_info['title']), - 'entries': videos, - } - else: - return self._extract_result(videos_info[0], videos_more_info) + 'http://player.ooyala.com/sas/player_api/v1/authorization/embed_code/%s/%s?' 
% (pcode, embed_code) + compat_urllib_parse.urlencode({'domain': domain, 'supportedFormats': supported_format}), + video_id, 'Downloading %s JSON' % supported_format) + + cur_auth_data = auth_data['authorization_data'][embed_code] + + if cur_auth_data['authorized']: + for stream in cur_auth_data['streams']: + url = base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8') + if url in urls: + continue + urls.append(url) + delivery_type = stream['delivery_type'] + if delivery_type == 'hls' or '.m3u8' in url: + m3u8_formats = self._extract_m3u8_formats(url, embed_code, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + elif delivery_type == 'hds' or '.f4m' in url: + f4m_formats = self._extract_f4m_formats(url, embed_code, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) + elif '.smil' in url: + smil_formats = self._extract_smil_formats(url, embed_code, fatal=False) + if smil_formats: + formats.extend(smil_formats) + else: + formats.append({ + 'url': url, + 'ext': stream.get('delivery_type'), + 'vcodec': stream.get('video_codec'), + 'format_id': delivery_type, + 'width': int_or_none(stream.get('width')), + 'height': int_or_none(stream.get('height')), + 'abr': int_or_none(stream.get('audio_bitrate')), + 'vbr': int_or_none(stream.get('video_bitrate')), + 'fps': float_or_none(stream.get('framerate')), + }) + else: + raise ExtractorError('%s said: %s' % (self.IE_NAME, cur_auth_data['message']), expected=True) + self._sort_formats(formats) + + video_info['formats'] = formats + return video_info class OoyalaIE(OoyalaBaseIE): @@ -117,6 +87,7 @@ class OoyalaIE(OoyalaBaseIE): 'ext': 'mp4', 'title': 'Explaining Data Recovery from Hard Drives and SSDs', 'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.', + 'duration': 853.386, }, }, { # Only available for ipad @@ -125,7 +96,7 @@ class OoyalaIE(OoyalaBaseIE): 'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0', 'ext': 'mp4', 'title': 'Simulation Overview - Levels of Simulation', - 'description': '', + 'duration': 194.948, }, }, { @@ -136,7 +107,8 @@ class OoyalaIE(OoyalaBaseIE): 'info_dict': { 'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx', 'ext': 'mp4', - 'title': 'Ooyala video', + 'title': 'Divide Tool Path.mp4', + 'duration': 204.405, } } ] @@ -151,9 +123,11 @@ class OoyalaIE(OoyalaBaseIE): ie=cls.ie_key()) def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) embed_code = self._match_id(url) - player_url = 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code - return self._extract(player_url, embed_code) + domain = smuggled_data.get('domain') + content_tree_url = 'http://player.ooyala.com/player_api/v1/content_tree/embed_code/%s/%s' % (embed_code, embed_code) + return self._extract(content_tree_url, embed_code, domain) class OoyalaExternalIE(OoyalaBaseIE): @@ -170,7 +144,7 @@ class OoyalaExternalIE(OoyalaBaseIE): .*?&pcode= ) (?P<pcode>.+?) 
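# Editor's illustration: the Ooyala authorization data above delivers each
# stream URL base64-encoded, and the patch decodes it before dispatching on
# delivery_type. Round-trip with a placeholder URL:
import base64

stream = {'url': {'data': base64.b64encode(
    b'http://example.com/master.m3u8').decode('ascii')}}
url = base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8')
assert url == 'http://example.com/master.m3u8'
# '.m3u8' in url (or delivery_type == 'hls') would route this stream to
# _extract_m3u8_formats.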
- (&|$) + (?:&|$) ''' _TEST = { @@ -179,7 +153,7 @@ class OoyalaExternalIE(OoyalaBaseIE): 'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG', 'ext': 'mp4', 'title': 'dm_140128_30for30Shorts___JudgingJewellv2', - 'description': '', + 'duration': 1302000, }, 'params': { # m3u8 download @@ -188,9 +162,6 @@ class OoyalaExternalIE(OoyalaBaseIE): } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - partner_id = mobj.group('partner_id') - video_id = mobj.group('id') - pcode = mobj.group('pcode') - player_url = 'http://player.ooyala.com/player.js?externalId=%s:%s&pcode=%s' % (partner_id, video_id, pcode) - return self._extract(player_url, video_id) + partner_id, video_id, pcode = re.match(self._VALID_URL, url).groups() + content_tree_url = 'http://player.ooyala.com/player_api/v1/content_tree/external_id/%s/%s:%s' % (pcode, partner_id, video_id) + return self._extract(content_tree_url, video_id) diff --git a/youtube_dl/extractor/openfilm.py b/youtube_dl/extractor/openfilm.py deleted file mode 100644 index d2ceedd01..000000000 --- a/youtube_dl/extractor/openfilm.py +++ /dev/null @@ -1,70 +0,0 @@ -from __future__ import unicode_literals - -import json - -from .common import InfoExtractor -from ..compat import compat_urllib_parse_unquote_plus -from ..utils import ( - parse_iso8601, - parse_age_limit, - int_or_none, -) - - -class OpenFilmIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)openfilm\.com/videos/(?P<id>.+)' - _TEST = { - 'url': 'http://www.openfilm.com/videos/human-resources-remastered', - 'md5': '42bcd88c2f3ec13b65edf0f8ad1cac37', - 'info_dict': { - 'id': '32736', - 'display_id': 'human-resources-remastered', - 'ext': 'mp4', - 'title': 'Human Resources (Remastered)', - 'description': 'Social Engineering in the 20th Century.', - 'thumbnail': 're:^https?://.*\.jpg$', - 'duration': 7164, - 'timestamp': 1334756988, - 'upload_date': '20120418', - 'uploader_id': '41117', - 'view_count': int, - 'age_limit': 0, - }, - } - - def _real_extract(self, url): - display_id = self._match_id(url) - - webpage = self._download_webpage(url, display_id) - - player = compat_urllib_parse_unquote_plus( - self._og_search_video_url(webpage)) - - video = json.loads(self._search_regex( - r'\bp=({.+?})(?:&|$)', player, 'video JSON')) - - video_url = '%s1.mp4' % video['location'] - video_id = video.get('video_id') - display_id = video.get('alias') or display_id - title = video.get('title') - description = video.get('description') - thumbnail = video.get('main_thumb') - duration = int_or_none(video.get('duration')) - timestamp = parse_iso8601(video.get('dt_published'), ' ') - uploader_id = video.get('user_id') - view_count = int_or_none(video.get('views_count')) - age_limit = parse_age_limit(video.get('age_limit')) - - return { - 'id': video_id, - 'display_id': display_id, - 'url': video_url, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'duration': duration, - 'timestamp': timestamp, - 'uploader_id': uploader_id, - 'view_count': view_count, - 'age_limit': age_limit, - } diff --git a/youtube_dl/extractor/patreon.py b/youtube_dl/extractor/patreon.py index 6cdc2638b..ec8876c28 100644 --- a/youtube_dl/extractor/patreon.py +++ b/youtube_dl/extractor/patreon.py @@ -2,9 +2,7 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import ( - js_to_json, -) +from ..utils import js_to_json class PatreonIE(InfoExtractor): @@ -65,7 +63,7 @@ class PatreonIE(InfoExtractor): 'password': password, } - request = compat_urllib_request.Request( + request = 
sanitized_Request( 'https://www.patreon.com/processLogin', compat_urllib_parse.urlencode(login_form).encode('utf-8') ) diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py index a53479aad..744e4a09a 100644 --- a/youtube_dl/extractor/pbs.py +++ b/youtube_dl/extractor/pbs.py @@ -8,22 +8,188 @@ from ..utils import ( ExtractorError, determine_ext, int_or_none, + strip_jsonp, unified_strdate, US_RATINGS, ) class PBSIE(InfoExtractor): + _STATIONS = ( + (r'(?:video|www)\.pbs\.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/ + (r'video\.aptv\.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/ + (r'video\.gpb\.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/ + (r'video\.mpbonline\.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org + (r'video\.wnpt\.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org + (r'video\.wfsu\.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/ + (r'video\.wsre\.org', 'WSRE (WSRE)'), # http://www.wsre.org + (r'video\.wtcitv\.org', 'WTCI (WTCI)'), # http://www.wtcitv.org + (r'video\.pba\.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/ + (r'video\.alaskapublic\.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm + # (r'kuac\.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/ + # (r'ktoo\.org', '360 North (KTOO)'), # http://www.ktoo.org/ + # (r'azpm\.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/ + (r'video\.azpbs\.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org + (r'portal\.knme\.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/ + (r'video\.vegaspbs\.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/ + (r'watch\.aetn\.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/ + (r'video\.ket\.org', 'KET (WKLE)'), # http://www.ket.org/ + (r'video\.wkno\.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/ + (r'video\.lpb\.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/ + (r'videos\.oeta\.tv', 'OETA (KETA)'), # http://www.oeta.tv + (r'video\.optv\.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/ + (r'watch\.wsiu\.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/ + (r'video\.keet\.org', 'KEET TV (KEET)'), # http://www.keet.org + (r'pbs\.kixe\.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/ + (r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/ + (r'video\.kqed\.org', 'KQED (KQED)'), # http://www.kqed.org + (r'vids\.kvie\.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org + (r'video\.pbssocal\.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/ + (r'video\.valleypbs\.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/ + (r'video\.cptv\.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org + (r'watch\.knpb\.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/ + (r'video\.soptv\.org', 'SOPTV (KSYS)'), # http://www.soptv.org + # (r'klcs\.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org + # (r'krcb\.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org + # (r'kvcr\.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org + (r'video\.rmpbs\.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org + (r'video\.kenw\.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org + (r'video\.kued\.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org + (r'video\.wyomingpbs\.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org + (r'video\.cpt12\.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # 
http://www.cpt12.org/ + (r'video\.kbyueleven\.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/ + (r'video\.thirteen\.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org + (r'video\.wgbh\.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org + (r'video\.wgby\.org', 'WGBY (WGBY)'), # http://www.wgby.org + (r'watch\.njtvonline\.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/ + # (r'ripbs\.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/ + (r'watch\.wliw\.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/ + (r'video\.mpt\.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org + (r'watch\.weta\.org', 'WETA Television and Radio (WETA)'), # http://www.weta.org + (r'video\.whyy\.org', 'WHYY (WHYY)'), # http://www.whyy.org + (r'video\.wlvt\.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/ + (r'video\.wvpt\.net', 'WVPT - Your Source for PBS and More! (WVPT)'), # http://www.wvpt.net + (r'video\.whut\.org', 'Howard University Television (WHUT)'), # http://www.whut.org + (r'video\.wedu\.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org + (r'video\.wgcu\.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/ + # (r'wjct\.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org + (r'video\.wpbt2\.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org + (r'video\.wucftv\.org', 'WUCF TV (WUCF)'), # http://wucftv.org + (r'video\.wuft\.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org + (r'watch\.wxel\.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/ + (r'video\.wlrn\.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/ + (r'video\.wusf\.usf\.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/ + (r'video\.scetv\.org', 'ETV (WRLK)'), # http://www.scetv.org + (r'video\.unctv\.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/ + # (r'pbsguam\.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/ + (r'video\.pbshawaii\.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/ + (r'video\.idahoptv\.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org + (r'video\.ksps\.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/ + (r'watch\.opb\.org', 'OPB (KOPB)'), # http://www.opb.org + (r'watch\.nwptv\.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org + (r'video\.will\.illinois\.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/ + (r'video\.networkknowledge\.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv + (r'video\.wttw\.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/ + # (r'wtvp\.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/ + (r'video\.iptv\.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/ + (r'video\.ninenet\.org', 'Nine Network (KETC)'), # http://www.ninenet.org + (r'video\.wfwa\.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/ + (r'video\.wfyi\.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org + (r'video\.mptv\.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org + (r'video\.wnin\.org', 'WNIN (WNIN)'), # http://www.wnin.org/ + (r'video\.wnit\.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/ + (r'video\.wpt\.org', 'WPT (WPNE)'), # http://www.wpt.org/ + (r'video\.wvut\.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/ + (r'video\.weiu\.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net + (r'video\.wqpt\.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org + (r'video\.wycc\.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org + # 
(r'lakeshorepublicmedia\.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/ + (r'video\.wipb\.org', 'WIPB-TV (WIPB)'), # http://wipb.org + (r'video\.indianapublicmedia\.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/ + (r'watch\.cetconnect\.org', 'CET (WCET)'), # http://www.cetconnect.org + (r'video\.thinktv\.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org + (r'video\.wbgu\.org', 'WBGU-TV (WBGU)'), # http://wbgu.org + (r'video\.wgvu\.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/ + (r'video\.netnebraska\.org', 'NET1 (KUON)'), # http://netnebraska.org + (r'video\.pioneer\.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org + (r'watch\.sdpb\.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org + (r'video\.tpt\.org', 'TPT (KTCA)'), # http://www.tpt.org + (r'watch\.ksmq\.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/ + (r'watch\.kpts\.org', 'KPTS/Channel 8 (KPTS)'), # http://www.kpts.org/ + (r'watch\.ktwu\.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org + # (r'shptv\.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org + # (r'kcpt\.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/ + # (r'blueridgepbs\.org', 'Blue Ridge PBS (WBRA)'), # http://www.blueridgepbs.org/ + (r'watch\.easttennesseepbs\.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org + (r'video\.wcte\.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org + (r'video\.wljt\.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/ + (r'video\.wosu\.org', 'WOSU TV (WOSU)'), # http://wosu.org/ + (r'video\.woub\.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5 + (r'video\.wvpublic\.org', 'WVPB (WVPB)'), # http://wvpublic.org/ + (r'video\.wkyupbs\.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org + # (r'wyes\.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org + (r'video\.kera\.org', 'KERA 13 (KERA)'), # http://www.kera.org/ + (r'video\.mpbn\.net', 'MPBN (WCBB)'), # http://www.mpbn.net/ + (r'video\.mountainlake\.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/ + (r'video\.nhptv\.org', 'NHPTV (WENH)'), # http://nhptv.org/ + (r'video\.vpt\.org', 'Vermont PBS (WETK)'), # http://www.vpt.org + (r'video\.witf\.org', 'witf (WITF)'), # http://www.witf.org + (r'watch\.wqed\.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/ + (r'video\.wmht\.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/ + (r'video\.deltabroadcasting\.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org + (r'video\.dptv\.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/ + (r'video\.wcmu\.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org + (r'video\.wkar\.org', 'WKAR-TV (WKAR)'), # http://wkar.org/ + (r'wnmuvideo\.nmu\.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu + (r'video\.wdse\.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/ + (r'video\.wgte\.org', 'WGTE TV (WGTE)'), # http://www.wgte.org + (r'video\.lptv\.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org + # (r'prairiepublic\.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/ + (r'video\.kmos\.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/ + (r'watch\.montanapbs\.org', 'MontanaPBS (KUSM)'), # http://montanapbs.org + (r'video\.krwg\.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org + (r'video\.kacvtv\.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/ + (r'video\.kcostv\.org', 'KCOS/Channel 13 (KCOS)'), # www.kcostv.org 
+ (r'video\.wcny\.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org + (r'video\.wned\.org', 'WNED (WNED)'), # http://www.wned.org/ + (r'watch\.wpbstv\.org', 'WPBS (WPBS)'), # http://www.wpbstv.org + (r'video\.wskg\.org', 'WSKG Public TV (WSKG)'), # http://wskg.org + (r'video\.wxxi\.org', 'WXXI (WXXI)'), # http://wxxi.org + (r'video\.wpsu\.org', 'WPSU (WPSU)'), # http://www.wpsu.org + # (r'wqln\.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org + (r'on-demand\.wvia\.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/ + (r'video\.wtvi\.org', 'WTVI (WTVI)'), # http://www.wtvi.org/ + # (r'whro\.org', 'WHRO (WHRO)'), # http://whro.org + (r'video\.westernreservepublicmedia\.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/ + (r'video\.ideastream\.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/ + (r'video\.kcts9\.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/ + (r'video\.basinpbs\.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org + (r'video\.houstonpbs\.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/ + # (r'tamu\.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu + # (r'kedt\.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org + (r'video\.klrn\.org', 'KLRN (KLRN)'), # http://www.klrn.org + (r'video\.klru\.tv', 'KLRU (KLRU)'), # http://www.klru.org + # (r'kmbh\.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org + # (r'knct\.org', 'KNCT (KNCT)'), # http://www.knct.org + # (r'ktxt\.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org + (r'video\.wtjx\.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/ + (r'video\.ideastations\.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/ + (r'video\.kbtc\.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org + ) + + IE_NAME = 'pbs' + IE_DESC = 'Public Broadcasting Service (PBS) and member stations: %s' % ', '.join(list(zip(*_STATIONS))[1]) + _VALID_URL = r'''(?x)https?:// (?: # Direct video URL - video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? | + (?:%s)/(?:viralplayer|video)/(?P<id>[0-9]+)/? | # Article with embedded player (or direct video) (?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) | # Player - video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/ + (?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/ ) - ''' + ''' % '|'.join(list(zip(*_STATIONS))[0]) _TESTS = [ { @@ -92,6 +258,7 @@ class PBSIE(InfoExtractor): 'duration': 3172, 'thumbnail': 're:^https?://.*\.jpg$', 'upload_date': '20140122', + 'age_limit': 10, }, 'params': { 'skip_download': True, # requires ffmpeg @@ -107,12 +274,12 @@ class PBSIE(InfoExtractor): { 'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/', 'info_dict': { - 'id': '2280706814', + 'id': '2276541483', 'display_id': 'player', 'ext': 'mp4', - 'title': 'American Experience - Death and the Civil War', + 'title': 'American Experience - Death and the Civil War, Chapter 1', 'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.', - 'duration': 6705, + 'duration': 682, 'thumbnail': 're:^https?://.*\.jpg$', }, 'params': { @@ -133,8 +300,57 @@ class PBSIE(InfoExtractor): 'params': { 'skip_download': True, # requires ffmpeg }, + 'skip': 'Expired', + }, + { + # Video embedded in iframe containing angle brackets as attribute's value (e.g. 
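# Editor's sketch of how the _STATIONS table above feeds both IE_DESC and
# _VALID_URL: zip(*_STATIONS) transposes the tuple of (host_regex, name)
# pairs, so element 0 joins into the URL alternation and element 1 into the
# description. Two-entry sample:
_STATIONS = (
    (r'(?:video|www)\.pbs\.org', 'PBS: Public Broadcasting Service'),
    (r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'),
)
host_alternation = '|'.join(list(zip(*_STATIONS))[0])
description = 'PBS member stations: %s' % ', '.join(list(zip(*_STATIONS))[1])
assert host_alternation == r'(?:video|www)\.pbs\.org|video\.kpbs\.org'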
+ # "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see
+ # https://github.com/rg3/youtube-dl/issues/7059)
+ 'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/',
+ 'info_dict': {
+ 'id': '2365546844',
+ 'display_id': 'a-chefs-life-season-3-episode-5-prickly-business',
+ 'ext': 'mp4',
+ 'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business",
+ 'description': 'md5:61db2ddf27c9912f09c241014b118ed1',
+ 'duration': 1480,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
+ },
+ {
+ # Frontline video embedded via flp2012.js
+ 'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists',
+ 'info_dict': {
+ 'id': '2070868960',
+ 'display_id': 'the-atomic-artists',
+ 'ext': 'mp4',
+ 'title': 'FRONTLINE - The Atomic Artists',
+ 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
+ 'duration': 723,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
+ },
+ {
+ 'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://watch.knpb.org/video/2365616055/',
+ 'only_matching': True,
+ }
 ]
+ _ERRORS = {
+ 101: 'We\'re sorry, but this video is not yet available.',
+ 403: 'We\'re sorry, but this video is not available in your region due to rights restrictions.',
+ 404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.',
+ 410: 'This video has expired and is no longer available for online streaming.',
+ }
 def _extract_webpage(self, url): mobj = re.match(self._VALID_URL, url)
@@ -157,6 +373,7 @@ class PBSIE(InfoExtractor):
 MEDIA_ID_REGEXES = [ r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed r'class="coveplayerid">([^<]+)<', # coveplayer
+ r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/
 r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer ]
@@ -165,9 +382,30 @@ class PBSIE(InfoExtractor):
 if media_id: return media_id, presumptive_id, upload_date
- url = self._search_regex(
- r'<iframe\s+[^>]*\s+src=["\']([^\'"]+partnerplayer[^\'"]+)["\']',
- webpage, 'player URL')
+ # Frontline video embedded via flp
+ video_id = self._search_regex(
+ r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None)
+ if video_id:
+ # prg_id calculation is reverse engineered from
+ # http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js
+ prg_id = video_id[7:]
+ if 'q' in prg_id:
+ prg_id = prg_id.split('q')[1]
+ prg_id = int(prg_id, 16)
+ getdir = self._download_json(
+ 'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id,
+ presumptive_id, 'Downloading getdir JSON',
+ transform_source=strip_jsonp)
+ return getdir['mid'], presumptive_id, upload_date
+
+ for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage):
+ url = self._search_regex(
+ r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe,
+ 'player URL', default=None, group='url')
+ if url:
+ break
+
 mobj = re.match(self._VALID_URL, url) player_id = mobj.group('player_id')
@@ -195,7 +433,7 @@ class PBSIE(InfoExtractor):
 return self.playlist_result(entries, display_id)
 info = self._download_json(
-
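# Editor's illustration of the Frontline package-id computation above
# (reverse engineered from flp2012.js): drop the first seven characters of
# the scraped videoid, keep what follows a 'q' if one is present, and read
# the remainder as hex. The sample id is hypothetical.
videoid = 'frontl01q2f3a9'
prg_id = videoid[7:]               # -> '1q2f3a9'
if 'q' in prg_id:
    prg_id = prg_id.split('q')[1]  # -> '2f3a9'
prg_id = int(prg_id, 16)           # hex -> 193449
getdir_url = ('http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json'
              % prg_id)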
'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id, display_id) formats = [] @@ -212,13 +450,11 @@ class PBSIE(InfoExtractor): 'Downloading %s video url info' % encoding_name) if redirect_info['status'] == 'error': - if redirect_info['http_code'] == 403: - message = ( - 'The video is not available in your region due to ' - 'right restrictions') - else: - message = redirect_info['message'] - raise ExtractorError(message, expected=True) + raise ExtractorError( + '%s said: %s' % ( + self.IE_NAME, + self._ERRORS.get(redirect_info['http_code'], redirect_info['message'])), + expected=True) format_url = redirect_info.get('url') if not format_url: diff --git a/youtube_dl/extractor/periscope.py b/youtube_dl/extractor/periscope.py new file mode 100644 index 000000000..63cc764bb --- /dev/null +++ b/youtube_dl/extractor/periscope.py @@ -0,0 +1,82 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import parse_iso8601 + + +class PeriscopeIE(InfoExtractor): + IE_DESC = 'Periscope' + _VALID_URL = r'https?://(?:www\.)?periscope\.tv/[^/]+/(?P<id>[^/?#]+)' + # Alive example URLs can be found here http://onperiscope.com/ + _TESTS = [{ + 'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==', + 'md5': '65b57957972e503fcbbaeed8f4fa04ca', + 'info_dict': { + 'id': '56102209', + 'ext': 'mp4', + 'title': 'Bec Boop - 🚠✈️🇬🇧 Fly above #London in Emirates Air Line cable car at night 🇬🇧✈️🚠 #BoopScope 🎀💗', + 'timestamp': 1438978559, + 'upload_date': '20150807', + 'uploader': 'Bec Boop', + 'uploader_id': '1465763', + }, + 'skip': 'Expires in 24 hours', + }, { + 'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv', + 'only_matching': True, + }, { + 'url': 'https://www.periscope.tv/bastaakanoggano/1OdKrlkZZjOJX', + 'only_matching': True, + }] + + def _call_api(self, method, value): + attribute = 'token' if len(value) > 13 else 'broadcast_id' + return self._download_json( + 'https://api.periscope.tv/api/v2/%s?%s=%s' % (method, attribute, value), value) + + def _real_extract(self, url): + token = self._match_id(url) + + broadcast_data = self._call_api('getBroadcastPublic', token) + broadcast = broadcast_data['broadcast'] + status = broadcast['status'] + + uploader = broadcast.get('user_display_name') or broadcast_data.get('user', {}).get('display_name') + uploader_id = broadcast.get('user_id') or broadcast_data.get('user', {}).get('id') + + title = '%s - %s' % (uploader, status) if uploader else status + state = broadcast.get('state').lower() + if state == 'running': + title = self._live_title(title) + timestamp = parse_iso8601(broadcast.get('created_at')) + + thumbnails = [{ + 'url': broadcast[image], + } for image in ('image_url', 'image_url_small') if broadcast.get(image)] + + stream = self._call_api('getAccessPublic', token) + + formats = [] + for format_id in ('replay', 'rtmp', 'hls', 'https_hls'): + video_url = stream.get(format_id + '_url') + if not video_url: + continue + f = { + 'url': video_url, + 'ext': 'flv' if format_id == 'rtmp' else 'mp4', + } + if format_id != 'rtmp': + f['protocol'] = 'm3u8_native' if state == 'ended' else 'm3u8' + formats.append(f) + self._sort_formats(formats) + + return { + 'id': broadcast.get('id') or token, + 'title': title, + 'timestamp': timestamp, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'thumbnails': thumbnails, + 'formats': formats, + } diff --git a/youtube_dl/extractor/pladform.py b/youtube_dl/extractor/pladform.py index 
551c8c9f0..bc559d1df 100644 --- a/youtube_dl/extractor/pladform.py +++ b/youtube_dl/extractor/pladform.py @@ -1,6 +1,8 @@ # coding: utf-8 from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..utils import ( ExtractorError, @@ -44,6 +46,13 @@ class PladformIE(InfoExtractor): 'only_matching': True, }] + @staticmethod + def _extract_url(webpage): + mobj = re.search( + r'<iframe[^>]+src="(?P<url>(?:https?:)?//out\.pladform\.ru/player\?.+?)"', webpage) + if mobj: + return mobj.group('url') + def _real_extract(self, url): video_id = self._match_id(url) diff --git a/youtube_dl/extractor/played.py b/youtube_dl/extractor/played.py index 8a1c296dd..2856af96f 100644 --- a/youtube_dl/extractor/played.py +++ b/youtube_dl/extractor/played.py @@ -5,12 +5,10 @@ import re import os.path from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -46,7 +44,7 @@ class PlayedIE(InfoExtractor): headers = { b'Content-Type': b'application/x-www-form-urlencoded', } - req = compat_urllib_request.Request(url, post, headers) + req = sanitized_Request(url, post, headers) webpage = self._download_webpage( req, video_id, note='Downloading video page ...') diff --git a/youtube_dl/extractor/playtvak.py b/youtube_dl/extractor/playtvak.py new file mode 100644 index 000000000..e360404f7 --- /dev/null +++ b/youtube_dl/extractor/playtvak.py @@ -0,0 +1,181 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import ( + compat_urlparse, + compat_urllib_parse, +) +from ..utils import ( + ExtractorError, + int_or_none, + parse_iso8601, + qualities, +) + + +class PlaytvakIE(InfoExtractor): + IE_DESC = 'Playtvak.cz, iDNES.cz and Lidovky.cz' + _VALID_URL = r'https?://(?:.+?\.)?(?:playtvak|idnes|lidovky|metro)\.cz/.*\?(?:c|idvideo)=(?P<id>[^&]+)' + _TESTS = [{ + 'url': 'http://www.playtvak.cz/vyzente-vosy-a-srsne-ze-zahrady-dn5-/hodinovy-manzel.aspx?c=A150730_150323_hodinovy-manzel_kuko', + 'md5': '4525ae312c324b4be2f4603cc78ceb4a', + 'info_dict': { + 'id': 'A150730_150323_hodinovy-manzel_kuko', + 'ext': 'mp4', + 'title': 'Vyžeňte vosy a sršně ze zahrady', + 'description': 'md5:f93d398691044d303bc4a3de62f3e976', + 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$', + 'duration': 279, + 'timestamp': 1438732860, + 'upload_date': '20150805', + 'is_live': False, + } + }, { # live video test + 'url': 'http://slowtv.playtvak.cz/planespotting-0pr-/planespotting.aspx?c=A150624_164934_planespotting_cat', + 'info_dict': { + 'id': 'A150624_164934_planespotting_cat', + 'ext': 'flv', + 'title': 're:^Přímý přenos iDNES.cz [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'description': 'Sledujte provoz na ranveji Letiště Václava Havla v Praze', + 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$', + 'is_live': True, + }, + 'params': { + 'skip_download': True, # requires rtmpdump + }, + }, { # idnes.cz + 'url': 'http://zpravy.idnes.cz/pes-zavreny-v-aute-rozbijeni-okynek-v-aute-fj5-/domaci.aspx?c=A150809_104116_domaci_pku', + 'md5': '819832ba33cd7016e58a6658577fe289', + 'info_dict': { + 'id': 'A150809_104116_domaci_pku', + 'ext': 'mp4', + 'title': 'Zavřeli jsme mraženou pizzu do auta. 
Upekla se', + 'description': 'md5:01e73f02329e2e5760bd5eed4d42e3c2', + 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$', + 'duration': 39, + 'timestamp': 1438969140, + 'upload_date': '20150807', + 'is_live': False, + } + }, { # lidovky.cz + 'url': 'http://www.lidovky.cz/dalsi-demonstrace-v-praze-o-migraci-duq-/video.aspx?c=A150808_214044_ln-video_ELE', + 'md5': 'c7209ac4ba9d234d4ad5bab7485bcee8', + 'info_dict': { + 'id': 'A150808_214044_ln-video_ELE', + 'ext': 'mp4', + 'title': 'Táhni! Demonstrace proti imigrantům budila emoce', + 'description': 'md5:97c81d589a9491fbfa323c9fa3cca72c', + 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$', + 'timestamp': 1439052180, + 'upload_date': '20150808', + 'is_live': False, + } + }, { # metro.cz + 'url': 'http://www.metro.cz/video-pod-billboardem-se-na-vltavske-roztocil-kolotoc-deti-vozil-jen-par-hodin-1hx-/metro-extra.aspx?c=A141111_173251_metro-extra_row', + 'md5': '84fc1deedcac37b7d4a6ccae7c716668', + 'info_dict': { + 'id': 'A141111_173251_metro-extra_row', + 'ext': 'mp4', + 'title': 'Recesisté udělali z billboardu kolotoč', + 'description': 'md5:7369926049588c3989a66c9c1a043c4c', + 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$', + 'timestamp': 1415725500, + 'upload_date': '20141111', + 'is_live': False, + } + }, { + 'url': 'http://www.playtvak.cz/embed.aspx?idvideo=V150729_141549_play-porad_kuko', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + info_url = self._html_search_regex( + r'Misc\.videoFLV\(\s*{\s*data\s*:\s*"([^"]+)"', webpage, 'info url') + + parsed_url = compat_urlparse.urlparse(info_url) + + qs = compat_urlparse.parse_qs(parsed_url.query) + qs.update({ + 'reklama': ['0'], + 'type': ['js'], + }) + + info_url = compat_urlparse.urlunparse( + parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True))) + + json_info = self._download_json( + info_url, video_id, + transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1]) + + item = None + for i in json_info['items']: + if i.get('type') == 'video' or i.get('type') == 'stream': + item = i + break + if not item: + raise ExtractorError('No suitable stream found') + + quality = qualities(('low', 'middle', 'high')) + + formats = [] + for fmt in item['video']: + video_url = fmt.get('file') + if not video_url: + continue + + format_ = fmt['format'] + format_id = '%s_%s' % (format_, fmt['quality']) + preference = None + + if format_ in ('mp4', 'webm'): + ext = format_ + elif format_ == 'rtmp': + ext = 'flv' + elif format_ == 'apple': + ext = 'mp4' + # Some streams have mp3 audio which does not play + # well with ffmpeg filter aac_adtstoasc + preference = -1 + elif format_ == 'adobe': # f4m manifest fails with 404 in 80% of requests + continue + else: # Other formats not supported yet + continue + + formats.append({ + 'url': video_url, + 'ext': ext, + 'format_id': format_id, + 'quality': quality(fmt.get('quality')), + 'preference': preference, + }) + self._sort_formats(formats) + + title = item['title'] + is_live = item['type'] == 'stream' + if is_live: + title = self._live_title(title) + description = self._og_search_description(webpage, default=None) or self._html_search_meta( + 'description', webpage, 'description') + timestamp = None + duration = None + if not is_live: + duration = int_or_none(item.get('length')) + timestamp = item.get('published') + if timestamp: + timestamp = parse_iso8601(timestamp[:-5]) + + return { + 'id': video_id, + 'title': title, + 'description': 
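# Editor's sketch of the Playtvak info-URL rewrite above: the player URL's
# query is amended to disable ads ('reklama': '0') and request JSON
# ('type': 'js'), then re-serialized. Portable imports; the URL is a
# placeholder.
try:
    from urllib.parse import urlparse, urlunparse, parse_qs, urlencode
except ImportError:  # Python 2, still supported by youtube-dl
    from urlparse import urlparse, urlunparse, parse_qs
    from urllib import urlencode

parsed = urlparse('http://servix.example/info.aspx?c=A150730_150323&reklama=1')
qs = parse_qs(parsed.query)
qs.update({'reklama': ['0'], 'type': ['js']})
info_url = urlunparse(parsed._replace(query=urlencode(qs, True)))
# The response body is then trimmed to its outermost braces via
# transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1], as above.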
description, + 'thumbnail': item.get('image'), + 'duration': duration, + 'timestamp': timestamp, + 'is_live': is_live, + 'formats': formats, + } diff --git a/youtube_dl/extractor/playwire.py b/youtube_dl/extractor/playwire.py index bdc71017b..6d138ef25 100644 --- a/youtube_dl/extractor/playwire.py +++ b/youtube_dl/extractor/playwire.py @@ -19,7 +19,7 @@ class PlaywireIE(InfoExtractor): 'id': '3353705', 'ext': 'mp4', 'title': 'S04_RM_UCL_Rus', - 'thumbnail': 're:^http://.*\.png$', + 'thumbnail': 're:^https?://.*\.png$', 'duration': 145.94, }, }, { diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py new file mode 100644 index 000000000..55c11b3bf --- /dev/null +++ b/youtube_dl/extractor/pluralsight.py @@ -0,0 +1,291 @@ +from __future__ import unicode_literals + +import re +import json +import random +import collections + +from .common import InfoExtractor +from ..compat import ( + compat_str, + compat_urllib_parse, + compat_urlparse, +) +from ..utils import ( + ExtractorError, + int_or_none, + parse_duration, + qualities, + sanitized_Request, +) + + +class PluralsightBaseIE(InfoExtractor): + _API_BASE = 'http://app.pluralsight.com' + + +class PluralsightIE(PluralsightBaseIE): + IE_NAME = 'pluralsight' + _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/training/player\?' + _LOGIN_URL = 'https://app.pluralsight.com/id/' + + _NETRC_MACHINE = 'pluralsight' + + _TESTS = [{ + 'url': 'http://www.pluralsight.com/training/player?author=mike-mckeown&name=hosting-sql-server-windows-azure-iaas-m7-mgmt&mode=live&clip=3&course=hosting-sql-server-windows-azure-iaas', + 'md5': '4d458cf5cf4c593788672419a8dd4cf8', + 'info_dict': { + 'id': 'hosting-sql-server-windows-azure-iaas-m7-mgmt-04', + 'ext': 'mp4', + 'title': 'Management of SQL Server - Demo Monitoring', + 'duration': 338, + }, + 'skip': 'Requires pluralsight account credentials', + }, { + 'url': 'https://app.pluralsight.com/training/player?course=angularjs-get-started&author=scott-allen&name=angularjs-get-started-m1-introduction&clip=0&mode=live', + 'only_matching': True, + }, { + # available without pluralsight account + 'url': 'http://app.pluralsight.com/training/player?author=scott-allen&name=angularjs-get-started-m1-introduction&mode=live&clip=0&course=angularjs-get-started', + 'only_matching': True, + }] + + def _real_initialize(self): + self._login() + + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + + login_page = self._download_webpage( + self._LOGIN_URL, None, 'Downloading login page') + + login_form = self._hidden_inputs(login_page) + + login_form.update({ + 'Username': username.encode('utf-8'), + 'Password': password.encode('utf-8'), + }) + + post_url = self._search_regex( + r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, + 'post url', default=self._LOGIN_URL, group='url') + + if not post_url.startswith('http'): + post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) + + request = sanitized_Request( + post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8')) + request.add_header('Content-Type', 'application/x-www-form-urlencoded') + + response = self._download_webpage( + request, None, 'Logging in as %s' % username) + + error = self._search_regex( + r'<span[^>]+class="field-validation-error"[^>]*>([^<]+)</span>', + response, 'error message', default=None) + if error: + raise ExtractorError('Unable to login: %s' % error, expected=True) + + if all(p not in response for p in ('__INITIAL_STATE__', '"currentUser"')): + 
raise ExtractorError('Unable to log in')
+
+ def _real_extract(self, url):
+ qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+
+ author = qs.get('author', [None])[0]
+ name = qs.get('name', [None])[0]
+ clip_id = qs.get('clip', [None])[0]
+ course = qs.get('course', [None])[0]
+
+ if any(not f for f in (author, name, clip_id, course,)):
+ raise ExtractorError('Invalid URL', expected=True)
+
+ display_id = '%s-%s' % (name, clip_id)
+
+ webpage = self._download_webpage(url, display_id)
+
+ modules = self._search_regex(
+ r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)',
+ webpage, 'modules', default=None)
+
+ if modules:
+ collection = self._parse_json(modules, display_id)
+ else:
+ # Webpage may be served in different layout (see
+ # https://github.com/rg3/youtube-dl/issues/7607)
+ collection = self._parse_json(
+ self._search_regex(
+ r'var\s+initialState\s*=\s*({.+?});\n', webpage, 'initial state'),
+ display_id)['course']['modules']
+
+ module, clip = None, None
+
+ for module_ in collection:
+ if name in (module_.get('moduleName'), module_.get('name')):
+ module = module_
+ for clip_ in module_.get('clips', []):
+ clip_index = clip_.get('clipIndex')
+ if clip_index is None:
+ clip_index = clip_.get('index')
+ if clip_index is None:
+ continue
+ if compat_str(clip_index) == clip_id:
+ clip = clip_
+ break
+
+ if not clip:
+ raise ExtractorError('Unable to resolve clip')
+
+ QUALITIES = {
+ 'low': {'width': 640, 'height': 480},
+ 'medium': {'width': 848, 'height': 640},
+ 'high': {'width': 1024, 'height': 768},
+ 'high-widescreen': {'width': 1280, 'height': 720},
+ }
+
+ QUALITIES_PREFERENCE = ('low', 'medium', 'high', 'high-widescreen',)
+ quality_key = qualities(QUALITIES_PREFERENCE)
+
+ AllowedQuality = collections.namedtuple('AllowedQuality', ['ext', 'qualities'])
+
+ ALLOWED_QUALITIES = (
+ AllowedQuality('webm', ['high', ]),
+ AllowedQuality('mp4', ['low', 'medium', 'high', ]),
+ )
+
+ # Some courses also offer widescreen resolution for high quality (see
+ # https://github.com/rg3/youtube-dl/issues/7766)
+ widescreen = bool(re.search(
+ r'courseSupportsWidescreenVideoFormats\s*:\s*true', webpage))
+ best_quality = 'high-widescreen' if widescreen else 'high'
+ if widescreen:
+ for allowed_quality in ALLOWED_QUALITIES:
+ allowed_quality.qualities.append(best_quality)
+
+ # In order to minimize the number of calls to the ViewClip API and reduce
+ # the probability of being throttled or banned by Pluralsight, we request
+ # only a single format unless a formats listing was explicitly requested.
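# Editor's illustration of the format-guessing logic implemented just
# below: a requested "<ext>-<quality>" format string is matched against
# ALLOWED_QUALITIES so only one ViewClip call is made; unrecognized
# requests fall back to a single best-quality mp4 (webm when free formats
# are preferred). Self-contained sketch:
import collections
AllowedQuality = collections.namedtuple('AllowedQuality', ['ext', 'qualities'])
ALLOWED = (AllowedQuality('webm', ['high']),
           AllowedQuality('mp4', ['low', 'medium', 'high']))

def guess(req_format, best_quality='high'):
    split = req_format.split('-', 1)
    if len(split) > 1:
        ext, quality = split
        for allowed in ALLOWED:
            if ext == allowed.ext and quality in allowed.qualities:
                return (AllowedQuality(ext, (quality,)),)
    return (AllowedQuality('mp4', (best_quality,)),)

assert guess('webm-high') == (AllowedQuality('webm', ('high',)),)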
+ if self._downloader.params.get('listformats', False): + allowed_qualities = ALLOWED_QUALITIES + else: + def guess_allowed_qualities(): + req_format = self._downloader.params.get('format') or 'best' + req_format_split = req_format.split('-', 1) + if len(req_format_split) > 1: + req_ext, req_quality = req_format_split + for allowed_quality in ALLOWED_QUALITIES: + if req_ext == allowed_quality.ext and req_quality in allowed_quality.qualities: + return (AllowedQuality(req_ext, (req_quality, )), ) + req_ext = 'webm' if self._downloader.params.get('prefer_free_formats') else 'mp4' + return (AllowedQuality(req_ext, (best_quality, )), ) + allowed_qualities = guess_allowed_qualities() + + formats = [] + for ext, qualities_ in allowed_qualities: + for quality in qualities_: + f = QUALITIES[quality].copy() + clip_post = { + 'a': author, + 'cap': 'false', + 'cn': clip_id, + 'course': course, + 'lc': 'en', + 'm': name, + 'mt': ext, + 'q': '%dx%d' % (f['width'], f['height']), + } + request = sanitized_Request( + '%s/training/Player/ViewClip' % self._API_BASE, + json.dumps(clip_post).encode('utf-8')) + request.add_header('Content-Type', 'application/json;charset=utf-8') + format_id = '%s-%s' % (ext, quality) + clip_url = self._download_webpage( + request, display_id, 'Downloading %s URL' % format_id, fatal=False) + + # Pluralsight tracks multiple sequential calls to ViewClip API and start + # to return 429 HTTP errors after some time (see + # https://github.com/rg3/youtube-dl/pull/6989). Moreover it may even lead + # to account ban (see https://github.com/rg3/youtube-dl/issues/6842). + # To somewhat reduce the probability of these consequences + # we will sleep random amount of time before each call to ViewClip. + self._sleep( + random.randint(2, 5), display_id, + '%(video_id)s: Waiting for %(timeout)s seconds to avoid throttling') + + if not clip_url: + continue + f.update({ + 'url': clip_url, + 'ext': ext, + 'format_id': format_id, + 'quality': quality_key(quality), + }) + formats.append(f) + self._sort_formats(formats) + + # TODO: captions + # http://www.pluralsight.com/training/Player/ViewClip + cap = true + # or + # http://www.pluralsight.com/training/Player/Captions + # { a = author, cn = clip_id, lc = end, m = name } + + return { + 'id': clip['clipName'], + 'title': '%s - %s' % (module['title'], clip['title']), + 'duration': int_or_none(clip.get('duration')) or parse_duration(clip.get('formattedDuration')), + 'creator': author, + 'formats': formats + } + + +class PluralsightCourseIE(PluralsightBaseIE): + IE_NAME = 'pluralsight:course' + _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/(?:library/)?courses/(?P<id>[^/]+)' + _TESTS = [{ + # Free course from Pluralsight Starter Subscription for Microsoft TechNet + # https://offers.pluralsight.com/technet?loc=zTS3z&prod=zOTprodz&tech=zOttechz&prog=zOTprogz&type=zSOz&media=zOTmediaz&country=zUSz + 'url': 'http://www.pluralsight.com/courses/hosting-sql-server-windows-azure-iaas', + 'info_dict': { + 'id': 'hosting-sql-server-windows-azure-iaas', + 'title': 'Hosting SQL Server in Microsoft Azure IaaS Fundamentals', + 'description': 'md5:61b37e60f21c4b2f91dc621a977d0986', + }, + 'playlist_count': 31, + }, { + # available without pluralsight account + 'url': 'https://www.pluralsight.com/courses/angularjs-get-started', + 'only_matching': True, + }, { + 'url': 'https://app.pluralsight.com/library/courses/understanding-microsoft-azure-amazon-aws/table-of-contents', + 'only_matching': True, + }] + + def _real_extract(self, url): + course_id = 
self._match_id(url) + + # TODO: PSM cookie + + course = self._download_json( + '%s/data/course/%s' % (self._API_BASE, course_id), + course_id, 'Downloading course JSON') + + title = course['title'] + description = course.get('description') or course.get('shortDescription') + + course_data = self._download_json( + '%s/data/course/content/%s' % (self._API_BASE, course_id), + course_id, 'Downloading course data JSON') + + entries = [] + for module in course_data: + for clip in module.get('clips', []): + player_parameters = clip.get('playerParameters') + if not player_parameters: + continue + entries.append(self.url_result( + '%s/training/player?%s' % (self._API_BASE, player_parameters), + 'Pluralsight')) + + return self.playlist_result(entries, course_id, title, description) diff --git a/youtube_dl/extractor/porn91.py b/youtube_dl/extractor/porn91.py index 72d1b2718..3e15533e9 100644 --- a/youtube_dl/extractor/porn91.py +++ b/youtube_dl/extractor/porn91.py @@ -22,6 +22,7 @@ class Porn91IE(InfoExtractor): 'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!', 'ext': 'mp4', 'duration': 431, + 'age_limit': 18, } } @@ -68,4 +69,5 @@ class Porn91IE(InfoExtractor): 'url': video_url, 'duration': duration, 'comment_count': comment_count, + 'age_limit': self._rta_search(webpage), } diff --git a/youtube_dl/extractor/pornhd.py b/youtube_dl/extractor/pornhd.py index dbb2c3bd9..57c78ba52 100644 --- a/youtube_dl/extractor/pornhd.py +++ b/youtube_dl/extractor/pornhd.py @@ -36,7 +36,8 @@ class PornHdIE(InfoExtractor): webpage = self._download_webpage(url, display_id or video_id) title = self._html_search_regex( - r'<title>(.+) porn HD.+?</title>', webpage, 'title') + [r'<span[^>]+class=["\']video-name["\'][^>]*>([^<]+)', + r'<title>(.+?) - .*?[Pp]ornHD.*?</title>'], webpage, 'title') description = self._html_search_regex( r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False) view_count = int_or_none(self._html_search_regex( diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py index 0b7886840..08275687d 100644 --- a/youtube_dl/extractor/pornhub.py +++ b/youtube_dl/extractor/pornhub.py @@ -8,10 +8,10 @@ from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, compat_urllib_parse_urlparse, - compat_urllib_request, ) from ..utils import ( ExtractorError, + sanitized_Request, str_to_int, ) from ..aes import ( @@ -20,7 +20,7 @@ from ..aes import ( class PornHubIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)' + _VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)' _TESTS = [{ 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015', 'md5': '882f488fa1f0026f023f33576004a2ed', @@ -34,6 +34,9 @@ class PornHubIE(InfoExtractor): }, { 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d', 'only_matching': True, + }, { + 'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862', + 'only_matching': True, }] @classmethod @@ -50,7 +53,7 @@ class PornHubIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) @@ -81,7 +84,7 @@ class PornHubIE(InfoExtractor): comment_count = self._extract_count( r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment') - video_urls = 
list(map(compat_urllib_parse_unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage))) + video_urls = list(map(compat_urllib_parse_unquote, re.findall(r"player_quality_[0-9]{3}p\s*=\s*'([^']+)'", webpage))) if webpage.find('"encrypted":true') != -1: password = compat_urllib_parse_unquote_plus( self._search_regex(r'"video_title":"([^"]+)', webpage, 'password')) @@ -94,7 +97,7 @@ class PornHubIE(InfoExtractor): format = path.split('/')[5].split('_')[:2] format = "-".join(format) - m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format) + m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format) if m is None: height = None tbr = None @@ -144,7 +147,8 @@ class PornHubPlaylistIE(InfoExtractor): entries = [ self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub') - for video_url in set(re.findall('href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage)) + for video_url in set(re.findall( + r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"', webpage)) ] playlist = self._parse_json( diff --git a/youtube_dl/extractor/pornotube.py b/youtube_dl/extractor/pornotube.py index 34735c51e..5398e708b 100644 --- a/youtube_dl/extractor/pornotube.py +++ b/youtube_dl/extractor/pornotube.py @@ -3,11 +3,9 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( int_or_none, + sanitized_Request, ) @@ -46,7 +44,7 @@ class PornotubeIE(InfoExtractor): 'authenticationSpaceKey': originAuthenticationSpaceKey, 'credentials': 'Clip Application', } - token_req = compat_urllib_request.Request( + token_req = sanitized_Request( 'https://api.aebn.net/auth/v1/token/primal', data=json.dumps(token_req_data).encode('utf-8')) token_req.add_header('Content-Type', 'application/json') @@ -56,7 +54,7 @@ class PornotubeIE(InfoExtractor): token = token_answer['tokenKey'] # Get video URL - delivery_req = compat_urllib_request.Request( + delivery_req = sanitized_Request( 'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id) delivery_req.add_header('Authorization', token) delivery_info = self._download_json( @@ -64,7 +62,7 @@ class PornotubeIE(InfoExtractor): video_url = delivery_info['mediaUrl'] # Get additional info (title etc.) 
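        # (Third step of the AEBN flow: after the token and media URL have
        # been obtained above, this call fetches the clip metadata.)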
- info_req = compat_urllib_request.Request( + info_req = sanitized_Request( 'https://api.aebn.net/content/v1/clips/%s?expand=' 'title,description,primaryImageNumber,startSecond,endSecond,' 'movie.title,movie.MovieId,movie.boxCoverFront,movie.stars,' diff --git a/youtube_dl/extractor/primesharetv.py b/youtube_dl/extractor/primesharetv.py index 304359dc5..85aae9576 100644 --- a/youtube_dl/extractor/primesharetv.py +++ b/youtube_dl/extractor/primesharetv.py @@ -1,11 +1,11 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, +from ..compat import compat_urllib_parse +from ..utils import ( + ExtractorError, + sanitized_Request, ) -from ..utils import ExtractorError class PrimeShareTVIE(InfoExtractor): @@ -41,7 +41,7 @@ class PrimeShareTVIE(InfoExtractor): webpage, 'wait time', default=7)) + 1 self._sleep(wait_time, video_id) - req = compat_urllib_request.Request( + req = sanitized_Request( url, compat_urllib_parse.urlencode(fields), headers) video_page = self._download_webpage( req, video_id, 'Downloading video page') diff --git a/youtube_dl/extractor/promptfile.py b/youtube_dl/extractor/promptfile.py index 8190ed676..d5357283a 100644 --- a/youtube_dl/extractor/promptfile.py +++ b/youtube_dl/extractor/promptfile.py @@ -4,13 +4,11 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( determine_ext, ExtractorError, + sanitized_Request, ) @@ -37,7 +35,7 @@ class PromptFileIE(InfoExtractor): fields = self._hidden_inputs(webpage) post = compat_urllib_parse.urlencode(fields) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage( req, video_id, 'Downloading video page') diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py index effcf1db3..baa54a3af 100644 --- a/youtube_dl/extractor/prosiebensat1.py +++ b/youtube_dl/extractor/prosiebensat1.py @@ -20,7 +20,7 @@ from ..utils import ( class ProSiebenSat1IE(InfoExtractor): IE_NAME = 'prosiebensat1' IE_DESC = 'ProSiebenSat.1 Digital' - _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at)|ran\.de|fem\.com)/(?P<id>.+)' + _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at|ch)|ran\.de|fem\.com)/(?P<id>.+)' _TESTS = [ { diff --git a/youtube_dl/extractor/qqmusic.py b/youtube_dl/extractor/qqmusic.py index 1654a641f..1ba3bbddf 100644 --- a/youtube_dl/extractor/qqmusic.py +++ b/youtube_dl/extractor/qqmusic.py @@ -7,11 +7,11 @@ import re from .common import InfoExtractor from ..utils import ( + sanitized_Request, strip_jsonp, unescapeHTML, clean_html, ) -from ..compat import compat_urllib_request class QQMusicIE(InfoExtractor): @@ -25,7 +25,7 @@ class QQMusicIE(InfoExtractor): 'id': '004295Et37taLD', 'ext': 'mp3', 'title': '可惜没如果', - 'upload_date': '20141227', + 'release_date': '20141227', 'creator': '林俊杰', 'description': 'md5:d327722d0361576fde558f1ac68a7065', 'thumbnail': 're:^https?://.*\.jpg$', @@ -38,11 +38,26 @@ class QQMusicIE(InfoExtractor): 'id': '004MsGEo3DdNxV', 'ext': 'mp3', 'title': '如果', - 'upload_date': '20050626', + 'release_date': '20050626', 'creator': '李季美', 'description': 
'md5:46857d5ed62bc4ba84607a805dccf437', 'thumbnail': 're:^https?://.*\.jpg$', } + }, { + 'note': 'lyrics not in .lrc format', + 'url': 'http://y.qq.com/#type=song&mid=001JyApY11tIp6', + 'info_dict': { + 'id': '001JyApY11tIp6', + 'ext': 'mp3', + 'title': 'Shadows Over Transylvania', + 'release_date': '19970225', + 'creator': 'Dark Funeral', + 'description': 'md5:ed14d5bd7ecec19609108052c25b2c11', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + 'params': { + 'skip_download': True, + }, }] _FORMATS = { @@ -112,15 +127,27 @@ class QQMusicIE(InfoExtractor): self._check_formats(formats, mid) self._sort_formats(formats) - return { + actual_lrc_lyrics = ''.join( + line + '\n' for line in re.findall( + r'(?m)^(\[[0-9]{2}:[0-9]{2}(?:\.[0-9]{2,})?\][^\n]*|\[[^\]]*\])', lrc_content)) + + info_dict = { 'id': mid, 'formats': formats, 'title': song_name, - 'upload_date': publish_time, + 'release_date': publish_time, 'creator': singer, 'description': lrc_content, - 'thumbnail': thumbnail_url, + 'thumbnail': thumbnail_url } + if actual_lrc_lyrics: + info_dict['subtitles'] = { + 'origin': [{ + 'ext': 'lrc', + 'data': actual_lrc_lyrics, + }] + } + return info_dict class QQPlaylistBaseIE(InfoExtractor): @@ -174,7 +201,7 @@ class QQMusicSingerIE(QQPlaylistBaseIE): singer_desc = None if singer_id: - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg?utf8=1&outCharset=utf-8&format=xml&singerid=%s' % singer_id) req.add_header( 'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html') diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py index 1631faf29..7ff1d06c4 100644 --- a/youtube_dl/extractor/rai.py +++ b/youtube_dl/extractor/rai.py @@ -5,6 +5,7 @@ import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse, + compat_urlparse, ) from ..utils import ( parse_duration, @@ -72,6 +73,18 @@ class RaiIE(InfoExtractor): 'description': 'Primo appuntamento con "Il candidato" con Filippo Timi, alias Piero Zucca presidente!', 'uploader': 'RaiTre', } + }, + { + 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html', + 'md5': '037104d2c14132887e5e4cf114569214', + 'info_dict': { + 'id': '0c7a664b-d0f4-4b2c-8835-3f82e46f433e', + 'ext': 'flv', + 'title': 'Il pacco', + 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a', + 'uploader': 'RaiTre', + 'upload_date': '20141221', + }, } ] @@ -90,11 +103,14 @@ class RaiIE(InfoExtractor): relinker_url = self._extract_relinker_url(webpage) if not relinker_url: - iframe_path = self._search_regex( - r'<iframe[^>]+src="/?(dl/[^"]+\?iframe\b[^"]*)"', + iframe_url = self._search_regex( + [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"', + r'drawMediaRaiTV\(["\'](.+?)["\']'], webpage, 'iframe') + if not iframe_url.startswith('http'): + iframe_url = compat_urlparse.urljoin(url, iframe_url) webpage = self._download_webpage( - '%s/%s' % (host, iframe_path), video_id) + iframe_url, video_id) relinker_url = self._extract_relinker_url(webpage) relinker = self._download_json( diff --git a/youtube_dl/extractor/rtbf.py b/youtube_dl/extractor/rtbf.py index e4215d546..e42b319a3 100644 --- a/youtube_dl/extractor/rtbf.py +++ b/youtube_dl/extractor/rtbf.py @@ -9,8 +9,8 @@ from ..utils import ( class RTBFIE(InfoExtractor): - _VALID_URL = r'https?://www.rtbf.be/video/[^\?]+\?id=(?P<id>\d+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?rtbf\.be/(?:video/[^?]+\?.*\bid=|ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=)(?P<id>\d+)' + 
_TESTS = [{ 'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274', 'md5': '799f334ddf2c0a582ba80c44655be570', 'info_dict': { @@ -19,7 +19,14 @@ class RTBFIE(InfoExtractor): 'title': 'Les Diables au coeur (épisode 2)', 'duration': 3099, } - } + }, { + # geo restricted + 'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442', + 'only_matching': True, + }, { + 'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858', + 'only_matching': True, + }] _QUALITIES = [ ('mobile', 'mobile'), @@ -36,7 +43,7 @@ class RTBFIE(InfoExtractor): data = self._parse_json( unescapeHTML(self._search_regex( - r'data-video="([^"]+)"', webpage, 'data video')), + r'data-media="([^"]+)"', webpage, 'data video')), video_id) if data.get('provider').lower() == 'youtube': diff --git a/youtube_dl/extractor/rte.py b/youtube_dl/extractor/rte.py index 04158b993..d9cfbf180 100644 --- a/youtube_dl/extractor/rte.py +++ b/youtube_dl/extractor/rte.py @@ -9,16 +9,16 @@ from ..utils import ( class RteIE(InfoExtractor): - _VALID_URL = r'http?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/(?P<id>[0-9]+)/' + _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)' _TEST = { - 'url': 'http://www.rte.ie/player/de/show/10363114/', + 'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/', 'info_dict': { - 'id': '10363114', + 'id': '10478715', 'ext': 'mp4', - 'title': 'One News', + 'title': 'Watch iWitness online', 'thumbnail': 're:^https?://.*\.jpg$', - 'description': 'The One O\'Clock News followed by Weather.', - 'duration': 436.844, + 'description': 'iWitness : The spirit of Ireland, one voice and one minute at a time.', + 'duration': 60.046, }, 'params': { 'skip_download': 'f4m fails with --test atm' diff --git a/youtube_dl/extractor/rtl2.py b/youtube_dl/extractor/rtl2.py index 72cd80498..25f7faf76 100644 --- a/youtube_dl/extractor/rtl2.py +++ b/youtube_dl/extractor/rtl2.py @@ -1,6 +1,7 @@ # encoding: utf-8 from __future__ import unicode_literals +import re from .common import InfoExtractor @@ -8,22 +9,28 @@ class RTL2IE(InfoExtractor): _VALID_URL = r'http?://(?:www\.)?rtl2\.de/[^?#]*?/(?P<id>[^?#/]*?)(?:$|/(?:$|[?#]))' _TESTS = [{ 'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0', - 'md5': 'bfcc179030535b08dc2b36b469b5adc7', 'info_dict': { 'id': 'folge-203-0', 'ext': 'f4v', 'title': 'GRIP sucht den Sommerkönig', 'description': 'Matthias, Det und Helge treten gegeneinander an.' }, + 'params': { + # rtmp download + 'skip_download': True, + }, }, { 'url': 'http://www.rtl2.de/sendung/koeln-50667/video/5512-anna/21040-anna-erwischt-alex/', - 'md5': 'ffcd517d2805b57ce11a58a2980c2b02', 'info_dict': { 'id': '21040-anna-erwischt-alex', 'ext': 'mp4', 'title': 'Anna erwischt Alex!', 'description': 'Anna ist Alex\' Tochter bei Köln 50667.' 
}, + 'params': { + # rtmp download + 'skip_download': True, + }, }] def _real_extract(self, url): @@ -34,12 +41,18 @@ class RTL2IE(InfoExtractor): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - vico_id = self._html_search_regex( - r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id') - vivi_id = self._html_search_regex( - r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id') + mobj = re.search( + r'<div[^>]+data-collection="(?P<vico_id>\d+)"[^>]+data-video="(?P<vivi_id>\d+)"', + webpage) + if mobj: + vico_id = mobj.group('vico_id') + vivi_id = mobj.group('vivi_id') + else: + vico_id = self._html_search_regex( + r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id') + vivi_id = self._html_search_regex( + r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id') info_url = 'http://www.rtl2.de/video/php/get_video.php?vico_id=' + vico_id + '&vivi_id=' + vivi_id - webpage = self._download_webpage(info_url, '') info = self._download_json(info_url, video_id) video_info = info['video'] diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py index e0c530d64..543d94417 100644 --- a/youtube_dl/extractor/rtlnl.py +++ b/youtube_dl/extractor/rtlnl.py @@ -82,16 +82,21 @@ class RtlNlIE(InfoExtractor): meta = info.get('meta', {}) - # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118) - # NB: nowadays, recent ffmpeg and avconv can handle these encrypted streams, so - # this adaptive -> flash workaround is not required in general, but it also - # allows bypassing georestriction therefore is retained for now. - videopath = material['videopath'].replace('/adaptive/', '/flash/') + # m3u8 streams are encrypted and may not be handled properly by older ffmpeg/avconv. + # To workaround this previously adaptive -> flash trick was used to obtain + # unencrypted m3u8 streams (see https://github.com/rg3/youtube-dl/issues/4118) + # and bypass georestrictions as well. + # Currently, unencrypted m3u8 playlists are (intentionally?) invalid and therefore + # unusable albeit can be fixed by simple string replacement (see + # https://github.com/rg3/youtube-dl/pull/6337) + # Since recent ffmpeg and avconv handle encrypted streams just fine encrypted + # streams are used now. 
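+        # The progressive MP4 fallback below still derives its URL part from
+        # the '/adaptive/' segment of the videopath, hence the split on
+        # '/adaptive/' further down.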
+ videopath = material['videopath'] m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4') - video_urlpart = videopath.split('/flash/')[1][:-5] + video_urlpart = videopath.split('/adaptive/')[1][:-5] PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4' formats.extend([ diff --git a/youtube_dl/extractor/rtp.py b/youtube_dl/extractor/rtp.py index ecf4939cd..82b323cdd 100644 --- a/youtube_dl/extractor/rtp.py +++ b/youtube_dl/extractor/rtp.py @@ -18,6 +18,10 @@ class RTPIE(InfoExtractor): 'description': 'As paixões musicais de António Cartaxo e António Macedo', 'thumbnail': 're:^https?://.*\.jpg', }, + 'params': { + # rtmp download + 'skip_download': True, + }, }, { 'url': 'http://www.rtp.pt/play/p831/a-quimica-das-coisas', 'only_matching': True, diff --git a/youtube_dl/extractor/rts.py b/youtube_dl/extractor/rts.py index 9fbe239d8..12639f08b 100644 --- a/youtube_dl/extractor/rts.py +++ b/youtube_dl/extractor/rts.py @@ -19,7 +19,16 @@ from ..utils import ( class RTSIE(InfoExtractor): IE_DESC = 'RTS.ch' - _VALID_URL = r'https?://(?:www\.)?rts\.ch/(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))' + _VALID_URL = r'''(?x) + (?: + rts:(?P<rts_id>\d+)| + https?:// + (?:www\.)?rts\.ch/ + (?: + (?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html| + play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+) + ) + )''' _TESTS = [ { @@ -123,6 +132,15 @@ class RTSIE(InfoExtractor): }, }, { + # article with videos on rhs + 'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html', + 'info_dict': { + 'id': '6693917', + 'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse', + }, + 'playlist_mincount': 5, + }, + { 'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280', 'only_matching': True, } @@ -130,7 +148,7 @@ class RTSIE(InfoExtractor): def _real_extract(self, url): m = re.match(self._VALID_URL, url) - video_id = m.group('id') or m.group('id_new') + video_id = m.group('rts_id') or m.group('id') or m.group('id_new') display_id = m.group('display_id') or m.group('display_id_new') def download_json(internal_id): @@ -143,6 +161,15 @@ class RTSIE(InfoExtractor): # video_id extracted out of URL is not always a real id if 'video' not in all_info and 'audio' not in all_info: page = self._download_webpage(url, display_id) + + # article with videos on rhs + videos = re.findall( + r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"', + page) + if videos: + entries = [self.url_result('rts:%s' % video_urn, 'RTS') for video_urn in videos] + return self.playlist_result(entries, video_id, self._og_search_title(page)) + internal_id = self._html_search_regex( r'<(?:video|audio) data-id="([0-9]+)"', page, 'internal video id') diff --git a/youtube_dl/extractor/rtve.py b/youtube_dl/extractor/rtve.py index 82cd98ac7..603d7bd00 100644 --- a/youtube_dl/extractor/rtve.py +++ b/youtube_dl/extractor/rtve.py @@ -6,11 +6,11 @@ import re import time from .common import InfoExtractor -from ..compat import compat_urlparse from ..utils import ( ExtractorError, float_or_none, remove_end, + sanitized_Request, std_headers, struct_unpack, ) @@ -102,18 +102,14 @@ class RTVEALaCartaIE(InfoExtractor): if info['state'] == 'DESPU': raise ExtractorError('The video is no 
longer available', expected=True) png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id) - png = self._download_webpage(png_url, video_id, 'Downloading url information') + png_request = sanitized_Request(png_url) + png_request.add_header('Referer', url) + png = self._download_webpage(png_request, video_id, 'Downloading url information') video_url = _decrypt_url(png) if not video_url.endswith('.f4m'): - auth_url = video_url.replace( + video_url = video_url.replace( 'resources/', 'auth/resources/' ).replace('.net.rtve', '.multimedia.cdn.rtve') - video_path = self._download_webpage( - auth_url, video_id, 'Getting video url') - # Use mvod1.akcdn instead of flash.akamaihd.multimedia.cdn to get - # the right Content-Length header and the mp4 format - video_url = compat_urlparse.urljoin( - 'http://mvod1.akcdn.rtve.es/', video_path) subtitles = None if info.get('sbtFile') is not None: diff --git a/youtube_dl/extractor/rtvnh.py b/youtube_dl/extractor/rtvnh.py new file mode 100644 index 000000000..7c9d4b0cd --- /dev/null +++ b/youtube_dl/extractor/rtvnh.py @@ -0,0 +1,47 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class RTVNHIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://www.rtvnh.nl/video/131946', + 'md5': '6e1d0ab079e2a00b6161442d3ceacfc1', + 'info_dict': { + 'id': '131946', + 'ext': 'mp4', + 'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw', + 'thumbnail': 're:^https?:.*\.jpg$' + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + meta = self._parse_json(self._download_webpage( + 'http://www.rtvnh.nl/video/json?m=' + video_id, video_id), video_id) + + status = meta.get('status') + if status != 200: + raise ExtractorError( + '%s returned error code %d' % (self.IE_NAME, status), expected=True) + + formats = self._extract_smil_formats( + 'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id, fatal=False) + + for item in meta['source']['fb']: + if item.get('type') == 'hls': + formats.extend(self._extract_m3u8_formats( + item['file'], video_id, ext='mp4', entry_protocol='m3u8_native')) + elif item.get('type') == '': + formats.append({'url': item['file']}) + + return { + 'id': video_id, + 'title': meta['title'].strip(), + 'thumbnail': meta.get('image'), + 'formats': formats + } diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py index 5b1c3577a..9db62adb1 100644 --- a/youtube_dl/extractor/rutube.py +++ b/youtube_dl/extractor/rutube.py @@ -9,7 +9,7 @@ from ..compat import ( compat_str, ) from ..utils import ( - ExtractorError, + determine_ext, unified_strdate, ) @@ -17,9 +17,9 @@ from ..utils import ( class RutubeIE(InfoExtractor): IE_NAME = 'rutube' IE_DESC = 'Rutube videos' - _VALID_URL = r'https?://rutube\.ru/video/(?P<id>[\da-z]{32})' + _VALID_URL = r'https?://rutube\.ru/(?:video|play/embed)/(?P<id>[\da-z]{32})' - _TEST = { + _TESTS = [{ 'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/', 'info_dict': { 'id': '3eac3b4561676c17df9132a9a1e62e3e', @@ -30,12 +30,16 @@ class RutubeIE(InfoExtractor): 'uploader': 'NTDRussian', 'uploader_id': '29790', 'upload_date': '20131016', + 'age_limit': 0, }, 'params': { # It requires ffmpeg (m3u8 download) 'skip_download': True, }, - } + }, { + 'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661', + 'only_matching': True, + }] def _real_extract(self, url): video_id 
= self._match_id(url) @@ -50,10 +54,25 @@ class RutubeIE(InfoExtractor): 'http://rutube.ru/api/play/options/%s/?format=json' % video_id, video_id, 'Downloading options JSON') - m3u8_url = options['video_balancer'].get('m3u8') - if m3u8_url is None: - raise ExtractorError('Couldn\'t find m3u8 manifest url') - formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4') + formats = [] + for format_id, format_url in options['video_balancer'].items(): + ext = determine_ext(format_url) + if ext == 'm3u8': + m3u8_formats = self._extract_m3u8_formats( + format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + elif ext == 'f4m': + f4m_formats = self._extract_f4m_formats( + format_url, video_id, f4m_id=format_id, fatal=False) + if f4m_formats: + formats.extend(f4m_formats) + else: + formats.append({ + 'url': format_url, + 'format_id': format_id, + }) + self._sort_formats(formats) return { 'id': video['id'], @@ -73,9 +92,9 @@ class RutubeIE(InfoExtractor): class RutubeEmbedIE(InfoExtractor): IE_NAME = 'rutube:embed' IE_DESC = 'Rutube embedded videos' - _VALID_URL = 'https?://rutube\.ru/video/embed/(?P<id>[0-9]+)' + _VALID_URL = 'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)' - _TEST = { + _TESTS = [{ 'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=', 'info_dict': { 'id': 'a10e53b86e8f349080f718582ce4c661', @@ -89,7 +108,10 @@ class RutubeEmbedIE(InfoExtractor): 'params': { 'skip_download': 'Requires ffmpeg', }, - } + }, { + 'url': 'http://rutube.ru/play/embed/8083783', + 'only_matching': True, + }] def _real_extract(self, url): embed_id = self._match_id(url) diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py index 4e22628d0..e417bf661 100644 --- a/youtube_dl/extractor/ruutu.py +++ b/youtube_dl/extractor/ruutu.py @@ -6,19 +6,19 @@ from ..compat import compat_urllib_parse_urlparse from ..utils import ( determine_ext, int_or_none, + xpath_attr, xpath_text, ) class RuutuIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?ruutu\.fi/ohjelmat/(?:[^/?#]+/)*(?P<id>[^/?#]+)' + _VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)' _TESTS = [ { - 'url': 'http://www.ruutu.fi/ohjelmat/oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi', + 'url': 'http://www.ruutu.fi/video/2058907', 'md5': 'ab2093f39be1ca8581963451b3c0234f', 'info_dict': { 'id': '2058907', - 'display_id': 'oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi', 'ext': 'mp4', 'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? 
- Nyt se selvisi!', 'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6', @@ -28,14 +28,13 @@ class RuutuIE(InfoExtractor): }, }, { - 'url': 'http://www.ruutu.fi/ohjelmat/superpesis/superpesis-katso-koko-kausi-ruudussa', + 'url': 'http://www.ruutu.fi/video/2057306', 'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9', 'info_dict': { 'id': '2057306', - 'display_id': 'superpesis-katso-koko-kausi-ruudussa', 'ext': 'mp4', 'title': 'Superpesis: katso koko kausi Ruudussa', - 'description': 'md5:44c44a99fdbe5b380ab74ebd75f0af77', + 'description': 'md5:da2736052fef3b2bd5e0005e63c25eac', 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 40, 'age_limit': 0, @@ -44,29 +43,10 @@ class RuutuIE(InfoExtractor): ] def _real_extract(self, url): - display_id = self._match_id(url) + video_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) - - video_id = self._search_regex( - r'data-media-id="(\d+)"', webpage, 'media id') - - video_xml_url = None - - media_data = self._search_regex( - r'jQuery\.extend\([^,]+,\s*(.+?)\);', webpage, - 'media data', default=None) - if media_data: - media_json = self._parse_json(media_data, display_id, fatal=False) - if media_json: - xml_url = media_json.get('ruutuplayer', {}).get('xmlUrl') - if xml_url: - video_xml_url = xml_url.replace('{ID}', video_id) - - if not video_xml_url: - video_xml_url = 'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id - - video_xml = self._download_xml(video_xml_url, video_id) + video_xml = self._download_xml( + 'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id) formats = [] processed_urls = [] @@ -77,16 +57,21 @@ class RuutuIE(InfoExtractor): extract_formats(child) elif child.tag.endswith('File'): video_url = child.text - if not video_url or video_url in processed_urls or 'NOT_USED' in video_url: + if (not video_url or video_url in processed_urls or + any(p in video_url for p in ('NOT_USED', 'NOT-USED'))): return processed_urls.append(video_url) ext = determine_ext(video_url) if ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( - video_url, video_id, 'mp4', m3u8_id='hls')) + m3u8_formats = self._extract_m3u8_formats( + video_url, video_id, 'mp4', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) elif ext == 'f4m': - formats.extend(self._extract_f4m_formats( - video_url, video_id, f4m_id='hds')) + f4m_formats = self._extract_f4m_formats( + video_url, video_id, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) else: proto = compat_urllib_parse_urlparse(video_url).scheme if not child.tag.startswith('HTTP') and proto != 'rtmp': @@ -94,7 +79,7 @@ class RuutuIE(InfoExtractor): preference = -1 if proto == 'rtmp' else 1 label = child.get('label') tbr = int_or_none(child.get('bitrate')) - width, height = [int_or_none(x) for x in child.get('resolution', '').split('x')] + width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]] formats.append({ 'format_id': '%s-%s' % (proto, label if label else tbr), 'url': video_url, @@ -109,10 +94,9 @@ class RuutuIE(InfoExtractor): return { 'id': video_id, - 'display_id': display_id, - 'title': self._og_search_title(webpage), - 'description': self._og_search_description(webpage), - 'thumbnail': self._og_search_thumbnail(webpage), + 'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True), + 'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'), + 'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 
'thumbnail'), 'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')), 'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')), 'formats': formats, diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py index f3c80708c..7de7b7273 100644 --- a/youtube_dl/extractor/safari.py +++ b/youtube_dl/extractor/safari.py @@ -4,23 +4,20 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from .brightcove import BrightcoveIE +from .brightcove import BrightcoveLegacyIE -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) from ..utils import ( ExtractorError, + sanitized_Request, smuggle_url, std_headers, + urlencode_postdata, ) class SafariBaseIE(InfoExtractor): _LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/' _SUCCESSFUL_LOGIN_REGEX = r'<a href="/accounts/logout/"[^>]*>Sign Out</a>' - _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to supply credentials for safaribooksonline.com' _NETRC_MACHINE = 'safari' _API_BASE = 'https://www.safaribooksonline.com/api/v1/book' @@ -37,9 +34,7 @@ class SafariBaseIE(InfoExtractor): def _login(self): (username, password) = self._get_login_info() if username is None: - raise ExtractorError( - self._ACCOUNT_CREDENTIALS_HINT, - expected=True) + self.raise_login_required('safaribooksonline.com account is required') headers = std_headers if 'Referer' not in headers: @@ -61,8 +56,8 @@ class SafariBaseIE(InfoExtractor): 'next': '', } - request = compat_urllib_request.Request( - self._LOGIN_URL, compat_urllib_parse.urlencode(login_form), headers=headers) + request = sanitized_Request( + self._LOGIN_URL, urlencode_postdata(login_form), headers=headers) login_page = self._download_webpage( request, None, 'Logging in as %s' % username) @@ -115,11 +110,11 @@ class SafariIE(SafariBaseIE): '%s/%s/chapter-content/%s.html' % (self._API_BASE, course_id, part), part) - bc_url = BrightcoveIE._extract_brightcove_url(webpage) + bc_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) if not bc_url: raise ExtractorError('Could not extract Brightcove URL from %s' % url, expected=True) - return self.url_result(smuggle_url(bc_url, {'Referer': url}), 'Brightcove') + return self.url_result(smuggle_url(bc_url, {'Referer': url}), 'BrightcoveLegacy') class SafariCourseIE(SafariBaseIE): diff --git a/youtube_dl/extractor/sandia.py b/youtube_dl/extractor/sandia.py index 9c88167f0..759898a49 100644 --- a/youtube_dl/extractor/sandia.py +++ b/youtube_dl/extractor/sandia.py @@ -6,14 +6,12 @@ import json import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( int_or_none, js_to_json, mimetype2ext, + sanitized_Request, unified_strdate, ) @@ -37,7 +35,7 @@ class SandiaIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'MediasitePlayerCaps=ClientPlugins=4') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/screenwavemedia.py b/youtube_dl/extractor/screenwavemedia.py index d1ab66b32..05f93904c 100644 --- a/youtube_dl/extractor/screenwavemedia.py +++ b/youtube_dl/extractor/screenwavemedia.py @@ -7,12 +7,13 @@ from .common import InfoExtractor from ..utils import ( int_or_none, unified_strdate, + js_to_json, ) class ScreenwaveMediaIE(InfoExtractor): - _VALID_URL = 
r'http://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?[^"]*\bid=(?P<id>.+)' - + _VALID_URL = r'https?://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=(?P<id>[A-Za-z0-9-]+)' + EMBED_PATTERN = r'src=(["\'])(?P<url>(?:https?:)?//player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=.+?)\1' _TESTS = [{ 'url': 'http://player.screenwavemedia.com/play/play.php?playerdiv=videoarea&companiondiv=squareAd&id=Cinemassacre-19911', 'only_matching': True, @@ -22,59 +23,71 @@ class ScreenwaveMediaIE(InfoExtractor): video_id = self._match_id(url) playerdata = self._download_webpage( - 'http://player.screenwavemedia.com/play/player.php?id=%s' % video_id, + 'http://player.screenwavemedia.com/player.php?id=%s' % video_id, video_id, 'Downloading player webpage') vidtitle = self._search_regex( r'\'vidtitle\'\s*:\s*"([^"]+)"', playerdata, 'vidtitle').replace('\\/', '/') - vidurl = self._search_regex( - r'\'vidurl\'\s*:\s*"([^"]+)"', playerdata, 'vidurl').replace('\\/', '/') - - videolist_url = None - - mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata) - if mobj: - videoserver = mobj.group('videoserver') - mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata) - vidid = mobj.group('vidid') if mobj else video_id - videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid) - else: - mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata) - if mobj: - videolist_url = mobj.group('smil') - - if videolist_url: - videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML') - formats = [] - baseurl = vidurl[:vidurl.rfind('/') + 1] - for video in videolist.findall('.//video'): - src = video.get('src') - if not src: + + playerconfig = self._download_webpage( + 'http://player.screenwavemedia.com/player.js', + video_id, 'Downloading playerconfig webpage') + + videoserver = self._search_regex(r'SWMServer\s*=\s*"([\d\.]+)"', playerdata, 'videoserver') + + sources = self._parse_json( + js_to_json( + re.sub( + r'(?s)/\*.*?\*/', '', + self._search_regex( + r"sources\s*:\s*(\[[^\]]+?\])", playerconfig, + 'sources', + ).replace( + "' + thisObj.options.videoserver + '", + videoserver + ).replace( + "' + playerVidId + '", + video_id + ) + ) + ), + video_id, fatal=False + ) + + # Fallback to hardcoded sources if JS changes again + if not sources: + self.report_warning('Falling back to a hardcoded list of streams') + sources = [{ + 'file': 'http://%s/vod/%s_%s.mp4' % (videoserver, video_id, format_id), + 'type': 'mp4', + 'label': format_label, + } for format_id, format_label in ( + ('low', '144p Low'), ('med', '160p Med'), ('high', '360p High'), ('hd1', '720p HD1'))] + sources.append({ + 'file': 'http://%s/vod/smil:%s.smil/playlist.m3u8' % (videoserver, video_id), + 'type': 'hls', + }) + + formats = [] + for source in sources: + if source['type'] == 'hls': + formats.extend(self._extract_m3u8_formats(source['file'], video_id)) + else: + file_ = source.get('file') + if not file_: continue - file_ = src.partition(':')[-1] - width = int_or_none(video.get('width')) - height = int_or_none(video.get('height')) - bitrate = int_or_none(video.get('system-bitrate'), scale=1000) - format = { - 'url': baseurl + file_, - 'format_id': src.rpartition('.')[0].rpartition('_')[-1], - } - if width or height: - format.update({ - 'tbr': bitrate, - 'width': width, - 'height': height, - }) - else: - format.update({ - 'abr': bitrate, - 'vcodec': 'none', - }) - formats.append(format) - else: - formats = [{ - 
'url': vidurl, - }] + format_label = source.get('label') + format_id = self._search_regex( + r'_(.+?)\.[^.]+$', file_, 'format id', default=None) + height = int_or_none(self._search_regex( + r'^(\d+)[pP]', format_label, 'height', default=None)) + formats.append({ + 'url': source['file'], + 'format_id': format_id, + 'format': format_label, + 'ext': source.get('type'), + 'height': height, + }) self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/senateisvp.py b/youtube_dl/extractor/senateisvp.py index 9c53704ea..474ebb49b 100644 --- a/youtube_dl/extractor/senateisvp.py +++ b/youtube_dl/extractor/senateisvp.py @@ -121,9 +121,9 @@ class SenateISVPIE(InfoExtractor): 'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=', }] else: - hdcore_sign = '?hdcore=3.1.0' + hdcore_sign = 'hdcore=3.1.0' url_params = (domain, video_id, stream_num) - f4m_url = '%s/z/%s_1@%s/manifest.f4m' % url_params + hdcore_sign + f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'): # URLs without the extra param induce an 404 error diff --git a/youtube_dl/extractor/sexykarma.py b/youtube_dl/extractor/sexykarma.py index 6446d26dc..e33483674 100644 --- a/youtube_dl/extractor/sexykarma.py +++ b/youtube_dl/extractor/sexykarma.py @@ -29,6 +29,7 @@ class SexyKarmaIE(InfoExtractor): 'view_count': int, 'comment_count': int, 'categories': list, + 'age_limit': 18, } }, { 'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html', diff --git a/youtube_dl/extractor/shahid.py b/youtube_dl/extractor/shahid.py new file mode 100644 index 000000000..f76fb12c0 --- /dev/null +++ b/youtube_dl/extractor/shahid.py @@ -0,0 +1,107 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_urllib_parse +from ..utils import ( + ExtractorError, + int_or_none, + parse_iso8601, +) + + +class ShahidIE(InfoExtractor): + _VALID_URL = r'https?://shahid\.mbc\.net/ar/episode/(?P<id>\d+)/?' 
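+    # Extraction takes two JSON requests: getPlayerContent yields the m3u8
+    # stream URL, while a second API call (api.shahid.net by default, or the
+    # endpoint given in the page's flashvars) supplies the clip metadata.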
+ _TESTS = [{ + 'url': 'https://shahid.mbc.net/ar/episode/90574/%D8%A7%D9%84%D9%85%D9%84%D9%83-%D8%B9%D8%A8%D8%AF%D8%A7%D9%84%D9%84%D9%87-%D8%A7%D9%84%D8%A5%D9%86%D8%B3%D8%A7%D9%86-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-3.html', + 'info_dict': { + 'id': '90574', + 'ext': 'mp4', + 'title': 'الملك عبدالله الإنسان الموسم 1 كليب 3', + 'description': 'الفيلم الوثائقي - الملك عبد الله الإنسان', + 'duration': 2972, + 'timestamp': 1422057420, + 'upload_date': '20150123', + }, + 'params': { + # m3u8 download + 'skip_download': True, + } + }, { + # shahid plus subscriber only + 'url': 'https://shahid.mbc.net/ar/episode/90511/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1.html', + 'only_matching': True + }] + + def _handle_error(self, response): + if not isinstance(response, dict): + return + error = response.get('error') + if error: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, '\n'.join(error.values())), + expected=True) + + def _download_json(self, url, video_id, note='Downloading JSON metadata'): + response = super(ShahidIE, self)._download_json(url, video_id, note)['data'] + self._handle_error(response) + return response + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + api_vars = { + 'id': video_id, + 'type': 'player', + 'url': 'http://api.shahid.net/api/v1_1', + 'playerType': 'episode', + } + + flashvars = self._search_regex( + r'var\s+flashvars\s*=\s*({[^}]+})', webpage, 'flashvars', default=None) + if flashvars: + for key in api_vars.keys(): + value = self._search_regex( + r'\b%s\s*:\s*(?P<q>["\'])(?P<value>.+?)(?P=q)' % key, + flashvars, 'type', default=None, group='value') + if value: + api_vars[key] = value + + player = self._download_json( + 'https://shahid.mbc.net/arContent/getPlayerContent-param-.id-%s.type-%s.html' + % (video_id, api_vars['type']), video_id, 'Downloading player JSON') + + formats = self._extract_m3u8_formats(player['url'], video_id, 'mp4') + + video = self._download_json( + '%s/%s/%s?%s' % ( + api_vars['url'], api_vars['playerType'], api_vars['id'], + compat_urllib_parse.urlencode({ + 'apiKey': 'sh@hid0nlin3', + 'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=', + })), + video_id, 'Downloading video JSON') + + video = video[api_vars['playerType']] + + title = video['title'] + description = video.get('description') + thumbnail = video.get('thumbnailUrl') + duration = int_or_none(video.get('duration')) + timestamp = parse_iso8601(video.get('referenceDate')) + categories = [ + category['name'] + for category in video.get('genres', []) if 'name' in category] + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'timestamp': timestamp, + 'categories': categories, + 'formats': formats, + } diff --git a/youtube_dl/extractor/shared.py b/youtube_dl/extractor/shared.py index a07677686..8eda3c864 100644 --- a/youtube_dl/extractor/shared.py +++ b/youtube_dl/extractor/shared.py @@ -3,28 +3,37 @@ from __future__ import unicode_literals import base64 from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) class SharedIE(InfoExtractor): - _VALID_URL = r'http://shared\.sx/(?P<id>[\da-z]{10})' + IE_DESC = 'shared.sx and vivo.sx' + _VALID_URL = 
r'http://(?:shared|vivo)\.sx/(?P<id>[\da-z]{10})' - _TEST = { + _TESTS = [{ 'url': 'http://shared.sx/0060718775', 'md5': '106fefed92a8a2adb8c98e6a0652f49b', 'info_dict': { 'id': '0060718775', 'ext': 'mp4', 'title': 'Bmp4', + 'filesize': 1720110, + }, + }, { + 'url': 'http://vivo.sx/d7ddda0e78', + 'md5': '15b3af41be0b4fe01f4df075c2678b2c', + 'info_dict': { + 'id': 'd7ddda0e78', + 'ext': 'mp4', + 'title': 'Chicken', + 'filesize': 528031, }, - } + }] def _real_extract(self, url): video_id = self._match_id(url) @@ -35,7 +44,7 @@ class SharedIE(InfoExtractor): 'Video %s does not exist' % video_id, expected=True) download_form = self._hidden_inputs(webpage) - request = compat_urllib_request.Request( + request = sanitized_Request( url, compat_urllib_parse.urlencode(download_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') diff --git a/youtube_dl/extractor/sharesix.py b/youtube_dl/extractor/sharesix.py index ac3e3adf2..f1ea9bdb2 100644 --- a/youtube_dl/extractor/sharesix.py +++ b/youtube_dl/extractor/sharesix.py @@ -4,12 +4,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( parse_duration, + sanitized_Request, ) @@ -50,7 +48,7 @@ class ShareSixIE(InfoExtractor): 'method_free': 'Free' } post = compat_urllib_parse.urlencode(fields) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage(req, video_id, diff --git a/youtube_dl/extractor/sina.py b/youtube_dl/extractor/sina.py index 0891a441f..b2258a0f6 100644 --- a/youtube_dl/extractor/sina.py +++ b/youtube_dl/extractor/sina.py @@ -4,10 +4,8 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse, -) +from ..compat import compat_urllib_parse +from ..utils import sanitized_Request class SinaIE(InfoExtractor): @@ -61,7 +59,7 @@ class SinaIE(InfoExtractor): if mobj.group('token') is not None: # The video id is in the redirected url self.to_screen('Getting video id') - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.get_method = lambda: 'HEAD' (_, urlh) = self._download_webpage_handle(request, 'NA', False) return self._real_extract(urlh.geturl()) diff --git a/youtube_dl/extractor/skynewsarabia.py b/youtube_dl/extractor/skynewsarabia.py new file mode 100644 index 000000000..05e1b02ad --- /dev/null +++ b/youtube_dl/extractor/skynewsarabia.py @@ -0,0 +1,117 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + parse_iso8601, + parse_duration, +) + + +class SkyNewsArabiaBaseIE(InfoExtractor): + _IMAGE_BASE_URL = 'http://www.skynewsarabia.com/web/images' + + def _call_api(self, path, value): + return self._download_json('http://api.skynewsarabia.com/web/rest/v2/%s/%s.json' % (path, value), value) + + def _get_limelight_media_id(self, url): + return self._search_regex(r'/media/[^/]+/([a-z0-9]{32})', url, 'limelight media id') + + def _get_image_url(self, image_path_template, width='1600', height='1200'): + return self._IMAGE_BASE_URL + image_path_template.format(width=width, height=height) + + def _extract_video_info(self, video_data): + video_id = compat_str(video_data['id']) + topic = 
video_data.get('topicTitle') + return { + '_type': 'url_transparent', + 'url': 'limelight:media:%s' % self._get_limelight_media_id(video_data['videoUrl'][0]['url']), + 'id': video_id, + 'title': video_data['headline'], + 'description': video_data.get('summary'), + 'thumbnail': self._get_image_url(video_data['mediaAsset']['imageUrl']), + 'timestamp': parse_iso8601(video_data.get('date')), + 'duration': parse_duration(video_data.get('runTime')), + 'tags': video_data.get('tags', []), + 'categories': [topic] if topic else [], + 'webpage_url': 'http://www.skynewsarabia.com/web/video/%s' % video_id, + 'ie_key': 'LimelightMedia', + } + + +class SkyNewsArabiaIE(SkyNewsArabiaBaseIE): + IE_NAME = 'skynewsarabia:video' + _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/video/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://www.skynewsarabia.com/web/video/794902/%D9%86%D8%B5%D9%81-%D9%85%D9%84%D9%8A%D9%88%D9%86-%D9%85%D8%B5%D8%A8%D8%A7%D8%AD-%D8%B4%D8%AC%D8%B1%D8%A9-%D9%83%D8%B1%D9%8A%D8%B3%D9%85%D8%A7%D8%B3', + 'info_dict': { + 'id': '794902', + 'ext': 'flv', + 'title': 'نصف مليون مصباح على شجرة كريسماس', + 'description': 'md5:22f1b27f0850eeb10c7e59b1f16eb7c6', + 'upload_date': '20151128', + 'timestamp': 1448697198, + 'duration': 2119, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + video_data = self._call_api('video', video_id) + return self._extract_video_info(video_data) + + +class SkyNewsArabiaArticleIE(SkyNewsArabiaBaseIE): + IE_NAME = 'skynewsarabia:video' + _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/article/(?P<id>[0-9]+)' + _TESTS = [{ + 'url': 'http://www.skynewsarabia.com/web/article/794549/%D8%A7%D9%94%D8%AD%D8%AF%D8%A7%D8%AB-%D8%A7%D9%84%D8%B4%D8%B1%D9%82-%D8%A7%D9%84%D8%A7%D9%94%D9%88%D8%B3%D8%B7-%D8%AE%D8%B1%D9%8A%D8%B7%D8%A9-%D8%A7%D9%84%D8%A7%D9%94%D9%84%D8%B9%D8%A7%D8%A8-%D8%A7%D9%84%D8%B0%D9%83%D9%8A%D8%A9', + 'info_dict': { + 'id': '794549', + 'ext': 'flv', + 'title': 'بالفيديو.. 
ألعاب ذكية تحاكي واقع المنطقة', + 'description': 'md5:0c373d29919a851e080ee4edd0c5d97f', + 'upload_date': '20151126', + 'timestamp': 1448559336, + 'duration': 281.6, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + 'url': 'http://www.skynewsarabia.com/web/article/794844/%D8%A7%D8%B3%D8%AA%D9%87%D8%AF%D8%A7%D9%81-%D9%82%D9%88%D8%A7%D8%B1%D8%A8-%D8%A7%D9%94%D8%B3%D9%84%D8%AD%D8%A9-%D9%84%D9%85%D9%8A%D9%84%D9%8A%D8%B4%D9%8A%D8%A7%D8%AA-%D8%A7%D9%84%D8%AD%D9%88%D8%AB%D9%8A-%D9%88%D8%B5%D8%A7%D9%84%D8%AD', + 'info_dict': { + 'id': '794844', + 'title': 'إحباط تهريب أسلحة لميليشيات الحوثي وصالح بجنوب اليمن', + 'description': 'md5:5c927b8b2e805796e7f693538d96fc7e', + }, + 'playlist_mincount': 2, + }] + + def _real_extract(self, url): + article_id = self._match_id(url) + article_data = self._call_api('article', article_id) + media_asset = article_data['mediaAsset'] + if media_asset['type'] == 'VIDEO': + topic = article_data.get('topicTitle') + return { + '_type': 'url_transparent', + 'url': 'limelight:media:%s' % self._get_limelight_media_id(media_asset['videoUrl'][0]['url']), + 'id': article_id, + 'title': article_data['headline'], + 'description': article_data.get('summary'), + 'thumbnail': self._get_image_url(media_asset['imageUrl']), + 'timestamp': parse_iso8601(article_data.get('date')), + 'tags': article_data.get('tags', []), + 'categories': [topic] if topic else [], + 'webpage_url': url, + 'ie_key': 'LimelightMedia', + } + entries = [self._extract_video_info(item) for item in article_data.get('inlineItems', []) if item['type'] == 'VIDEO'] + return self.playlist_result(entries, article_id, article_data['headline'], article_data.get('summary')) diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py index 93a7cfe15..30210c8a3 100644 --- a/youtube_dl/extractor/smotri.py +++ b/youtube_dl/extractor/smotri.py @@ -7,13 +7,11 @@ import hashlib import uuid from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, unified_strdate, ) @@ -176,7 +174,7 @@ class SmotriIE(InfoExtractor): if video_password: video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest() - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') @@ -330,10 +328,7 @@ class SmotriBroadcastIE(InfoExtractor): (username, password) = self._get_login_info() if username is None: - raise ExtractorError( - 'Erotic broadcasts allowed only for registered users, ' - 'use --username and --password options to provide account credentials.', - expected=True) + self.raise_login_required('Erotic broadcasts allowed only for registered users') login_form = { 'login-hint53': '1', @@ -342,7 +337,7 @@ class SmotriBroadcastIE(InfoExtractor): 'password': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') broadcast_page = self._download_webpage( diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py index ba2d5e19b..ea8fc258d 100644 --- a/youtube_dl/extractor/sohu.py +++ b/youtube_dl/extractor/sohu.py @@ -6,11 +6,11 @@ import re from .common import InfoExtractor 
from ..compat import ( compat_str, - compat_urllib_request, compat_urllib_parse, ) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -96,7 +96,7 @@ class SohuIE(InfoExtractor): else: base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid=' - req = compat_urllib_request.Request(base_data_url + vid_id) + req = sanitized_Request(base_data_url + vid_id) cn_verification_proxy = self._downloader.params.get('cn_verification_proxy') if cn_verification_proxy: @@ -158,6 +158,7 @@ class SohuIE(InfoExtractor): 'file': clips_url[i], 'new': su[i], 'prod': 'flash', + 'rb': 1, } if cdnId is not None: diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py index 118ca4832..02e64e094 100644 --- a/youtube_dl/extractor/soundcloud.py +++ b/youtube_dl/extractor/soundcloud.py @@ -4,13 +4,17 @@ from __future__ import unicode_literals import re import itertools -from .common import InfoExtractor +from .common import ( + InfoExtractor, + SearchInfoExtractor +) from ..compat import ( compat_str, compat_urlparse, compat_urllib_parse, ) from ..utils import ( + encode_dict, ExtractorError, int_or_none, unified_strdate, @@ -29,7 +33,7 @@ class SoundcloudIE(InfoExtractor): _VALID_URL = r'''(?x)^(?:https?://)? (?:(?:(?:www\.|m\.)?soundcloud\.com/ (?P<uploader>[\w\d-]+)/ - (?!sets/|(?:likes|tracks)/?(?:$|[?#])) + (?!(?:tracks|sets(?:/[^/?#]+)?|reposts|likes|spotlight)/?(?:$|[?#])) (?P<title>[\w\d-]+)/? (?P<token>[^?]+?)?(?:[?].*)?$) |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+) @@ -113,7 +117,7 @@ class SoundcloudIE(InfoExtractor): }, ] - _CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28' + _CLIENT_ID = '02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea' _IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf' def report_resolve(self, video_id): @@ -282,69 +286,150 @@ class SoundcloudSetIE(SoundcloudIE): msgs = (compat_str(err['error_message']) for err in info['errors']) raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs)) + entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in info['tracks']] + return { '_type': 'playlist', - 'entries': [self._extract_info_dict(track, secret_token=token) for track in info['tracks']], + 'entries': entries, 'id': '%s' % info['id'], 'title': info['title'], } class SoundcloudUserIE(SoundcloudIE): - _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<user>[^/]+)/?((?P<rsrc>tracks|likes)/?)?(\?.*)?$' + _VALID_URL = r'''(?x) + https?:// + (?:(?:www|m)\.)?soundcloud\.com/ + (?P<user>[^/]+) + (?:/ + (?P<rsrc>tracks|sets|reposts|likes|spotlight) + )? 
+ /?(?:[?#].*)?$ + ''' IE_NAME = 'soundcloud:user' _TESTS = [{ - 'url': 'https://soundcloud.com/the-concept-band', + 'url': 'https://soundcloud.com/the-akashic-chronicler', 'info_dict': { - 'id': '9615865', - 'title': 'The Royal Concept', + 'id': '114582580', + 'title': 'The Akashic Chronicler (All)', }, - 'playlist_mincount': 12 + 'playlist_mincount': 111, }, { - 'url': 'https://soundcloud.com/the-concept-band/likes', + 'url': 'https://soundcloud.com/the-akashic-chronicler/tracks', 'info_dict': { - 'id': '9615865', - 'title': 'The Royal Concept', + 'id': '114582580', + 'title': 'The Akashic Chronicler (Tracks)', }, - 'playlist_mincount': 1, + 'playlist_mincount': 50, }, { - 'url': 'https://soundcloud.com/the-akashic-chronicler/tracks', - 'only_matching': True, + 'url': 'https://soundcloud.com/the-akashic-chronicler/sets', + 'info_dict': { + 'id': '114582580', + 'title': 'The Akashic Chronicler (Playlists)', + }, + 'playlist_mincount': 3, + }, { + 'url': 'https://soundcloud.com/the-akashic-chronicler/reposts', + 'info_dict': { + 'id': '114582580', + 'title': 'The Akashic Chronicler (Reposts)', + }, + 'playlist_mincount': 7, + }, { + 'url': 'https://soundcloud.com/the-akashic-chronicler/likes', + 'info_dict': { + 'id': '114582580', + 'title': 'The Akashic Chronicler (Likes)', + }, + 'playlist_mincount': 321, + }, { + 'url': 'https://soundcloud.com/grynpyret/spotlight', + 'info_dict': { + 'id': '7098329', + 'title': 'Grynpyret (Spotlight)', + }, + 'playlist_mincount': 1, }] + _API_BASE = 'https://api.soundcloud.com' + _API_V2_BASE = 'https://api-v2.soundcloud.com' + + _BASE_URL_MAP = { + 'all': '%s/profile/soundcloud:users:%%s' % _API_V2_BASE, + 'tracks': '%s/users/%%s/tracks' % _API_BASE, + 'sets': '%s/users/%%s/playlists' % _API_V2_BASE, + 'reposts': '%s/profile/soundcloud:users:%%s/reposts' % _API_V2_BASE, + 'likes': '%s/users/%%s/likes' % _API_V2_BASE, + 'spotlight': '%s/users/%%s/spotlight' % _API_V2_BASE, + } + + _TITLE_MAP = { + 'all': 'All', + 'tracks': 'Tracks', + 'sets': 'Playlists', + 'reposts': 'Reposts', + 'likes': 'Likes', + 'spotlight': 'Spotlight', + } + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) uploader = mobj.group('user') - resource = mobj.group('rsrc') - if resource is None: - resource = 'tracks' - elif resource == 'likes': - resource = 'favorites' url = 'http://soundcloud.com/%s/' % uploader resolv_url = self._resolv_url(url) user = self._download_json( resolv_url, uploader, 'Downloading user info') - base_url = 'http://api.soundcloud.com/users/%s/%s.json?' % (uploader, resource) + + resource = mobj.group('rsrc') or 'all' + base_url = self._BASE_URL_MAP[resource] % user['id'] + + next_href = None entries = [] for i in itertools.count(): - data = compat_urllib_parse.urlencode({ - 'offset': i * 50, - 'limit': 50, - 'client_id': self._CLIENT_ID, - }) - new_entries = self._download_json( - base_url + data, uploader, 'Downloading track page %s' % (i + 1)) - if len(new_entries) == 0: + if not next_href: + data = compat_urllib_parse.urlencode({ + 'offset': i * 50, + 'limit': 50, + 'client_id': self._CLIENT_ID, + 'linked_partitioning': '1', + 'representation': 'speedy', + }) + next_href = base_url + '?' 
+ data + + response = self._download_json( + next_href, uploader, 'Downloading track page %s' % (i + 1)) + + collection = response['collection'] + + if not collection: self.to_screen('%s: End page received' % uploader) break - entries.extend(self.url_result(e['permalink_url'], 'Soundcloud') for e in new_entries) + + def resolve_permalink_url(candidates): + for cand in candidates: + if isinstance(cand, dict): + permalink_url = cand.get('permalink_url') + if permalink_url and permalink_url.startswith('http'): + return permalink_url + + for e in collection: + permalink_url = resolve_permalink_url((e, e.get('track'), e.get('playlist'))) + if permalink_url: + entries.append(self.url_result(permalink_url)) + + if 'next_href' in response: + next_href = response['next_href'] + if not next_href: + break + else: + next_href = None return { '_type': 'playlist', 'id': compat_str(user['id']), - 'title': user['username'], + 'title': '%s (%s)' % (user['username'], self._TITLE_MAP[resource]), 'entries': entries, } @@ -379,9 +464,7 @@ class SoundcloudPlaylistIE(SoundcloudIE): data = self._download_json( base_url + data, playlist_id, 'Downloading playlist') - entries = [ - self._extract_info_dict(t, quiet=True, secret_token=token) - for t in data['tracks']] + entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in data['tracks']] return { '_type': 'playlist', @@ -390,3 +473,60 @@ class SoundcloudPlaylistIE(SoundcloudIE): 'description': data.get('description'), 'entries': entries, } + + +class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE): + IE_NAME = 'soundcloud:search' + IE_DESC = 'Soundcloud search' + _MAX_RESULTS = float('inf') + _TESTS = [{ + 'url': 'scsearch15:post-avant jazzcore', + 'info_dict': { + 'title': 'post-avant jazzcore', + }, + 'playlist_count': 15, + }] + + _SEARCH_KEY = 'scsearch' + _MAX_RESULTS_PER_PAGE = 200 + _DEFAULT_RESULTS_PER_PAGE = 50 + _API_V2_BASE = 'https://api-v2.soundcloud.com' + + def _get_collection(self, endpoint, collection_id, **query): + limit = min( + query.get('limit', self._DEFAULT_RESULTS_PER_PAGE), + self._MAX_RESULTS_PER_PAGE) + query['limit'] = limit + query['client_id'] = self._CLIENT_ID + query['linked_partitioning'] = '1' + query['offset'] = 0 + data = compat_urllib_parse.urlencode(encode_dict(query)) + next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data) + + collected_results = 0 + + for i in itertools.count(1): + response = self._download_json( + next_url, collection_id, 'Downloading page {0}'.format(i), + 'Unable to download API page') + + collection = response.get('collection', []) + if not collection: + break + + collection = list(filter(bool, collection)) + collected_results += len(collection) + + for item in collection: + yield self.url_result(item['uri'], SoundcloudIE.ie_key()) + + if not collection or collected_results >= limit: + break + + next_url = response.get('next_href') + if not next_url: + break + + def _get_n_results(self, query, n): + tracks = self._get_collection('/search/tracks', query, limit=n, q=query) + return self.playlist_result(tracks, playlist_title=query) diff --git a/youtube_dl/extractor/southpark.py b/youtube_dl/extractor/southpark.py index 7fb165a87..87b650468 100644 --- a/youtube_dl/extractor/southpark.py +++ b/youtube_dl/extractor/southpark.py @@ -45,6 +45,14 @@ class SouthParkDeIE(SouthParkIE): 'title': 'The Government Won\'t Respect My Privacy', 'description': 'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.', }, + }, { + # non-ASCII characters in initial URL + 
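Both the rewritten `soundcloud:user` pager and the new `soundcloud:search` generator above walk the API the same way: request linked partitioning and keep following `next_href` until the collection runs dry. A condensed, self-contained version of that loop (a hypothetical helper; error handling and the rest of the client-id plumbing are trimmed):
```
import json

try:  # Python 3
    from urllib.parse import urlencode
    from urllib.request import urlopen
except ImportError:  # Python 2
    from urllib import urlencode
    from urllib2 import urlopen


def iter_collection(base_url, client_id):
    # Each page carries the absolute URL of the next page in
    # 'next_href'; a missing or empty value ends the listing.
    next_href = '%s?%s' % (base_url, urlencode({
        'limit': 50,
        'client_id': client_id,
        'linked_partitioning': '1',
    }))
    while next_href:
        page = json.loads(urlopen(next_href).read().decode('utf-8'))
        for item in page.get('collection') or []:
            yield item
        next_href = page.get('next_href')
```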
'url': 'http://www.southpark.de/alle-episoden/s18e09-hashtag-aufwärmen', + 'playlist_count': 4, + }, { + # non-ASCII characters in redirect URL + 'url': 'http://www.southpark.de/alle-episoden/s18e09', + 'playlist_count': 4, }] diff --git a/youtube_dl/extractor/space.py b/youtube_dl/extractor/space.py index c2d0d36a6..ebb5d6ec0 100644 --- a/youtube_dl/extractor/space.py +++ b/youtube_dl/extractor/space.py @@ -3,14 +3,14 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from .brightcove import BrightcoveIE +from .brightcove import BrightcoveLegacyIE from ..utils import RegexNotFoundError, ExtractorError class SpaceIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m)\.)?space\.com/\d+-(?P<title>[^/\.\?]*?)-video\.html' _TEST = { - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.space.com/23373-huge-martian-landforms-detail-revealed-by-european-probe-video.html', 'info_dict': { 'id': '2780937028001', @@ -31,8 +31,8 @@ class SpaceIE(InfoExtractor): brightcove_url = self._og_search_video_url(webpage) except RegexNotFoundError: # Other videos works fine with the info from the object - brightcove_url = BrightcoveIE._extract_brightcove_url(webpage) + brightcove_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) if brightcove_url is None: raise ExtractorError( 'The webpage does not contain a video', expected=True) - return self.url_result(brightcove_url, BrightcoveIE.ie_key()) + return self.url_result(brightcove_url, BrightcoveLegacyIE.ie_key()) diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py index 5fa6faf18..692fd78e8 100644 --- a/youtube_dl/extractor/spankwire.py +++ b/youtube_dl/extractor/spankwire.py @@ -6,9 +6,9 @@ from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_urlparse, - compat_urllib_request, ) from ..utils import ( + sanitized_Request, str_to_int, unified_strdate, ) @@ -16,8 +16,9 @@ from ..aes import aes_decrypt_text class SpankwireIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<id>[0-9]+)/?)' + _TESTS = [{ + # download URL pattern: */<height>P_<tbr>K_<video_id>.mp4 'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/', 'md5': '8bbfde12b101204b39e4b9fe7eb67095', 'info_dict': { @@ -30,14 +31,27 @@ class SpankwireIE(InfoExtractor): 'upload_date': '20070507', 'age_limit': 18, } - } + }, { + # download URL pattern: */mp4_<format_id>_<video_id>.mp4 + 'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/', + 'md5': '09b3c20833308b736ae8902db2f8d7e6', + 'info_dict': { + 'id': '1921551', + 'ext': 'mp4', + 'title': 'Titcums Compiloation I', + 'description': 'cum on tits', + 'uploader': 'dannyh78999', + 'uploader_id': '3056053', + 'upload_date': '20150822', + 'age_limit': 18, + }, + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('videoid') - url = 'http://www.' + mobj.group('url') + video_id = mobj.group('id') - req = compat_urllib_request.Request(url) + req = sanitized_Request('http://www.' 
+ mobj.group('url')) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) @@ -54,7 +68,7 @@ class SpankwireIE(InfoExtractor): r'by:\s*<a [^>]*>(.+?)</a>', webpage, 'uploader', fatal=False) uploader_id = self._html_search_regex( - r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"', + r'by:\s*<a href="/(?:user/viewProfile|Profile\.aspx)\?.*?UserId=(\d+).*?"', webpage, 'uploader id', fatal=False) upload_date = unified_strdate(self._html_search_regex( r'</a> on (.+?) at \d+:\d+', @@ -67,9 +81,10 @@ class SpankwireIE(InfoExtractor): r'<span\s+id="spCommentCount"[^>]*>([\d,\.]+)</span>', webpage, 'comment count', fatal=False)) - video_urls = list(map( - compat_urllib_parse_unquote, - re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage))) + videos = re.findall( + r'playerData\.cdnPath([0-9]{3,})\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage) + heights = [int(video[0]) for video in videos] + video_urls = list(map(compat_urllib_parse_unquote, [video[1] for video in videos])) if webpage.find('flashvars\.encrypted = "true"') != -1: password = self._search_regex( r'flashvars\.video_title = "([^"]+)', @@ -79,21 +94,22 @@ class SpankwireIE(InfoExtractor): video_urls)) formats = [] - for video_url in video_urls: + for height, video_url in zip(heights, video_urls): path = compat_urllib_parse_urlparse(video_url).path - format = path.split('/')[4].split('_')[:2] - resolution, bitrate_str = format - format = "-".join(format) - height = int(resolution.rstrip('Pp')) - tbr = int(bitrate_str.rstrip('Kk')) - formats.append({ + _, quality = path.split('/')[4].split('_')[:2] + f = { 'url': video_url, - 'resolution': resolution, - 'format': format, - 'tbr': tbr, 'height': height, - 'format_id': format, - }) + } + tbr = self._search_regex(r'^(\d+)[Kk]$', quality, 'tbr', default=None) + if tbr: + f.update({ + 'tbr': int(tbr), + 'format_id': '%dp' % height, + }) + else: + f['format_id'] = quality + formats.append(f) self._sort_formats(formats) age_limit = self._rta_search(webpage) diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py index b868241d5..39a7aaf9d 100644 --- a/youtube_dl/extractor/spiegel.py +++ b/youtube_dl/extractor/spiegel.py @@ -9,7 +9,7 @@ from .spiegeltv import SpiegeltvIE class SpiegelIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$' + _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$' _TESTS = [{ 'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html', 'md5': '2c2754212136f35fb4b19767d242f66e', @@ -39,6 +39,9 @@ class SpiegelIE(InfoExtractor): 'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. 
Hier kommen seine Antworten auf die besten sechs Fragen.', 'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"', } + }, { + 'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html', + 'only_matching': True, }] def _real_extract(self, url): @@ -55,7 +58,8 @@ class SpiegelIE(InfoExtractor): description = self._html_search_meta('description', webpage, 'description') base_url = self._search_regex( - r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL') + [r'server\s*:\s*(["\'])(?P<url>.+?)\1', r'var\s+server\s*=\s*"(?P<url>[^"]+)\"'], + webpage, 'server URL', group='url') xml_url = base_url + video_id + '.xml' idoc = self._download_xml(xml_url, video_id) diff --git a/youtube_dl/extractor/spiegeltv.py b/youtube_dl/extractor/spiegeltv.py index 27f4033c5..034bd47ff 100644 --- a/youtube_dl/extractor/spiegeltv.py +++ b/youtube_dl/extractor/spiegeltv.py @@ -77,17 +77,21 @@ class SpiegeltvIE(InfoExtractor): 'rtmp_live': True, }) elif determine_ext(endpoint) == 'm3u8': - m3u8_formats = self._extract_m3u8_formats( - endpoint.replace('[video]', play_path), - video_id, 'm4v', - preference=1, # Prefer hls since it allows to workaround georestriction - m3u8_id='hls', fatal=False) - if m3u8_formats is not False: - formats.extend(m3u8_formats) + formats.append({ + 'url': endpoint.replace('[video]', play_path), + 'ext': 'm4v', + 'format_id': 'hls', # Prefer hls since it allows to workaround georestriction + 'protocol': 'm3u8', + 'preference': 1, + 'http_headers': { + 'Accept-Encoding': 'deflate', # gzip causes trouble on the server side + }, + }) else: formats.append({ 'url': endpoint, }) + self._check_formats(formats, video_id) thumbnails = [] for image in media_json['images']: diff --git a/youtube_dl/extractor/sportdeutschland.py b/youtube_dl/extractor/sportdeutschland.py index 1a57aebf1..ebb75f059 100644 --- a/youtube_dl/extractor/sportdeutschland.py +++ b/youtube_dl/extractor/sportdeutschland.py @@ -4,11 +4,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( parse_iso8601, + sanitized_Request, ) @@ -38,10 +36,12 @@ class SportDeutschlandIE(InfoExtractor): 'upload_date': '20140825', 'description': 'md5:60a20536b57cee7d9a4ec005e8687504', 'timestamp': 1408976060, + 'duration': 2732, 'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. 
Keun Lee', 'thumbnail': 're:^https?://.*\.jpg$', 'view_count': int, 'categories': ['Li-Ning Badminton WM 2014'], + } }] @@ -50,20 +50,19 @@ class SportDeutschlandIE(InfoExtractor): video_id = mobj.group('id') sport_id = mobj.group('sport') - api_url = 'http://splink.tv/api/permalinks/%s/%s' % ( + api_url = 'http://proxy.vidibusdynamic.net/sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % ( sport_id, video_id) - req = compat_urllib_request.Request(api_url, headers={ + req = sanitized_Request(api_url, headers={ 'Accept': 'application/vnd.vidibus.v2.html+json', 'Referer': url, }) data = self._download_json(req, video_id) - categories = list(data.get('section', {}).get('tags', {}).values()) asset = data['asset'] - assets_info = self._download_json(asset['url'], video_id) + categories = [data['section']['title']] formats = [] - smil_url = assets_info['video'] + smil_url = asset['video'] if '.smil' in smil_url: m3u8_url = smil_url.replace('.smil', '.m3u8') formats.extend( @@ -91,6 +90,7 @@ class SportDeutschlandIE(InfoExtractor): 'title': asset['title'], 'thumbnail': asset.get('image'), 'description': asset.get('teaser'), + 'duration': asset.get('duration'), 'categories': categories, 'view_count': asset.get('views'), 'rtmp_live': asset.get('live'), diff --git a/youtube_dl/extractor/srf.py b/youtube_dl/extractor/srf.py index 77eec0bc7..16e1bf2d6 100644 --- a/youtube_dl/extractor/srf.py +++ b/youtube_dl/extractor/srf.py @@ -11,7 +11,7 @@ from ..utils import ( class SrfIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/tv/[^/]+/video/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})' + _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/(?:tv|radio)/[^/]+/(?P<media_type>video|audio)/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})' _TESTS = [{ 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'md5': '4cd93523723beff51bb4bee974ee238d', @@ -36,6 +36,20 @@ class SrfIE(InfoExtractor): 'timestamp': 1373493600, }, }, { + 'url': 'http://www.srf.ch/play/radio/hoerspielarchiv-srf-musikwelle/audio/saegel-ohni-wind-von-jakob-stebler?id=415bf3d3-6429-4de7-968d-95866e37cfbc', + 'md5': '', + 'info_dict': { + 'id': '415bf3d3-6429-4de7-968d-95866e37cfbc', + 'display_id': 'saegel-ohni-wind-von-jakob-stebler', + 'ext': 'mp3', + 'upload_date': '20080518', + 'title': '«Sägel ohni Wind» von Jakob Stebler', + 'timestamp': 1211112000, + }, + 'params': { + 'skip_download': True, # requires rtmpdump + }, + }, { 'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'only_matching': True, }, { @@ -44,11 +58,13 @@ class SrfIE(InfoExtractor): }] def _real_extract(self, url): - video_id = self._match_id(url) - display_id = re.match(self._VALID_URL, url).group('display_id') or video_id + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + media_type = mobj.group('media_type') + display_id = mobj.group('display_id') or video_id video_data = self._download_xml( - 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/video/play/%s.xml' % video_id, + 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/%s/play/%s.xml' % (media_type, video_id), display_id) title = xpath_text( @@ -64,7 +80,7 @@ class SrfIE(InfoExtractor): for url_node in item.findall('url'): quality = url_node.attrib['quality'] full_url = url_node.text - original_ext = 
determine_ext(full_url) + original_ext = determine_ext(full_url).lower() format_id = '%s-%s' % (quality, item.attrib['protocol']) if original_ext == 'f4m': formats.extend(self._extract_f4m_formats( diff --git a/youtube_dl/extractor/stitcher.py b/youtube_dl/extractor/stitcher.py new file mode 100644 index 000000000..d5c852f52 --- /dev/null +++ b/youtube_dl/extractor/stitcher.py @@ -0,0 +1,81 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + int_or_none, + js_to_json, + unescapeHTML, +) + + +class StitcherIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)' + _TESTS = [{ + 'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true', + 'md5': '391dd4e021e6edeb7b8e68fbf2e9e940', + 'info_dict': { + 'id': '40789481', + 'ext': 'mp3', + 'title': 'Machine Learning Mastery and Cancer Clusters', + 'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3', + 'duration': 1604, + 'thumbnail': 're:^https?://.*\.jpg', + }, + }, { + 'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true', + 'info_dict': { + 'id': '40846275', + 'display_id': 'the-rare-hourlong-comedy-plus', + 'ext': 'mp3', + 'title': "The CW's 'Crazy Ex-Girlfriend'", + 'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17', + 'duration': 2235, + 'thumbnail': 're:^https?://.*\.jpg', + }, + 'params': { + 'skip_download': True, + }, + }, { + # escaped title + 'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true', + 'only_matching': True, + }, { + 'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + audio_id = mobj.group('id') + display_id = mobj.group('display_id') or audio_id + + webpage = self._download_webpage(url, display_id) + + episode = self._parse_json( + js_to_json(self._search_regex( + r'(?s)var\s+stitcher\s*=\s*({.+?});\n', webpage, 'episode config')), + display_id)['config']['episode'] + + title = unescapeHTML(episode['title']) + formats = [{ + 'url': episode[episode_key], + 'ext': determine_ext(episode[episode_key]) or 'mp3', + 'vcodec': 'none', + } for episode_key in ('episodeURL',) if episode.get(episode_key)] + description = self._search_regex( + r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False) + duration = int_or_none(episode.get('duration')) + thumbnail = episode.get('episodeImage') + + return { + 'id': audio_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'duration': duration, + 'thumbnail': thumbnail, + 'formats': formats, + } diff --git a/youtube_dl/extractor/streamcloud.py b/youtube_dl/extractor/streamcloud.py index d4e134015..77841b946 100644 --- a/youtube_dl/extractor/streamcloud.py +++ b/youtube_dl/extractor/streamcloud.py @@ -4,10 +4,8 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse +from ..utils import sanitized_Request class StreamcloudIE(InfoExtractor): @@ -43,7 +41,7 @@ class StreamcloudIE(InfoExtractor): headers = { b'Content-Type': b'application/x-www-form-urlencoded', } - req = compat_urllib_request.Request(url, post, 
headers) + req = sanitized_Request(url, post, headers) webpage = self._download_webpage( req, video_id, note='Downloading video page ...') diff --git a/youtube_dl/extractor/streamcz.py b/youtube_dl/extractor/streamcz.py index e92b93285..d3d2b7eb7 100644 --- a/youtube_dl/extractor/streamcz.py +++ b/youtube_dl/extractor/streamcz.py @@ -5,11 +5,9 @@ import hashlib import time from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( int_or_none, + sanitized_Request, ) @@ -54,7 +52,7 @@ class StreamCZIE(InfoExtractor): video_id = self._match_id(url) api_path = '/episode/%s' % video_id - req = compat_urllib_request.Request(self._API_URL + api_path) + req = sanitized_Request(self._API_URL + api_path) req.add_header('Api-Password', _get_api_key(api_path)) data = self._download_json(req, video_id) diff --git a/youtube_dl/extractor/tapely.py b/youtube_dl/extractor/tapely.py index f1f43d0a7..ed560bd24 100644 --- a/youtube_dl/extractor/tapely.py +++ b/youtube_dl/extractor/tapely.py @@ -4,19 +4,17 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( clean_html, ExtractorError, float_or_none, parse_iso8601, + sanitized_Request, ) class TapelyIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?' + _VALID_URL = r'https?://(?:www\.)?(?:tape\.ly|tapely\.com)/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?' _API_URL = 'http://tape.ly/showtape?id={0:}' _S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}' _SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}' @@ -42,6 +40,10 @@ class TapelyIE(InfoExtractor): 'ext': 'm4a', }, }, + { + 'url': 'https://tapely.com/my-grief-as-told-by-water', + 'only_matching': True, + }, ] def _real_extract(self, url): @@ -49,7 +51,7 @@ class TapelyIE(InfoExtractor): display_id = mobj.group('id') playlist_url = self._API_URL.format(display_id) - request = compat_urllib_request.Request(playlist_url) + request = sanitized_Request(playlist_url) request.add_header('X-Requested-With', 'XMLHttpRequest') request.add_header('Accept', 'application/json') request.add_header('Referer', url) diff --git a/youtube_dl/extractor/teachingchannel.py b/youtube_dl/extractor/teachingchannel.py index 117afa9bf..e0477382c 100644 --- a/youtube_dl/extractor/teachingchannel.py +++ b/youtube_dl/extractor/teachingchannel.py @@ -16,6 +16,7 @@ class TeachingChannelIE(InfoExtractor): 'ext': 'mp4', 'title': 'A History of Teaming', 'description': 'md5:2a9033db8da81f2edffa4c99888140b3', + 'duration': 422.255, }, 'params': { # m3u8 download diff --git a/youtube_dl/extractor/tele13.py b/youtube_dl/extractor/tele13.py new file mode 100644 index 000000000..a363b4d40 --- /dev/null +++ b/youtube_dl/extractor/tele13.py @@ -0,0 +1,81 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from .youtube import YoutubeIE +from ..utils import ( + js_to_json, + qualities, + determine_ext, +) + + +class Tele13IE(InfoExtractor): + _VALID_URL = r'^http://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)' + _TESTS = [ + { + 'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', + 'md5': '4cb1fa38adcad8fea88487a078831755', + 'info_dict': { + 'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', + 'ext': 'mp4', + 'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda', + }, + 
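The new Tele13 extractor in this hunk ranks its direct HTTP formats with the `qualities()` helper (see the formats loop just below): preference follows position in the label list, so 'HD' beats 'SD' beats 'Móvil'. A stand-in re-implementation showing the assumed semantics (youtube-dl's real helper lives in `utils.py`):
```
def qualities(quality_ids):
    # Later labels rank higher; unknown labels sort below everything.
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


preference = qualities(['Móvil', 'SD', 'HD'])
print(preference('HD'), preference('SD'), preference('4K'))  # 2 1 -1
```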
'params': { + # HTTP Error 404: Not Found + 'skip_download': True, + }, + }, + { + 'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok', + 'md5': '867adf6a3b3fef932c68a71d70b70946', + 'info_dict': { + 'id': 'rOoKv2OMpOw', + 'ext': 'mp4', + 'title': 'Shooting star seen on 7-Sep-2015', + 'description': 'md5:7292ff2a34b2f673da77da222ae77e1e', + 'uploader': 'Porjai Jaturongkhakun', + 'upload_date': '20150906', + 'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw', + }, + 'add_ie': ['Youtube'], + } + ] + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + setup_js = self._search_regex(r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)", webpage, 'setup code') + sources = self._parse_json(self._search_regex(r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'), display_id, js_to_json) + + preference = qualities(['Móvil', 'SD', 'HD']) + formats = [] + urls = [] + for f in sources: + format_url = f['file'] + if format_url and format_url not in urls: + ext = determine_ext(format_url) + if ext == 'm3u8': + m3u8_formats = self._extract_m3u8_formats(format_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + elif YoutubeIE.suitable(format_url): + return self.url_result(format_url, 'Youtube') + else: + formats.append({ + 'url': format_url, + 'format_id': f.get('label'), + 'preference': preference(f.get('label')), + 'ext': ext, + }) + urls.append(format_url) + self._sort_formats(formats) + + return { + 'id': display_id, + 'title': self._search_regex(r'title\s*:\s*"([^"]+)"', setup_js, 'title'), + 'description': self._html_search_meta('description', webpage, 'description'), + 'thumbnail': self._search_regex(r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None), + 'formats': formats, + } diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py index a0c744fd1..2c8e9b941 100644 --- a/youtube_dl/extractor/telecinco.py +++ b/youtube_dl/extractor/telecinco.py @@ -1,26 +1,94 @@ # coding: utf-8 from __future__ import unicode_literals -from .mitele import MiTeleIE +import json +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse, + compat_urllib_parse_unquote, + compat_urlparse, +) +from ..utils import ( + get_element_by_attribute, + parse_duration, + strip_jsonp, +) -class TelecincoIE(MiTeleIE): - IE_NAME = 'telecinco.es' - _VALID_URL = r'https?://www\.telecinco\.es/[^/]+/[^/]+/(?:[^/]+/)?(?P<id>.*?)\.html' + +class TelecincoIE(InfoExtractor): + IE_DESC = 'telecinco.es, cuatro.com and mediaset.es' + _VALID_URL = r'https?://www\.(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html' _TESTS = [{ 'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html', + 'md5': '5cbef3ad5ef17bf0d21570332d140729', 'info_dict': { 'id': 'MDSVID20141015_0058', 'ext': 'mp4', 'title': 'Con Martín Berasategui, hacer un bacalao al ...', 'duration': 662, }, - 'params': { - # m3u8 download - 'skip_download': True, + }, { + 'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html', + 'md5': '0a5b9f3cc8b074f50a0578f823a12694', + 'info_dict': { + 'id': 'MDSVID20150916_0128', + 'ext': 'mp4', + 'title': '¿Quién es este ex futbolista con el que hablan ...', + 'duration': 79, + }, + }, { + 'url': 
'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html', + 'md5': 'ad1bfaaba922dd4a295724b05b68f86a', + 'info_dict': { + 'id': 'MDSVID20150513_0220', + 'ext': 'mp4', + 'title': '#DOYLACARA. Con la trata no hay trato', + 'duration': 50, }, }, { 'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html', 'only_matching': True, + }, { + 'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html', + 'only_matching': True, }] + + def _real_extract(self, url): + episode = self._match_id(url) + webpage = self._download_webpage(url, episode) + embed_data_json = self._search_regex( + r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data', + ).replace('\'', '"') + embed_data = json.loads(embed_data_json) + + domain = embed_data['mediaUrl'] + if not domain.startswith('http'): + # only happens in telecinco.es videos + domain = 'http://' + domain + info_url = compat_urlparse.urljoin( + domain, + compat_urllib_parse_unquote(embed_data['flashvars']['host']) + ) + info_el = self._download_xml(info_url, episode).find('./video/info') + + video_link = info_el.find('videoUrl/link').text + token_query = compat_urllib_parse.urlencode({'id': video_link}) + token_info = self._download_json( + embed_data['flashvars']['ov_tk'] + '?' + token_query, + episode, + transform_source=strip_jsonp + ) + formats = self._extract_m3u8_formats( + token_info['tokenizedUrl'], episode, ext='mp4', entry_protocol='m3u8_native') + + return { + 'id': embed_data['videoId'], + 'display_id': episode, + 'title': info_el.find('title').text, + 'formats': formats, + 'description': get_element_by_attribute('class', 'text', webpage), + 'thumbnail': info_el.find('thumb').text, + 'duration': parse_duration(info_el.find('duration').text), + } diff --git a/youtube_dl/extractor/telegraaf.py b/youtube_dl/extractor/telegraaf.py new file mode 100644 index 000000000..6f8333cfc --- /dev/null +++ b/youtube_dl/extractor/telegraaf.py @@ -0,0 +1,35 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import remove_end + + +class TelegraafIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?telegraaf\.nl/tv/(?:[^/]+/)+(?P<id>\d+)/[^/]+\.html' + _TEST = { + 'url': 'http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html', + 'md5': '83245a9779bcc4a24454bfd53c65b6dc', + 'info_dict': { + 'id': '24353229', + 'ext': 'mp4', + 'title': 'Tikibad ontruimd wegens brand', + 'description': 'md5:05ca046ff47b931f9b04855015e163a4', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 33, + }, + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + playlist_url = self._search_regex( + r"iframe\.loadPlayer\('([^']+)'", webpage, 'player') + + entries = self._extract_xspf_playlist(playlist_url, playlist_id) + title = remove_end(self._og_search_title(webpage), ' - VIDEO') + description = self._og_search_description(webpage) + + return self.playlist_result(entries, playlist_id, title, description) diff --git a/youtube_dl/extractor/tf1.py b/youtube_dl/extractor/tf1.py index 3a68eaa80..6890021cf 100644 --- a/youtube_dl/extractor/tf1.py +++ b/youtube_dl/extractor/tf1.py @@ -6,7 +6,7 @@ from .common import InfoExtractor class TF1IE(InfoExtractor): """TF1 uses the wat.tv player.""" - _VALID_URL = 
r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/.*?-(?P<id>\d+)(?:-\d+)?\.html' + _VALID_URL = r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/(?:[^/]+/)*(?P<id>.+?)\.html' _TESTS = [{ 'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html', 'info_dict': { @@ -22,7 +22,7 @@ class TF1IE(InfoExtractor): }, { 'url': 'http://www.tfou.fr/chuggington/videos/le-grand-mysterioso-chuggington-7085291-739.html', 'info_dict': { - 'id': '12043945', + 'id': 'le-grand-mysterioso-chuggington-7085291-739', 'ext': 'mp4', 'title': 'Le grand Mystérioso - Chuggington', 'description': 'Le grand Mystérioso - Emery rêve qu\'un article lui soit consacré dans le journal.', @@ -32,22 +32,24 @@ class TF1IE(InfoExtractor): # Sometimes wat serves the whole file with the --test option 'skip_download': True, }, + 'skip': 'HTTP Error 410: Gone', }, { 'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html', 'only_matching': True, }, { 'url': 'http://lci.tf1.fr/sept-a-huit/videos/sept-a-huit-du-24-mai-2015-8611550.html', 'only_matching': True, + }, { + 'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html', + 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - embed_url = self._html_search_regex( - r'["\'](https?://www.wat.tv/embedframe/.*?)["\']', webpage, 'embed url') - embed_page = self._download_webpage(embed_url, video_id, - 'Downloading embed player page') - wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id') + wat_id = self._html_search_regex( + r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1', + webpage, 'wat id', group='id') wat_info = self._download_json( 'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id) return self.url_result(wat_info['media']['url'], 'Wat') diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py index 83d833e30..0bf6726b5 100644 --- a/youtube_dl/extractor/theplatform.py +++ b/youtube_dl/extractor/theplatform.py @@ -1,7 +1,7 @@ +# -*- coding: utf-8 -*- from __future__ import unicode_literals import re -import json import time import hmac import binascii @@ -10,20 +10,79 @@ import hashlib from .common import InfoExtractor from ..compat import ( - compat_str, + compat_parse_qs, + compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, ExtractorError, - xpath_with_ns, - unsmuggle_url, + float_or_none, int_or_none, + sanitized_Request, + unsmuggle_url, + url_basename, + xpath_with_ns, ) -_x = lambda p: xpath_with_ns(p, {'smil': 'http://www.w3.org/2005/SMIL21/Language'}) +default_ns = 'http://www.w3.org/2005/SMIL21/Language' +_x = lambda p: xpath_with_ns(p, {'smil': default_ns}) -class ThePlatformIE(InfoExtractor): +class ThePlatformBaseIE(InfoExtractor): + def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'): + meta = self._download_xml(smil_url, video_id, note=note) + try: + error_msg = next( + n.attrib['abstract'] + for n in meta.findall(_x('.//smil:ref')) + if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired') + except StopIteration: + pass + else: + raise ExtractorError(error_msg, expected=True) + + formats = self._parse_smil_formats( + meta, smil_url, video_id, namespace=default_ns, + # the parameters are from syfy.com, other sites may use others, + # they also work for nbc.com + f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'}, + 
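The `_extract_theplatform_smil` helper introduced above detects geo-restriction and expiry by looking for `<ref>` nodes in the SMIL body whose `title` is 'Geographic Restriction' or 'Expired'; the human-readable message travels in the `abstract` attribute. A toy reproduction against a hand-written SMIL snippet (assumed shape, trimmed to the relevant attributes):
```
import xml.etree.ElementTree as ET

SMIL = '''<smil xmlns="http://www.w3.org/2005/SMIL21/Language">
  <body><seq>
    <ref title="Geographic Restriction"
         abstract="This video is not available in your region."/>
  </seq></body></smil>'''

NS = '{http://www.w3.org/2005/SMIL21/Language}'
for ref in ET.fromstring(SMIL).iter(NS + 'ref'):
    if ref.get('title') in ('Geographic Restriction', 'Expired'):
        # the extractor raises ExtractorError(abstract, expected=True)
        print('would raise: %s' % ref.get('abstract'))
```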
transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src)) + + for _format in formats: + ext = determine_ext(_format['url']) + if ext == 'once': + _format['ext'] = 'mp4' + + self._sort_formats(formats) + + subtitles = self._parse_smil_subtitles(meta, default_ns) + + return formats, subtitles + + def get_metadata(self, path, video_id): + info_url = 'http://link.theplatform.com/s/%s?format=preview' % path + info = self._download_json(info_url, video_id) + + subtitles = {} + captions = info.get('captions') + if isinstance(captions, list): + for caption in captions: + lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type') + subtitles[lang] = [{ + 'ext': 'srt' if mime == 'text/srt' else 'ttml', + 'url': src, + }] + + return { + 'title': info['title'], + 'subtitles': subtitles, + 'description': info['description'], + 'thumbnail': info['defaultThumbnailUrl'], + 'duration': int_or_none(info.get('duration'), 1000), + } + + +class ThePlatformIE(ThePlatformBaseIE): _VALID_URL = r'''(?x) (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/ (?:(?P<media>(?:[^/]+/)+select/media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))? @@ -67,6 +126,25 @@ class ThePlatformIE(InfoExtractor): }, { 'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7', 'only_matching': True, + }, { + 'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701', + 'md5': '734f3790fb5fc4903da391beeebc4836', + 'info_dict': { + 'id': 'tdy_or_siri_150701', + 'ext': 'mp4', + 'title': 'iPhone Siri’s sassy response to a math question has people talking', + 'description': 'md5:a565d1deadd5086f3331d57298ec6333', + 'duration': 83.0, + 'thumbnail': 're:^https?://.*\.jpg$', + 'timestamp': 1435752600, + 'upload_date': '20150701', + 'categories': ['Today/Shows/Orange Room', 'Today/Sections/Money', 'Today/Topics/Tech', "Today/Topics/Editor's picks"], + }, + }, { + # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1 + # geo-restricted (US), HLS encrypted with AES-128 + 'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781', + 'only_matching': True, }] @staticmethod @@ -101,14 +179,54 @@ class ThePlatformIE(InfoExtractor): path += '/media' path += '/' + video_id + qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query) + if 'guid' in qs_dict: + webpage = self._download_webpage(url, video_id) + scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage) + feed_id = None + # feed id usually locates in the last script. 
+            # Seems there's no pattern for the interested script filename, so
+            # I try one by one
+            for script in reversed(scripts):
+                feed_script = self._download_webpage(
+                    self._proto_relative_url(script, 'http:'),
+                    video_id, 'Downloading feed script')
+                feed_id = self._search_regex(
+                    r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
+                    'default feed id', default=None)
+                if feed_id is not None:
+                    break
+            if feed_id is None:
+                raise ExtractorError('Unable to find feed id')
+            return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
+                provider_id, feed_id, qs_dict['guid'][0]))
+
         if smuggled_data.get('force_smil_url', False):
             smil_url = url
+        # Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
+        elif '/guid/' in url:
+            headers = {}
+            source_url = smuggled_data.get('source_url')
+            if source_url:
+                headers['Referer'] = source_url
+            request = sanitized_Request(url, headers=headers)
+            webpage = self._download_webpage(request, video_id)
+            smil_url = self._search_regex(
+                r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
+                webpage, 'smil url', group='url')
+            path = self._search_regex(
+                r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
+            smil_url += '?' if '?' not in smil_url else '&' + 'formats=m3u,mpeg4&format=SMIL'
         elif mobj.group('config'):
             config_url = url + '&form=json'
             config_url = config_url.replace('swf/', 'config/')
             config_url = config_url.replace('onsite/', 'onsite/config/')
             config = self._download_json(config_url, video_id, 'Downloading config')
-            smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
+            if 'releaseUrl' in config:
+                release_url = config['releaseUrl']
+            else:
+                release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
+            smil_url = release_url + '&format=SMIL&formats=MPEG4&manifest=f4m'
         else:
             smil_url = 'http://link.theplatform.com/s/%s/meta.smil?format=smil&mbr=true' % path
@@ -116,95 +234,85 @@ class ThePlatformIE(InfoExtractor):
         if sig:
             smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])

-        meta = self._download_xml(smil_url, video_id)
-        try:
-            error_msg = next(
-                n.attrib['abstract']
-                for n in meta.findall(_x('.//smil:ref'))
-                if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired')
-        except StopIteration:
-            pass
-        else:
-            raise ExtractorError(error_msg, expected=True)
+        formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)

-        info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
-        info_json = self._download_webpage(info_url, video_id)
-        info = json.loads(info_json)
+        ret = self.get_metadata(path, video_id)
+        combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
+        ret.update({
+            'id': video_id,
+            'formats': formats,
+            'subtitles': combined_subtitles,
+        })
+
+        return ret
+
+
+class ThePlatformFeedIE(ThePlatformBaseIE):
+    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
+    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
+    _TEST = {
+        # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
+        'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
+        'md5': '22d2b84f058d3586efcd99e57d59d314',
+        'info_dict': {
+            'id': 'n_hardball_5biden_140207',
+            'ext': 'mp4',
+            'title': 'The Biden factor: will Joe run in 2016?',
+            'description': 'Could Vice
President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.', + 'thumbnail': 're:^https?://.*\.jpg$', + 'upload_date': '20140208', + 'timestamp': 1391824260, + 'duration': 467.0, + 'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'], + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + + video_id = mobj.group('id') + provider_id = mobj.group('provider_id') + feed_id = mobj.group('feed_id') + + real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id) + feed = self._download_json(real_url, video_id) + entry = feed['entries'][0] + formats = [] subtitles = {} - captions = info.get('captions') - if isinstance(captions, list): - for caption in captions: - lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type') - subtitles[lang] = [{ - 'ext': 'srt' if mime == 'text/srt' else 'ttml', - 'url': src, - }] + first_video_id = None + duration = None + for item in entry['media$content']: + smil_url = item['plfile$url'] + '&format=SMIL&Tracking=true&Embedded=true&formats=MPEG4,F4M' + cur_video_id = url_basename(smil_url) + if first_video_id is None: + first_video_id = cur_video_id + duration = float_or_none(item.get('plfile$duration')) + cur_formats, cur_subtitles = self._extract_theplatform_smil(smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id) + formats.extend(cur_formats) + subtitles = self._merge_subtitles(subtitles, cur_subtitles) - head = meta.find(_x('smil:head')) - body = meta.find(_x('smil:body')) + self._sort_formats(formats) - f4m_node = body.find(_x('smil:seq//smil:video')) - if f4m_node is None: - f4m_node = body.find(_x('smil:seq/smil:video')) - if f4m_node is not None and '.f4m' in f4m_node.attrib['src']: - f4m_url = f4m_node.attrib['src'] - if 'manifest.f4m?' not in f4m_url: - f4m_url += '?' 
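`ThePlatformFeedIE` above may see several physical files per feed entry (`media$content`), so the per-file subtitle dictionaries have to be folded together; that is what the `_merge_subtitles` calls do. A stand-in with the assumed semantics (one list of track variants per language, concatenated across files):
```
def merge_subtitles(a, b):
    merged = dict(a)
    for lang, variants in b.items():
        merged[lang] = merged.get(lang, []) + variants
    return merged

first = {'en': [{'ext': 'srt', 'url': 'http://example.com/en.srt'}]}
second = {'en': [{'ext': 'ttml', 'url': 'http://example.com/en.ttml'}],
          'de': [{'ext': 'srt', 'url': 'http://example.com/de.srt'}]}
print(merge_subtitles(first, second))
# {'en': [srt + ttml variants], 'de': [srt variant]}
```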
- # the parameters are from syfy.com, other sites may use others, - # they also work for nbc.com - f4m_url += '&g=UXWGVKRWHFSP&hdcore=3.0.3' - formats = self._extract_f4m_formats(f4m_url, video_id) - else: - formats = [] - switch = body.find(_x('smil:switch')) - if switch is None: - switch = body.find(_x('smil:par//smil:switch')) - if switch is None: - switch = body.find(_x('smil:par/smil:switch')) - if switch is None: - switch = body.find(_x('smil:par')) - if switch is not None: - base_url = head.find(_x('smil:meta')).attrib['base'] - for f in switch.findall(_x('smil:video')): - attr = f.attrib - width = int_or_none(attr.get('width')) - height = int_or_none(attr.get('height')) - vbr = int_or_none(attr.get('system-bitrate'), 1000) - format_id = '%dx%d_%dk' % (width, height, vbr) - formats.append({ - 'format_id': format_id, - 'url': base_url, - 'play_path': 'mp4:' + attr['src'], - 'ext': 'flv', - 'width': width, - 'height': height, - 'vbr': vbr, - }) - else: - switch = body.find(_x('smil:seq//smil:switch')) - if switch is None: - switch = body.find(_x('smil:seq/smil:switch')) - for f in switch.findall(_x('smil:video')): - attr = f.attrib - vbr = int_or_none(attr.get('system-bitrate'), 1000) - ext = determine_ext(attr['src']) - if ext == 'once': - ext = 'mp4' - formats.append({ - 'format_id': compat_str(vbr), - 'url': attr['src'], - 'vbr': vbr, - 'ext': ext, - }) - self._sort_formats(formats) + thumbnails = [{ + 'url': thumbnail['plfile$url'], + 'width': int_or_none(thumbnail.get('plfile$width')), + 'height': int_or_none(thumbnail.get('plfile$height')), + } for thumbnail in entry.get('media$thumbnails', [])] - return { + timestamp = int_or_none(entry.get('media$availableDate'), scale=1000) + categories = [item['media$name'] for item in entry.get('media$categories', [])] + + ret = self.get_metadata('%s/%s' % (provider_id, first_video_id), video_id) + subtitles = self._merge_subtitles(subtitles, ret['subtitles']) + ret.update({ 'id': video_id, - 'title': info['title'], - 'subtitles': subtitles, 'formats': formats, - 'description': info['description'], - 'thumbnail': info['defaultThumbnailUrl'], - 'duration': int_or_none(info.get('duration'), 1000), - } + 'subtitles': subtitles, + 'thumbnails': thumbnails, + 'duration': duration, + 'timestamp': timestamp, + 'categories': categories, + }) + + return ret diff --git a/youtube_dl/extractor/tlc.py b/youtube_dl/extractor/tlc.py index 13263614c..d6d038a8d 100644 --- a/youtube_dl/extractor/tlc.py +++ b/youtube_dl/extractor/tlc.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from .brightcove import BrightcoveIE +from .brightcove import BrightcoveLegacyIE from .discovery import DiscoveryIE from ..compat import compat_urlparse @@ -66,6 +66,6 @@ class TlcDeIE(InfoExtractor): return { '_type': 'url', - 'url': BrightcoveIE._extract_brightcove_url(iframe), - 'ie': BrightcoveIE.ie_key(), + 'url': BrightcoveLegacyIE._extract_brightcove_url(iframe), + 'ie': BrightcoveLegacyIE.ie_key(), } diff --git a/youtube_dl/extractor/toggle.py b/youtube_dl/extractor/toggle.py new file mode 100644 index 000000000..a47239952 --- /dev/null +++ b/youtube_dl/extractor/toggle.py @@ -0,0 +1,194 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + ExtractorError, + float_or_none, + int_or_none, + parse_iso8601, + sanitized_Request, +) + + +class ToggleIE(InfoExtractor): + IE_NAME = 'toggle' + _VALID_URL = 
r'https?://video\.toggle\.sg/(?:en|zh)/(?:series|clips|movies)/(?:[^/]+/)+(?P<id>[0-9]+)' + _TESTS = [{ + 'url': 'http://video.toggle.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115', + 'info_dict': { + 'id': '343115', + 'ext': 'mp4', + 'title': 'Lion Moms Premiere', + 'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b', + 'upload_date': '20150910', + 'timestamp': 1441858274, + }, + 'params': { + 'skip_download': 'm3u8 download', + } + }, { + 'note': 'DRM-protected video', + 'url': 'http://video.toggle.sg/en/movies/dug-s-special-mission/341413', + 'info_dict': { + 'id': '341413', + 'ext': 'wvm', + 'title': 'Dug\'s Special Mission', + 'description': 'md5:e86c6f4458214905c1772398fabc93e0', + 'upload_date': '20150827', + 'timestamp': 1440644006, + }, + 'params': { + 'skip_download': 'DRM-protected wvm download', + } + }, { + # this also tests correct video id extraction + 'note': 'm3u8 links are geo-restricted, but Android/mp4 is okay', + 'url': 'http://video.toggle.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861', + 'info_dict': { + 'id': '332861', + 'ext': 'mp4', + 'title': '28th SEA Games (5 Show) - Episode 11', + 'description': 'md5:3cd4f5f56c7c3b1340c50a863f896faa', + 'upload_date': '20150605', + 'timestamp': 1433480166, + }, + 'params': { + 'skip_download': 'DRM-protected wvm download', + }, + 'skip': 'm3u8 links are geo-restricted' + }, { + 'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331', + 'only_matching': True, + }, { + 'url': 'http://video.toggle.sg/zh/series/zero-calling-s2-hd/ep13/336367', + 'only_matching': True, + }, { + 'url': 'http://video.toggle.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302', + 'only_matching': True, + }, { + 'url': 'http://video.toggle.sg/en/movies/seven-days/321936', + 'only_matching': True, + }] + + _FORMAT_PREFERENCES = { + 'wvm-STBMain': -10, + 'wvm-iPadMain': -20, + 'wvm-iPhoneMain': -30, + 'wvm-Android': -40, + } + _API_USER = 'tvpapi_147' + _API_PASS = '11111' + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + url, video_id, note='Downloading video page') + + api_user = self._search_regex( + r'apiUser\s*:\s*(["\'])(?P<user>.+?)\1', webpage, 'apiUser', + default=self._API_USER, group='user') + api_pass = self._search_regex( + r'apiPass\s*:\s*(["\'])(?P<pass>.+?)\1', webpage, 'apiPass', + default=self._API_PASS, group='pass') + + params = { + 'initObj': { + 'Locale': { + 'LocaleLanguage': '', + 'LocaleCountry': '', + 'LocaleDevice': '', + 'LocaleUserState': 0 + }, + 'Platform': 0, + 'SiteGuid': 0, + 'DomainID': '0', + 'UDID': '', + 'ApiUser': api_user, + 'ApiPass': api_pass + }, + 'MediaID': video_id, + 'mediaType': 0, + } + + req = sanitized_Request( + 'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo', + json.dumps(params).encode('utf-8')) + info = self._download_json(req, video_id, 'Downloading video info json') + + title = info['MediaName'] + + formats = [] + for video_file in info.get('Files', []): + video_url, vid_format = video_file.get('URL'), video_file.get('Format') + if not video_url or not vid_format: + continue + ext = determine_ext(video_url) + vid_format = vid_format.replace(' ', '') + # if geo-restricted, m3u8 is inaccessible, but mp4 is okay + if ext == 'm3u8': + m3u8_formats = self._extract_m3u8_formats( + video_url, video_id, ext='mp4', m3u8_id=vid_format, + note='Downloading %s m3u8 information' % vid_format, + 
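The Toggle extractor in this hunk talks to the tvinci gateway by POSTing a JSON document rather than form-encoded fields, hence the `json.dumps(params).encode('utf-8')` request body. Stripped to its essentials (the `MediaID` value is a placeholder; the network call is left commented out):
```
import json

try:  # Python 3
    from urllib.request import Request, urlopen
except ImportError:  # Python 2
    from urllib2 import Request, urlopen

params = {'MediaID': '343115', 'mediaType': 0}
req = Request(
    'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo',
    json.dumps(params).encode('utf-8'))  # a bytes body makes this a POST
# info = json.loads(urlopen(req).read().decode('utf-8'))
```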
errnote='Failed to download %s m3u8 information' % vid_format, + fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + elif ext in ('mp4', 'wvm'): + # wvm are drm-protected files + formats.append({ + 'ext': ext, + 'url': video_url, + 'format_id': vid_format, + 'preference': self._FORMAT_PREFERENCES.get(ext + '-' + vid_format) or -1, + 'format_note': 'DRM-protected video' if ext == 'wvm' else None + }) + if not formats: + # Most likely because geo-blocked + raise ExtractorError('No downloadable videos found', expected=True) + self._sort_formats(formats) + + duration = int_or_none(info.get('Duration')) + description = info.get('Description') + created_at = parse_iso8601(info.get('CreationDate') or None) + + average_rating = float_or_none(info.get('Rating')) + view_count = int_or_none(info.get('ViewCounter') or info.get('view_counter')) + like_count = int_or_none(info.get('LikeCounter') or info.get('like_counter')) + + thumbnails = [] + for picture in info.get('Pictures', []): + if not isinstance(picture, dict): + continue + pic_url = picture.get('URL') + if not pic_url: + continue + thumbnail = { + 'url': pic_url, + } + pic_size = picture.get('PicSize', '') + m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size) + if m: + thumbnail.update({ + 'width': int(m.group('width')), + 'height': int(m.group('height')), + }) + thumbnails.append(thumbnail) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'duration': duration, + 'timestamp': created_at, + 'average_rating': average_rating, + 'view_count': view_count, + 'like_count': like_count, + 'thumbnails': thumbnails, + 'formats': formats, + } diff --git a/youtube_dl/extractor/trilulilu.py b/youtube_dl/extractor/trilulilu.py index 185accc4b..a800449e9 100644 --- a/youtube_dl/extractor/trilulilu.py +++ b/youtube_dl/extractor/trilulilu.py @@ -1,80 +1,103 @@ # coding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + int_or_none, + parse_iso8601, +) class TriluliluIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?trilulilu\.ro/(?:video-[^/]+/)?(?P<id>[^/#\?]+)' - _TEST = { - 'url': 'http://www.trilulilu.ro/video-animatie/big-buck-bunny-1', - 'md5': 'c1450a00da251e2769b74b9005601cac', + _VALID_URL = r'https?://(?:(?:www|m)\.)?trilulilu\.ro/(?:[^/]+/)?(?P<id>[^/#\?]+)' + _TESTS = [{ + 'url': 'http://www.trilulilu.ro/big-buck-bunny-1', + 'md5': '68da087b676a6196a413549212f60cc6', 'info_dict': { 'id': 'ae2899e124140b', 'ext': 'mp4', 'title': 'Big Buck Bunny', 'description': ':) pentru copilul din noi', + 'uploader_id': 'chipy', + 'upload_date': '20120304', + 'timestamp': 1330830647, + 'uploader': 'chipy', + 'view_count': int, + 'like_count': int, + 'comment_count': int, }, - } + }, { + 'url': 'http://www.trilulilu.ro/adena-ft-morreti-inocenta', + 'md5': '929dfb8729dc71750463af88bbbbf4a4', + 'info_dict': { + 'id': 'f299710e3c91c5', + 'ext': 'mp4', + 'title': 'Adena ft. 
Morreti - Inocenta',
+            'description': 'pop music',
+            'uploader_id': 'VEVOmixt',
+            'upload_date': '20151204',
+            'uploader': 'VEVOmixt',
+            'timestamp': 1449187937,
+            'view_count': int,
+            'like_count': int,
+            'comment_count': int,
+        },
+    }]

     def _real_extract(self, url):
         display_id = self._match_id(url)

-        webpage = self._download_webpage(url, display_id)
+        media_info = self._download_json('http://m.trilulilu.ro/%s?format=json' % display_id, display_id)

-        if re.search(r'Fişierul nu este disponibil pentru vizionare în ţara dumneavoastră', webpage):
-            raise ExtractorError(
-                'This video is not available in your country.', expected=True)
-        elif re.search('Fişierul poate fi accesat doar de către prietenii lui', webpage):
+        age_limit = 0
+        errors = media_info.get('errors', {})
+        if errors.get('friends'):
             raise ExtractorError('This video is private.', expected=True)
+        elif errors.get('geoblock'):
+            raise ExtractorError('This video is not available in your country.', expected=True)
+        elif errors.get('xxx_unlogged'):
+            age_limit = 18

-        flashvars_str = self._search_regex(
-            r'block_flash_vars\s*=\s*(\{[^\}]+\})', webpage, 'flashvars', fatal=False, default=None)
+        media_class = media_info.get('class')
+        if media_class not in ('video', 'audio'):
+            raise ExtractorError('not a video or an audio')

-        if flashvars_str:
-            flashvars = self._parse_json(flashvars_str, display_id)
-        else:
-            raise ExtractorError(
-                'This page does not contain videos', expected=True)
+        user = media_info.get('user', {})

-        if flashvars['isMP3'] == 'true':
-            raise ExtractorError(
-                'Audio downloads are currently not supported', expected=True)
+        thumbnail = media_info.get('cover_url')
+        if thumbnail:
+            thumbnail = thumbnail.format(width='1600', height='1200')

-        video_id = flashvars['hash']
-        title = self._og_search_title(webpage)
-        thumbnail = self._og_search_thumbnail(webpage)
-        description = self._og_search_description(webpage, default=None)
-
-        format_url = ('http://fs%(server)s.trilulilu.ro/%(hash)s/'
-                      'video-formats2' % flashvars)
-        format_doc = self._download_xml(
-            format_url, video_id,
-            note='Downloading formats',
-            errnote='Error while downloading formats')
-
-        video_url_template = (
-            'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
-            '&source=site&hash=%(hash)s&username=%(userid)s&'
-            'key=ministhebest&format=%%s&sig=&exp=' %
-            flashvars)
-        formats = [
-            {
-                'format_id': fnode.text.partition('-')[2],
-                'url': video_url_template % fnode.text,
-                'ext': fnode.text.partition('-')[0]
-            }
-
-            for fnode in format_doc.findall('./formats/format')
-        ]
+        # TODO: get correct ext for audio files
+        stream_type = media_info.get('stream_type')
+        formats = [{
+            'url': media_info['href'],
+            'ext': stream_type,
+        }]
+        if media_info.get('is_hd'):
+            formats.append({
+                'format_id': 'hd',
+                'url': media_info['hrefhd'],
+                'ext': stream_type,
+            })
+        if media_class == 'audio':
+            formats[0]['vcodec'] = 'none'
+        else:
+            formats[0]['format_id'] = 'sd'

         return {
-            'id': video_id,
+            'id': media_info['identifier'].split('|')[1],
             'display_id': display_id,
             'formats': formats,
-            'title': title,
-            'description': description,
+            'title': media_info['title'],
+            'description': media_info.get('description'),
             'thumbnail': thumbnail,
+            'uploader_id': user.get('username'),
+            'uploader': user.get('fullname'),
+            'timestamp': parse_iso8601(media_info.get('published'), ' '),
+            'duration': int_or_none(media_info.get('duration')),
+            'view_count': int_or_none(media_info.get('count_views')),
+            'like_count': int_or_none(media_info.get('count_likes')),
+            'comment_count':
diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py index c9cb69333..46ef61ff5 100644 --- a/youtube_dl/extractor/tube8.py +++ b/youtube_dl/extractor/tube8.py @@ -4,12 +4,10 @@ import json import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_urlparse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse_urlparse from ..utils import ( int_or_none, + sanitized_Request, str_to_int, ) from ..aes import aes_decrypt_text @@ -42,7 +40,7 @@ class Tube8IE(InfoExtractor): video_id = mobj.group('id') display_id = mobj.group('display_id') - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, display_id) diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py index 2c4b21807..6d78b5dfe 100644 --- a/youtube_dl/extractor/tubitv.py +++ b/youtube_dl/extractor/tubitv.py @@ -5,13 +5,11 @@ import codecs import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) @@ -44,7 +42,7 @@ class TubiTvIE(InfoExtractor): 'password': password, } payload = compat_urllib_parse.urlencode(form_data).encode('utf-8') - request = compat_urllib_request.Request(self._LOGIN_URL, payload) + request = sanitized_Request(self._LOGIN_URL, payload) request.add_header('Content-Type', 'application/x-www-form-urlencoded') login_page = self._download_webpage( request, None, False, 'Wrong login info') @@ -60,9 +58,7 @@ class TubiTvIE(InfoExtractor): webpage = self._download_webpage(url, video_id) if re.search(r"<(?:DIV|div) class='login-required-screen'>", webpage): - raise ExtractorError( - 'This video requires login, use --username and --password ' - 'options to provide account credentials.', expected=True) + self.raise_login_required('This video requires login') title = self._og_search_title(webpage) description = self._og_search_description(webpage) diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py index c89de5ba4..5f7ac4b35 100644 --- a/youtube_dl/extractor/tudou.py +++ b/youtube_dl/extractor/tudou.py @@ -2,14 +2,12 @@ from __future__ import unicode_literals -import re -import json - from .common import InfoExtractor +from ..compat import compat_str class TudouIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/.*?/(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])' + _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/([^/]+/)*(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])' _TESTS = [{ 'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html', 'md5': '140a49ed444bd22f93330985d8475fcb', @@ -27,35 +25,41 @@ class TudouIE(InfoExtractor): 'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012', 'thumbnail': 're:^https?://.*\.jpg$', } + }, { + 'url': 'http://www.tudou.com/albumplay/cJAHGih4yYg.html', + 'only_matching': True, }] - def _url_for_id(self, id, quality=None): - info_url = "http://v2.tudou.com/f?id=" + str(id) + _PLAYER_URL = 'http://js.tudouui.com/bin/lingtong/PortalPlayer_177.swf' + + def _url_for_id(self, video_id, quality=None): + info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id) if quality: info_url += '&hd' + quality - webpage = 
self._download_webpage(info_url, id, "Opening the info webpage") - final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url') + xml_data = self._download_xml(info_url, video_id, "Opening the info XML page") + final_url = xml_data.text return final_url def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage) - if m and m.group(1): - return { - '_type': 'url', - 'url': 'youku:' + m.group(1), - 'ie_key': 'Youku' - } + youku_vcode = self._search_regex( + r'vcode\s*:\s*[\'"]([^\'"]*)[\'"]', webpage, 'youku vcode', default=None) + if youku_vcode: + return self.url_result('youku:' + youku_vcode, ie='Youku') title = self._search_regex( - r",kw:\s*['\"](.+?)[\"']", webpage, 'title') + r',kw\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'title') thumbnail_url = self._search_regex( - r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False) + r',pic\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'thumbnail URL', fatal=False) + + player_url = self._search_regex( + r'playerUrl\s*:\s*[\'"]([^\'"]+\.swf)[\'"]', + webpage, 'player URL', default=self._PLAYER_URL) - segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments') - segments = json.loads(segs_json) + segments = self._parse_json(self._search_regex( + r'segs: \'([^\']+)\'', webpage, 'segments'), video_id) # It looks like the keys are the arguments that have to be passed as # the hd field in the request url, we pick the higher # Also, filter non-number qualities (see issue #3643). @@ -76,6 +80,9 @@ class TudouIE(InfoExtractor): 'ext': ext, 'title': title, 'thumbnail': thumbnail_url, + 'http_headers': { + 'Referer': player_url, + }, } result.append(part_info) diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py index 3d3b635e4..4f844706d 100644 --- a/youtube_dl/extractor/tumblr.py +++ b/youtube_dl/extractor/tumblr.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..utils import int_or_none class TumblrIE(InfoExtractor): @@ -29,6 +30,19 @@ class TumblrIE(InfoExtractor): 'thumbnail': 're:http://.*\.jpg', } }, { + 'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video', + 'md5': '7ae503065ad150122dc3089f8cf1546c', + 'info_dict': { + 'id': '130323439814', + 'ext': 'mp4', + 'title': 'HD Video Testing \u2014 Test description for my HD video', + 'description': 'md5:97cc3ab5fcd27ee4af6356701541319c', + 'thumbnail': 're:http://.*\.jpg', + }, + 'params': { + 'format': 'hd', + }, + }, { 'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching', 'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab', 'info_dict': { @@ -37,6 +51,9 @@ class TumblrIE(InfoExtractor): 'title': 'naked smoking & stretching', 'upload_date': '20150506', 'timestamp': 1430931613, + 'age_limit': 18, + 'uploader_id': '1638622', + 'uploader': 'naked-yogi', }, 'add_ie': ['Vidme'], }, { @@ -66,10 +83,38 @@ class TumblrIE(InfoExtractor): if iframe_url is None: return self.url_result(urlh.geturl(), 'Generic') - iframe = self._download_webpage(iframe_url, video_id, - 'Downloading iframe page') - video_url = self._search_regex(r'<source src="([^"]+)"', - iframe, 'video url') + iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page') + + duration = None + sources = [] + + sd_url = self._search_regex( + r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe, + 'sd video url', default=None, group='url') + if sd_url: + sources.append((sd_url, 
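The Tudou hunk above keeps the old heuristic for choosing a stream quality: the segment map is keyed by quality labels, non-numeric keys are filtered out (see issue #3643), and the highest numeric key wins. A standalone sketch of roughly that logic, with invented sample data:

```
# Pick the highest numeric quality key from a segments map.
segments = {'2': ['seg-a'], '5': ['seg-b'], 'trailer': ['seg-c']}  # invented sample
quality = sorted(filter(lambda k: k.isdigit(), segments.keys()),
                 key=lambda k: int(k))[-1]
print(quality, segments[quality])  # 5 ['seg-b']
```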
'sd')) + + options = self._parse_json( + self._search_regex( + r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe, + 'hd video url', default='', group='options'), + video_id, fatal=False) + if options: + duration = int_or_none(options.get('duration')) + hd_url = options.get('hdUrl') + if hd_url: + sources.append((hd_url, 'hd')) + + formats = [{ + 'url': video_url, + 'ext': 'mp4', + 'format_id': format_id, + 'height': int_or_none(self._search_regex( + r'/(\d{3,4})$', video_url, 'height', default=None)), + 'quality': quality, + } for quality, (video_url, format_id) in enumerate(sources)] + + self._sort_formats(formats) # The only place where you can get a title, it's not complete, # but searching in other places doesn't work for all videos @@ -79,9 +124,9 @@ class TumblrIE(InfoExtractor): return { 'id': video_id, - 'url': video_url, - 'ext': 'mp4', 'title': video_title, 'description': self._og_search_description(webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), + 'duration': duration, + 'formats': formats, } diff --git a/youtube_dl/extractor/tutv.py b/youtube_dl/extractor/tutv.py index fad720b68..822372ea1 100644 --- a/youtube_dl/extractor/tutv.py +++ b/youtube_dl/extractor/tutv.py @@ -10,10 +10,10 @@ class TutvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tu\.tv/videos/(?P<id>[^/?]+)' _TEST = { 'url': 'http://tu.tv/videos/robots-futbolistas', - 'md5': '627c7c124ac2a9b5ab6addb94e0e65f7', + 'md5': '0cd9e28ad270488911b0d2a72323395d', 'info_dict': { 'id': '2973058', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'Robots futbolistas', }, } diff --git a/youtube_dl/extractor/tvplay.py b/youtube_dl/extractor/tvplay.py index 79863e781..b4683de54 100644 --- a/youtube_dl/extractor/tvplay.py +++ b/youtube_dl/extractor/tvplay.py @@ -104,6 +104,7 @@ class TVPlayIE(InfoExtractor): 'duration': 1492, 'timestamp': 1330522854, 'upload_date': '20120229', + 'age_limit': 18, }, 'params': { # rtmp download diff --git a/youtube_dl/extractor/tweakers.py b/youtube_dl/extractor/tweakers.py index c80ec15cf..f3198fb85 100644 --- a/youtube_dl/extractor/tweakers.py +++ b/youtube_dl/extractor/tweakers.py @@ -1,19 +1,13 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import ( - xpath_text, - xpath_with_ns, - int_or_none, - float_or_none, -) class TweakersIE(InfoExtractor): _VALID_URL = r'https?://tweakers\.net/video/(?P<id>\d+)' _TEST = { 'url': 'https://tweakers.net/video/9926/new-nintendo-3ds-xl-op-alle-fronten-beter.html', - 'md5': '1b5afa817403bb5baa08359dca31e6df', + 'md5': '3147e4ddad366f97476a93863e4557c8', 'info_dict': { 'id': '9926', 'ext': 'mp4', @@ -25,41 +19,7 @@ class TweakersIE(InfoExtractor): } def _real_extract(self, url): - video_id = self._match_id(url) - - playlist = self._download_xml( - 'https://tweakers.net/video/s1playlist/%s/playlist.xspf' % video_id, - video_id) - - NS_MAP = { - 'xspf': 'http://xspf.org/ns/0/', - 's1': 'http://static.streamone.nl/player/ns/0', - } - - track = playlist.find(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)) - - title = xpath_text( - track, xpath_with_ns('./xspf:title', NS_MAP), 'title') - description = xpath_text( - track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description') - thumbnail = xpath_text( - track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail') - duration = float_or_none( - xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), - 1000) - - formats = [{ - 'url': location.text, - 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)), - 
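The Tumblr rewrite above collects `(url, format_id)` pairs and turns the list position into a quality rank via `enumerate`. A hedged sketch of the same shape; the URLs are invented, and the trailing digits double as the height exactly as in the hunk:

```
import re
from youtube_dl.utils import int_or_none

sources = [
    ('https://vtt.tumblr.com/tumblr_abc/480', 'sd'),  # invented URLs
    ('https://vtt.tumblr.com/tumblr_abc/720', 'hd'),
]
formats = [{
    'url': video_url,
    'ext': 'mp4',
    'format_id': format_id,
    'height': int_or_none(re.search(r'/(\d{3,4})$', video_url).group(1)),
    'quality': quality,  # later entries (hd) rank higher
} for quality, (video_url, format_id) in enumerate(sources)]
```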
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))), - 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))), - } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))] - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'duration': duration, - 'formats': formats, - } + playlist_id = self._match_id(url) + entries = self._extract_xspf_playlist( + 'https://tweakers.net/video/s1playlist/%s/playlist.xspf' % playlist_id, playlist_id) + return self.playlist_result(entries, playlist_id) diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py index 73ce335b7..69882da63 100644 --- a/youtube_dl/extractor/twitch.py +++ b/youtube_dl/extractor/twitch.py @@ -7,13 +7,19 @@ import random from .common import InfoExtractor from ..compat import ( + compat_parse_qs, compat_str, compat_urllib_parse, - compat_urllib_request, + compat_urllib_parse_urlparse, + compat_urlparse, ) from ..utils import ( + encode_dict, ExtractorError, + int_or_none, + parse_duration, parse_iso8601, + sanitized_Request, ) @@ -22,8 +28,7 @@ class TwitchBaseIE(InfoExtractor): _API_BASE = 'https://api.twitch.tv' _USHER_BASE = 'http://usher.twitch.tv' - _LOGIN_URL = 'https://secure.twitch.tv/login' - _LOGIN_POST_URL = 'https://passport.twitch.tv/authorize' + _LOGIN_URL = 'http://www.twitch.tv/login' _NETRC_MACHINE = 'twitch' def _handle_error(self, response): @@ -43,7 +48,7 @@ class TwitchBaseIE(InfoExtractor): for cookie in self._downloader.cookiejar: if cookie.name == 'api_token': headers['Twitch-Api-Token'] = cookie.value - request = compat_urllib_request.Request(url, headers=headers) + request = sanitized_Request(url, headers=headers) response = super(TwitchBaseIE, self)._download_json(request, video_id, note) self._handle_error(response) return response @@ -56,19 +61,28 @@ class TwitchBaseIE(InfoExtractor): if username is None: return - login_page = self._download_webpage( + login_page, handle = self._download_webpage_handle( self._LOGIN_URL, None, 'Downloading login page') login_form = self._hidden_inputs(login_page) login_form.update({ - 'login': username.encode('utf-8'), - 'password': password.encode('utf-8'), + 'username': username, + 'password': password, }) - request = compat_urllib_request.Request( - self._LOGIN_POST_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) - request.add_header('Referer', self._LOGIN_URL) + redirect_url = handle.geturl() + + post_url = self._search_regex( + r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, + 'post url', default=redirect_url, group='url') + + if not post_url.startswith('http'): + post_url = compat_urlparse.urljoin(redirect_url, post_url) + + request = sanitized_Request( + post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8')) + request.add_header('Referer', redirect_url) response = self._download_webpage( request, None, 'Logging in as %s' % username) @@ -129,14 +143,14 @@ class TwitchItemBaseIE(TwitchBaseIE): def _extract_info(self, info): return { 'id': info['_id'], - 'title': info['title'], - 'description': info['description'], - 'duration': info['length'], - 'thumbnail': info['preview'], - 'uploader': info['channel']['display_name'], - 'uploader_id': info['channel']['name'], - 'timestamp': parse_iso8601(info['recorded_at']), - 'view_count': info['views'], + 'title': info.get('title') or 'Untitled Broadcast', + 'description': info.get('description'), + 'duration': int_or_none(info.get('length')), + 'thumbnail': 
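The Twitch login changes above stop hardcoding the POST endpoint: the form action is scraped from the login page, the redirect URL serves as the fallback, and relative actions are made absolute. A pure-stdlib sketch; the action value is hypothetical:

```
try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin  # Python 2

redirect_url = 'https://www.twitch.tv/login'
post_url = '/authentications/new'  # hypothetical form action
if not post_url.startswith('http'):
    post_url = urljoin(redirect_url, post_url)
print(post_url)  # https://www.twitch.tv/authentications/new
```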
info.get('preview'), + 'uploader': info.get('channel', {}).get('display_name'), + 'uploader_id': info.get('channel', {}).get('name'), + 'timestamp': parse_iso8601(info.get('recorded_at')), + 'view_count': int_or_none(info.get('views')), } def _real_extract(self, url): @@ -184,8 +198,8 @@ class TwitchVodIE(TwitchItemBaseIE): _ITEM_TYPE = 'vod' _ITEM_SHORTCUT = 'v' - _TEST = { - 'url': 'http://www.twitch.tv/riotgames/v/6528877', + _TESTS = [{ + 'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s', 'info_dict': { 'id': 'v6528877', 'ext': 'mp4', @@ -197,25 +211,61 @@ class TwitchVodIE(TwitchItemBaseIE): 'uploader': 'Riot Games', 'uploader_id': 'riotgames', 'view_count': int, + 'start_time': 310, }, 'params': { # m3u8 download 'skip_download': True, }, - } + }, { + # Untitled broadcast (title is None) + 'url': 'http://www.twitch.tv/belkao_o/v/11230755', + 'info_dict': { + 'id': 'v11230755', + 'ext': 'mp4', + 'title': 'Untitled Broadcast', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 1638, + 'timestamp': 1439746708, + 'upload_date': '20150816', + 'uploader': 'BelkAO_o', + 'uploader_id': 'belkao_o', + 'view_count': int, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }] def _real_extract(self, url): item_id = self._match_id(url) + info = self._download_info(self._ITEM_SHORTCUT, item_id) access_token = self._download_json( '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id, 'Downloading %s access token' % self._ITEM_TYPE) + formats = self._extract_m3u8_formats( - '%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true' - % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']), + '%s/vod/%s?%s' % ( + self._USHER_BASE, item_id, + compat_urllib_parse.urlencode({ + 'allow_source': 'true', + 'allow_spectre': 'true', + 'player': 'twitchweb', + 'nauth': access_token['token'], + 'nauthsig': access_token['sig'], + })), item_id, 'mp4') + self._prefer_source(formats) info['formats'] = formats + + parsed_url = compat_urllib_parse_urlparse(url) + query = compat_parse_qs(parsed_url.query) + if 't' in query: + info['start_time'] = parse_duration(query['t'][0]) + return info diff --git a/youtube_dl/extractor/twitter.py b/youtube_dl/extractor/twitter.py index 1aaa06305..a161f046b 100644 --- a/youtube_dl/extractor/twitter.py +++ b/youtube_dl/extractor/twitter.py @@ -1,28 +1,73 @@ +# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_urllib_request from ..utils import ( float_or_none, - unescapeHTML, + xpath_text, + remove_end, + int_or_none, + ExtractorError, + sanitized_Request, ) class TwitterCardIE(InfoExtractor): + IE_NAME = 'twitter:card' _VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)' - _TEST = { - 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889', - 'md5': 'a74f50b310c83170319ba16de6955192', - 'info_dict': { - 'id': '560070183650213889', - 'ext': 'mp4', - 'title': 'TwitterCard', - 'thumbnail': 're:^https?://.*\.jpg$', - 'duration': 30.033, + _TESTS = [ + { + 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889', + 'md5': '4fa26a35f9d1bf4b646590ba8e84be19', + 'info_dict': { + 'id': '560070183650213889', + 'ext': 'mp4', + 'title': 'TwitterCard', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 30.033, + } }, - } + { + 'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768', + 'md5': '7ee2a553b63d1bccba97fbed97d9e1c8', + 'info_dict': { + 'id': '623160978427936768', + 'ext': 'mp4', + 'title': 'TwitterCard', + 
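The Twitch VOD hunk above starts honoring a `?t=5m10s` offset in the URL by feeding the query value through `parse_duration`. A standalone check using helpers that exist in this tree:

```
from youtube_dl.compat import compat_parse_qs, compat_urllib_parse_urlparse
from youtube_dl.utils import parse_duration

url = 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s'
query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
start_time = parse_duration(query['t'][0]) if 't' in query else None
print(start_time)  # 310.0
```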
'thumbnail': 're:^https?://.*\.jpg', + 'duration': 80.155, + }, + }, + { + 'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977', + 'md5': 'b6f35e8b08a0bec6c8af77a2f4b3a814', + 'info_dict': { + 'id': 'dq4Oj5quskI', + 'ext': 'mp4', + 'title': 'Ubuntu 11.10 Overview', + 'description': 'Take a quick peek at what\'s new and improved in Ubuntu 11.10.\n\nOnce installed take a look at 10 Things to Do After Installing: http://www.omgubuntu.co.uk/2011/10/10-things-to-do-after-installing-ubuntu-11-10/', + 'upload_date': '20111013', + 'uploader': 'OMG! Ubuntu!', + 'uploader_id': 'omgubuntu', + }, + 'add_ie': ['Youtube'], + }, + { + 'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568', + 'md5': 'ab2745d0b0ce53319a534fccaa986439', + 'info_dict': { + 'id': 'iBb2x00UVlv', + 'ext': 'mp4', + 'upload_date': '20151113', + 'uploader_id': '1189339351084113920', + 'uploader': '@ArsenalTerje', + 'title': 'Vine by @ArsenalTerje', + }, + 'add_ie': ['Vine'], + } + ] def _real_extract(self, url): video_id = self._match_id(url) @@ -36,14 +81,28 @@ class TwitterCardIE(InfoExtractor): config = None formats = [] for user_agent in USER_AGENTS: - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('User-Agent', user_agent) webpage = self._download_webpage(request, video_id) - config = self._parse_json( - unescapeHTML(self._search_regex( - r'data-player-config="([^"]+)"', webpage, 'data player config')), + iframe_url = self._html_search_regex( + r'<iframe[^>]+src="((?:https?:)?//(?:www.youtube.com/embed/[^"]+|(?:www\.)?vine\.co/v/\w+/card))"', + webpage, 'video iframe', default=None) + if iframe_url: + return self.url_result(iframe_url) + + config = self._parse_json(self._html_search_regex( + r'data-player-config="([^"]+)"', webpage, 'data player config'), video_id) + if 'playlist' not in config: + if 'vmapUrl' in config: + vmap_data = self._download_xml(config['vmapUrl'], video_id) + video_url = xpath_text(vmap_data, './/MediaFile').strip() + formats.append({ + 'url': video_url, + }) + break # same video regardless of UA + continue video_url = config['playlist'][0]['source'] @@ -70,3 +129,100 @@ class TwitterCardIE(InfoExtractor): 'duration': duration, 'formats': formats, } + + +class TwitterIE(InfoExtractor): + IE_NAME = 'twitter' + _VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?P<user_id>[^/]+)/status/(?P<id>\d+)' + _TEMPLATE_URL = 'https://twitter.com/%s/status/%s' + + _TESTS = [{ + 'url': 'https://twitter.com/freethenipple/status/643211948184596480', + 'md5': 'db6612ec5d03355953c3ca9250c97e5e', + 'info_dict': { + 'id': '643211948184596480', + 'ext': 'mp4', + 'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!', + 'thumbnail': 're:^https?://.*\.jpg', + 'duration': 12.922, + 'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! 
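When the player config lacks a playlist, the Twitter card hunk above falls back to a VMAP document and reads its MediaFile. A self-contained sketch with an inline sample document (real VMAPs are namespaced and richer than this):

```
from youtube_dl.compat import compat_etree_fromstring
from youtube_dl.utils import xpath_text

vmap_data = compat_etree_fromstring(
    b'<vmap><Ad><MediaFile> https://video.twimg.com/example.mp4 </MediaFile></Ad></vmap>')
video_url = xpath_text(vmap_data, './/MediaFile').strip()
print(video_url)  # https://video.twimg.com/example.mp4
```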
http://t.co/c7jHH749xJ"', + 'uploader': 'FREE THE NIPPLE', + 'uploader_id': 'freethenipple', + }, + }, { + 'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1', + 'md5': 'f36dcd5fb92bf7057f155e7d927eeb42', + 'info_dict': { + 'id': '657991469417025536', + 'ext': 'mp4', + 'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai', + 'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"', + 'thumbnail': 're:^https?://.*\.png', + 'uploader': 'Gifs', + 'uploader_id': 'giphz', + }, + }, { + 'url': 'https://twitter.com/starwars/status/665052190608723968', + 'md5': '39b7199856dee6cd4432e72c74bc69d4', + 'info_dict': { + 'id': '665052190608723968', + 'ext': 'mp4', + 'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.', + 'description': 'Star Wars on Twitter: "A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens."', + 'uploader_id': 'starwars', + 'uploader': 'Star Wars', + }, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + user_id = mobj.group('user_id') + twid = mobj.group('id') + + webpage = self._download_webpage(self._TEMPLATE_URL % (user_id, twid), twid) + + username = remove_end(self._og_search_title(webpage), ' on Twitter') + + title = description = self._og_search_description(webpage).replace('\n', ' ').strip('“”') + + # strip 'https -_t.co_BJYgOjSeGA' junk from filenames + title = re.sub(r'\s+(https?://[^ ]+)', '', title) + + info = { + 'uploader_id': user_id, + 'uploader': username, + 'webpage_url': url, + 'description': '%s on Twitter: "%s"' % (username, description), + 'title': username + ' - ' + title, + } + + card_id = self._search_regex( + r'["\']/i/cards/tfw/v1/(\d+)', webpage, 'twitter card url', default=None) + if card_id: + card_url = 'https://twitter.com/i/cards/tfw/v1/' + card_id + info.update({ + '_type': 'url_transparent', + 'ie_key': 'TwitterCard', + 'url': card_url, + }) + return info + + mobj = re.search(r'''(?x) + <video[^>]+class="animated-gif"[^>]+ + (?:data-height="(?P<height>\d+)")?[^>]+ + (?:data-width="(?P<width>\d+)")?[^>]+ + (?:poster="(?P<poster>[^"]+)")?[^>]*>\s* + <source[^>]+video-src="(?P<url>[^"]+)" + ''', webpage) + + if mobj: + info.update({ + 'id': twid, + 'url': mobj.group('url'), + 'height': int_or_none(mobj.group('height')), + 'width': int_or_none(mobj.group('width')), + 'thumbnail': mobj.group('poster'), + }) + return info + + raise ExtractorError('There\'s no video in this tweet.')
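A sketch of the tweet-title cleanup just added: the uploader comes from og:title via `remove_end`, and trailing t.co links are stripped from the description-derived title. The sample strings mirror the first test above:

```
import re
from youtube_dl.utils import remove_end

username = remove_end('FREE THE NIPPLE on Twitter', ' on Twitter')
description = 'FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ'
title = re.sub(r'\s+(https?://[^ ]+)', '', description)
print(username + ' - ' + title)
# FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!
```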
diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py index 4a0eaf65f..59832b1ec 100644 --- a/youtube_dl/extractor/udemy.py +++ b/youtube_dl/extractor/udemy.py @@ -1,14 +1,16 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor from ..compat import ( + compat_HTTPError, compat_urllib_parse, compat_urllib_request, ) from ..utils import ( ExtractorError, + float_or_none, + int_or_none, + sanitized_Request, ) @@ -17,6 +19,8 @@ class UdemyIE(InfoExtractor): _VALID_URL = r'https?://www\.udemy\.com/(?:[^#]+#/lecture/|lecture/view/?\?lectureId=)(?P<id>\d+)' _LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1' _ORIGIN_URL = 'https://www.udemy.com' + _SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<' + _ALREADY_ENROLLED = '>You are already taking this course.<' _NETRC_MACHINE = 'udemy' _TESTS = [{ @@ -32,6 +36,29 @@ class UdemyIE(InfoExtractor): 'skip': 'Requires udemy account credentials', }] + def _enroll_course(self, webpage, course_id): + enroll_url = self._search_regex( + r'href=(["\'])(?P<url>https?://(?:www\.)?udemy\.com/course/subscribe/.+?)\1', + webpage, 'enroll url', group='url', + default='https://www.udemy.com/course/subscribe/?courseId=%s' % course_id) + webpage = self._download_webpage(enroll_url, course_id, 'Enrolling in the course') + if self._SUCCESSFULLY_ENROLLED in webpage: + self.to_screen('%s: Successfully enrolled in' % course_id) + elif self._ALREADY_ENROLLED in webpage: + self.to_screen('%s: Already enrolled in' % course_id) + + def _download_lecture(self, course_id, lecture_id): + return self._download_json( + 'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % ( + course_id, lecture_id, compat_urllib_parse.urlencode({ + 'video_only': '', + 'auto_play': '', + 'fields[lecture]': 'title,description,asset', + 'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data', + 'instructorPreviewMode': 'False', + })), + lecture_id, 'Downloading lecture JSON') + def _handle_error(self, response): if not isinstance(response, dict): return @@ -53,12 +80,13 @@ class UdemyIE(InfoExtractor): headers['X-Udemy-Client-Id'] = cookie.value elif cookie.name == 'access_token': headers['X-Udemy-Bearer-Token'] = cookie.value + headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value if isinstance(url_or_request, compat_urllib_request.Request): for header, value in headers.items(): url_or_request.add_header(header, value) else: - url_or_request = compat_urllib_request.Request(url_or_request, headers=headers) + url_or_request = sanitized_Request(url_or_request, headers=headers) response = super(UdemyIE, self)._download_json(url_or_request, video_id, note) self._handle_error(response) @@ -70,9 +98,7 @@ class UdemyIE(InfoExtractor): def _login(self): (username, password) = self._get_login_info() if username is None: - raise ExtractorError( - 'Udemy account is required, use --username and --password options to provide account credentials.', - expected=True) + return login_popup = self._download_webpage( self._LOGIN_URL, None, 'Downloading login popup') @@ -91,7 +117,7 @@ class UdemyIE(InfoExtractor): 'password': password.encode('utf-8'), }) - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) request.add_header('Referer', self._ORIGIN_URL) request.add_header('Origin', self._ORIGIN_URL) @@ -110,44 +136,76 @@ class UdemyIE(InfoExtractor): def _real_extract(self, url): lecture_id = self._match_id(url) - lecture = self._download_json( - 'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id, - lecture_id, 'Downloading lecture JSON') + webpage = self._download_webpage(url, lecture_id) + + course_id = self._search_regex( + r'data-course-id=["\'](\d+)', webpage, 'course id') + + try: + lecture = self._download_lecture(course_id, lecture_id) + except ExtractorError as e: + # Error could possibly mean we are not enrolled in the course + if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: + self._enroll_course(webpage, course_id) + lecture = self._download_lecture(course_id, lecture_id) + else: + raise + + title = lecture['title'] + description = lecture.get('description') + + asset = lecture['asset'] - asset_type = lecture.get('assetType') or lecture.get('asset_type') + asset_type = asset.get('assetType') or asset.get('asset_type') if asset_type != 
'Video': raise ExtractorError( 'Lecture %s is not a video' % lecture_id, expected=True) - asset = lecture['asset'] - stream_url = asset.get('streamUrl') or asset.get('stream_url') - mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url) - if mobj: - return self.url_result(mobj.group(1), 'Youtube') + if stream_url: + youtube_url = self._search_regex( + r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None) + if youtube_url: + return self.url_result(youtube_url, 'Youtube') video_id = asset['id'] thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url') - duration = asset['data']['duration'] - - download_url = asset.get('downloadUrl') or asset.get('download_url') - - video = download_url.get('Video') or download_url.get('video') - video_480p = download_url.get('Video480p') or download_url.get('video_480p') - - formats = [ - { - 'url': video_480p[0], - 'format_id': '360p', - }, - { - 'url': video[0], - 'format_id': '720p', - }, - ] - - title = lecture['title'] - description = lecture['description'] + duration = float_or_none(asset.get('data', {}).get('duration')) + outputs = asset.get('data', {}).get('outputs', {}) + + formats = [] + for format_ in asset.get('download_urls', {}).get('Video', []): + video_url = format_.get('file') + if not video_url: + continue + format_id = format_.get('label') + f = { + 'url': format_['file'], + 'height': int_or_none(format_id), + } + if format_id: + # Some videos contain additional metadata (e.g. + # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208) + output = outputs.get(format_id) + if isinstance(output, dict): + f.update({ + 'format_id': '%sp' % (output.get('label') or format_id), + 'width': int_or_none(output.get('width')), + 'height': int_or_none(output.get('height')), + 'vbr': int_or_none(output.get('video_bitrate_in_kbps')), + 'vcodec': output.get('video_codec'), + 'fps': int_or_none(output.get('frame_rate')), + 'abr': int_or_none(output.get('audio_bitrate_in_kbps')), + 'acodec': output.get('audio_codec'), + 'asr': int_or_none(output.get('audio_sample_rate')), + 'tbr': int_or_none(output.get('total_bitrate_in_kbps')), + 'filesize': int_or_none(output.get('file_size_in_bytes')), + }) + else: + f['format_id'] = '%sp' % format_id + formats.append(f) + + self._sort_formats(formats) return { 'id': video_id, @@ -161,9 +219,7 @@ class UdemyIE(InfoExtractor): class UdemyCourseIE(UdemyIE): IE_NAME = 'udemy:course' - _VALID_URL = r'https?://www\.udemy\.com/(?P<coursepath>[\da-z-]+)' - _SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<' - _ALREADY_ENROLLED = '>You are already taking this course.<' + _VALID_URL = r'https?://www\.udemy\.com/(?P<id>[\da-z-]+)' _TESTS = [] @classmethod @@ -171,24 +227,18 @@ class UdemyCourseIE(UdemyIE): return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url) def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - course_path = mobj.group('coursepath') + course_path = self._match_id(url) + + webpage = self._download_webpage(url, course_path) response = self._download_json( 'https://www.udemy.com/api-1.1/courses/%s' % course_path, course_path, 'Downloading course JSON') - course_id = int(response['id']) - course_title = response['title'] - - webpage = self._download_webpage( - 'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id, - course_id, 'Enrolling in the course') + course_id = response['id'] + course_title = response.get('title') - if self._SUCCESSFULLY_ENROLLED in webpage: - self.to_screen('%s: 
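The Udemy format loop above enriches each download URL with metadata from `asset['data']['outputs']`, keyed by the format label. A condensed sketch; the sample output entry is invented but uses the same keys the hunk reads:

```
from youtube_dl.utils import int_or_none

outputs = {'720': {'label': '720', 'width': 1280, 'height': 720,
                   'video_bitrate_in_kbps': 1500, 'total_bitrate_in_kbps': 1600}}
format_id = '720'
f = {'url': 'https://example.com/720.mp4', 'height': int_or_none(format_id)}
output = outputs.get(format_id)
if isinstance(output, dict):
    f.update({
        'format_id': '%sp' % (output.get('label') or format_id),
        'width': int_or_none(output.get('width')),
        'height': int_or_none(output.get('height')),
        'vbr': int_or_none(output.get('video_bitrate_in_kbps')),
        'tbr': int_or_none(output.get('total_bitrate_in_kbps')),
    })
print(f)
```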
Successfully enrolled in' % course_id) - elif self._ALREADY_ENROLLED in webpage: - self.to_screen('%s: Already enrolled in' % course_id) + self._enroll_course(webpage, course_id) response = self._download_json( 'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id, diff --git a/youtube_dl/extractor/udn.py b/youtube_dl/extractor/udn.py index 2151f8338..ee35b7227 100644 --- a/youtube_dl/extractor/udn.py +++ b/youtube_dl/extractor/udn.py @@ -12,7 +12,8 @@ from ..compat import compat_urlparse class UDNEmbedIE(InfoExtractor): IE_DESC = '聯合影音' - _VALID_URL = r'https?://video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)' + _PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)' + _VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL _TESTS = [{ 'url': 'http://video.udn.com/embed/news/300040', 'md5': 'de06b4c90b042c128395a88f0384817e', diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py index c39c278ab..73b05ecab 100644 --- a/youtube_dl/extractor/ustream.py +++ b/youtube_dl/extractor/ustream.py @@ -1,17 +1,20 @@ from __future__ import unicode_literals -import json import re from .common import InfoExtractor from ..compat import ( compat_urlparse, ) -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + int_or_none, + float_or_none, +) class UstreamIE(InfoExtractor): - _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)' + _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)' IE_NAME = 'ustream' _TESTS = [{ 'url': 'http://www.ustream.tv/recorded/20274954', @@ -19,8 +22,12 @@ class UstreamIE(InfoExtractor): 'info_dict': { 'id': '20274954', 'ext': 'flv', - 'uploader': 'Young Americans for Liberty', 'title': 'Young Americans for Liberty February 7, 2012 2:28 AM', + 'description': 'Young Americans for Liberty February 7, 2012 2:28 AM', + 'timestamp': 1328577035, + 'upload_date': '20120207', + 'uploader': 'yaliberty', + 'uploader_id': '6780869', }, }, { # From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444 @@ -32,20 +39,21 @@ class UstreamIE(InfoExtractor): 'ext': 'flv', 'title': '-CG11- Canada Games Figure Skating', 'uploader': 'sportscanadatv', - } + }, + 'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.', }] def _real_extract(self, url): m = re.match(self._VALID_URL, url) - video_id = m.group('videoID') + video_id = m.group('id') # some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990) if m.group('type') == 'embed/recorded': - video_id = m.group('videoID') + video_id = m.group('id') desktop_url = 'http://www.ustream.tv/recorded/' + video_id return self.url_result(desktop_url, 'Ustream') if m.group('type') == 'embed': - video_id = m.group('videoID') + video_id = m.group('id') webpage = self._download_webpage(url, video_id) desktop_video_id = self._html_search_regex( r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id') @@ -53,52 +61,50 @@ class UstreamIE(InfoExtractor): return self.url_result(desktop_url, 'Ustream') params = self._download_json( - 'http://cdngw.ustream.tv/rgwjson/Viewer.getVideo/' + json.dumps({ - 'brandId': 1, - 'videoId': int(video_id), - 'autoplay': False, - }), video_id) - - if 'error' in params: - raise ExtractorError(params['error']['message'], expected=True) - - video_url = params['flv'] + 'https://api.ustream.tv/videos/%s.json' % video_id, video_id) - webpage = self._download_webpage(url, video_id) + error 
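The UDN hunk above derives `_VALID_URL` from a protocol-relative pattern so the scheme-less form can be reused elsewhere (e.g. for embed scanning). A minimal check:

```
import re

_PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)'
_VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL
assert re.match(_VALID_URL, 'http://video.udn.com/embed/news/300040')
```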
= params.get('error') + if error: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, error), expected=True) - self.report_extraction(video_id) + video = params['video'] - video_title = self._html_search_regex(r'data-title="(?P<title>.+)"', - webpage, 'title', default=None) + title = video['title'] + filesize = float_or_none(video.get('file_size')) - if not video_title: - try: - video_title = params['moduleConfig']['meta']['title'] - except KeyError: - pass - - if not video_title: - video_title = 'Ustream video ' + video_id + formats = [{ + 'id': video_id, + 'url': video_url, + 'ext': format_id, + 'filesize': filesize, + } for format_id, video_url in video['media_urls'].items()] + self._sort_formats(formats) - uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>', - webpage, 'uploader', fatal=False, flags=re.DOTALL, default=None) + description = video.get('description') + timestamp = int_or_none(video.get('created_at')) + duration = float_or_none(video.get('length')) + view_count = int_or_none(video.get('views')) - if not uploader: - try: - uploader = params['moduleConfig']['meta']['userName'] - except KeyError: - uploader = None + uploader = video.get('owner', {}).get('username') + uploader_id = video.get('owner', {}).get('id') - thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"', - webpage, 'thumbnail', fatal=False) + thumbnails = [{ + 'id': thumbnail_id, + 'url': thumbnail_url, + } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()] return { 'id': video_id, - 'url': video_url, - 'ext': 'flv', - 'title': video_title, + 'title': title, + 'description': description, + 'thumbnails': thumbnails, + 'timestamp': timestamp, + 'duration': duration, + 'view_count': view_count, 'uploader': uploader, - 'thumbnail': thumbnail, + 'uploader_id': uploader_id, + 'formats': formats, } diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py index 722eb5236..1e740fbe6 100644 --- a/youtube_dl/extractor/vbox7.py +++ b/youtube_dl/extractor/vbox7.py @@ -4,11 +4,11 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -49,7 +49,7 @@ class Vbox7IE(InfoExtractor): info_url = "http://vbox7.com/play/magare.do" data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id}) - info_request = compat_urllib_request.Request(info_url, data) + info_request = sanitized_Request(info_url, data) info_request.add_header('Content-Type', 'application/x-www-form-urlencoded') info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage') if info_response is None: diff --git a/youtube_dl/extractor/veoh.py b/youtube_dl/extractor/veoh.py index 01e258e32..9633f7ffe 100644 --- a/youtube_dl/extractor/veoh.py +++ b/youtube_dl/extractor/veoh.py @@ -4,12 +4,10 @@ import re import json from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( int_or_none, ExtractorError, + sanitized_Request, ) @@ -110,7 +108,7 @@ class VeohIE(InfoExtractor): if 'class="adultwarning-container"' in webpage: self.report_age_confirmation() age_limit = 18 - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('Cookie', 'confirmedAdult=true') webpage = self._download_webpage(request, video_id) diff --git a/youtube_dl/extractor/vessel.py 
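The Ustream rewrite above replaces the old Viewer.getVideo RPC with a plain JSON API, and an API-level "error" field is surfaced as an expected ExtractorError instead of a crash. The shape of that guard, as a standalone helper (names are illustrative):

```
from youtube_dl.utils import ExtractorError

def check_api_error(params, ie_name='ustream'):
    error = params.get('error')
    if error:
        raise ExtractorError(
            '%s returned error: %s' % (ie_name, error), expected=True)

check_api_error({'video': {}})  # no error key, returns quietly
```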
b/youtube_dl/extractor/vessel.py index 3c8d2a943..1a0ff3395 100644 --- a/youtube_dl/extractor/vessel.py +++ b/youtube_dl/extractor/vessel.py @@ -4,10 +4,10 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import compat_urllib_request from ..utils import ( ExtractorError, parse_iso8601, + sanitized_Request, ) @@ -33,7 +33,7 @@ class VesselIE(InfoExtractor): @staticmethod def make_json_request(url, data): payload = json.dumps(data).encode('utf-8') - req = compat_urllib_request.Request(url, payload) + req = sanitized_Request(url, payload) req.add_header('Content-Type', 'application/json; charset=utf-8') return req diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py index c17094f81..02dfd36f4 100644 --- a/youtube_dl/extractor/vevo.py +++ b/youtube_dl/extractor/vevo.py @@ -1,15 +1,16 @@ from __future__ import unicode_literals import re -import xml.etree.ElementTree from .common import InfoExtractor from ..compat import ( - compat_urllib_request, + compat_etree_fromstring, + compat_urlparse, ) from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) @@ -69,11 +70,22 @@ class VevoIE(InfoExtractor): 'params': { 'skip_download': 'true', } + }, { + 'note': 'No video_info', + 'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000', + 'md5': '8b83cc492d72fc9cf74a02acee7dc1b0', + 'info_dict': { + 'id': 'USUV71503000', + 'ext': 'mp4', + 'title': 'Till I Die - K Camp ft. T.I.', + 'duration': 193, + }, + 'expected_warnings': ['Unable to download SMIL file'], }] _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/' def _real_initialize(self): - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://www.vevo.com/auth', data=b'') webpage = self._download_webpage( req, None, @@ -83,11 +95,17 @@ class VevoIE(InfoExtractor): if webpage is False: self._oauth_token = None else: + if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage: + raise ExtractorError('%s said: This page is currently unavailable in your region.' 
% self.IE_NAME, expected=True) + self._oauth_token = self._search_regex( r'access_token":\s*"([^"]+)"', webpage, 'access token', fatal=False) def _formats_from_json(self, video_info): + if not video_info: + return [] + last_version = {'version': -1} for version in video_info['videoVersions']: # These are the HTTP downloads, other types are for different manifests @@ -97,7 +115,7 @@ class VevoIE(InfoExtractor): if last_version['version'] == -1: raise ExtractorError('Unable to extract last version of the video') - renditions = xml.etree.ElementTree.fromstring(last_version['data']) + renditions = compat_etree_fromstring(last_version['data']) formats = [] # Already sorted from worst to best quality for rend in renditions.findall('rendition'): @@ -112,9 +130,8 @@ class VevoIE(InfoExtractor): }) return formats - def _formats_from_smil(self, smil_xml): + def _formats_from_smil(self, smil_doc): formats = [] - smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8')) els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video') for el in els: src = el.attrib['src'] @@ -147,14 +164,14 @@ class VevoIE(InfoExtractor): }) return formats - def _download_api_formats(self, video_id): + def _download_api_formats(self, video_id, video_url): if not self._oauth_token: self._downloader.report_warning( 'No oauth token available, skipping API HLS download') return [] - api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % ( - video_id, self._oauth_token) + api_url = compat_urlparse.urljoin(video_url, '//apiv2.vevo.com/video/%s/streams/hls?token=%s' % ( + video_id, self._oauth_token)) api_data = self._download_json( api_url, video_id, note='Downloading HLS formats', @@ -168,18 +185,26 @@ class VevoIE(InfoExtractor): preference=0) def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) + + webpage = None json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id response = self._download_json(json_url, video_id) - video_info = response['video'] + video_info = response['video'] or {} - if not video_info: + if not video_info and response.get('statusCode') != 909: if 'statusMessage' in response: raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusMessage']), expected=True) raise ExtractorError('Unable to extract videos') + if not video_info: + if url.startswith('vevo:'): + raise ExtractorError('Please specify full Vevo URL for downloading', expected=True) + webpage = self._download_webpage(url, video_id) + + title = video_info.get('title') or self._og_search_title(webpage) + formats = self._formats_from_json(video_info) is_explicit = video_info.get('isExplicit') @@ -191,11 +216,11 @@ class VevoIE(InfoExtractor): age_limit = None # Download via HLS API - formats.extend(self._download_api_formats(video_id)) + formats.extend(self._download_api_formats(video_id, url)) # Download SMIL smil_blocks = sorted(( - f for f in video_info['videoVersions'] + f for f in video_info.get('videoVersions', []) if f['sourceType'] == 13), key=lambda f: f['version']) smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % ( @@ -207,23 +232,26 @@ class VevoIE(InfoExtractor): if smil_url_m is not None: smil_url = smil_url_m if smil_url: - smil_xml = self._download_webpage( - smil_url, video_id, 'Downloading SMIL info', fatal=False) - if smil_xml: - formats.extend(self._formats_from_smil(smil_xml)) + smil_doc = self._download_smil(smil_url, video_id, fatal=False) + if smil_doc: + 
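The Vevo change above builds the HLS API URL with `urljoin` against the page URL and a protocol-relative endpoint, so the request inherits http/https from the input. The token here is a placeholder:

```
from youtube_dl.compat import compat_urlparse

video_url = 'http://www.vevo.com/watch/USUV71503000'
api_url = compat_urlparse.urljoin(
    video_url,
    '//apiv2.vevo.com/video/%s/streams/hls?token=%s' % ('USUV71503000', 'TOKEN'))
print(api_url)  # http://apiv2.vevo.com/video/USUV71503000/streams/hls?token=TOKEN
```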
formats.extend(self._formats_from_smil(smil_doc)) self._sort_formats(formats) - timestamp_ms = int_or_none(self._search_regex( + timestamp = int_or_none(self._search_regex( r'/Date\((\d+)\)/', - video_info['launchDate'], 'launch date', fatal=False)) + video_info['launchDate'], 'launch date', fatal=False), + scale=1000) if video_info else None + + duration = video_info.get('duration') or int_or_none( + self._html_search_meta('video:duration', webpage)) return { 'id': video_id, - 'title': video_info['title'], + 'title': title, 'formats': formats, - 'thumbnail': video_info['imageUrl'], - 'timestamp': timestamp_ms // 1000, - 'uploader': video_info['mainArtists'][0]['artistName'], - 'duration': video_info['duration'], + 'thumbnail': video_info.get('imageUrl'), + 'timestamp': timestamp, + 'uploader': video_info['mainArtists'][0]['artistName'] if video_info else None, + 'duration': duration, 'age_limit': age_limit, } diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py index 01af7a995..3db6286e4 100644 --- a/youtube_dl/extractor/vice.py +++ b/youtube_dl/extractor/vice.py @@ -15,6 +15,7 @@ class ViceIE(InfoExtractor): 'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp', 'ext': 'mp4', 'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov', + 'duration': 725.983, }, 'params': { # Requires ffmpeg (m3u8 manifest) diff --git a/youtube_dl/extractor/viddler.py b/youtube_dl/extractor/viddler.py index 8516a2940..40ffbad2a 100644 --- a/youtube_dl/extractor/viddler.py +++ b/youtube_dl/extractor/viddler.py @@ -4,9 +4,7 @@ from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, -) -from ..compat import ( - compat_urllib_request + sanitized_Request, ) @@ -65,7 +63,7 @@ class ViddlerIE(InfoExtractor): 'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' % video_id) headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'} - request = compat_urllib_request.Request(json_url, None, headers) + request = sanitized_Request(json_url, None, headers) data = self._download_json(request, video_id)['video'] formats = [] diff --git a/youtube_dl/extractor/videobam.py b/youtube_dl/extractor/videobam.py deleted file mode 100644 index 0eb3d9414..000000000 --- a/youtube_dl/extractor/videobam.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import unicode_literals - -import re -import json - -from .common import InfoExtractor -from ..utils import int_or_none - - -class VideoBamIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?videobam\.com/(?:videos/download/)?(?P<id>[a-zA-Z]+)' - - _TESTS = [ - { - 'url': 'http://videobam.com/OiJQM', - 'md5': 'db471f27763a531f10416a0c58b5a1e0', - 'info_dict': { - 'id': 'OiJQM', - 'ext': 'mp4', - 'title': 'Is Alcohol Worse Than Ecstasy?', - 'description': 'md5:d25b96151515c91debc42bfbb3eb2683', - 'uploader': 'frihetsvinge', - }, - }, - { - 'url': 'http://videobam.com/pqLvq', - 'md5': 'd9a565b5379a99126ef94e1d7f9a383e', - 'note': 'HD video', - 'info_dict': { - 'id': 'pqLvq', - 'ext': 'mp4', - 'title': '_', - } - }, - ] - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - page = self._download_webpage('http://videobam.com/%s' % video_id, video_id, 'Downloading page') - - formats = [] - - for preference, format_id in enumerate(['low', 'high']): - mobj = re.search(r"%s: '(?P<url>[^']+)'" % format_id, page) - if not mobj: - continue - formats.append({ - 'url': mobj.group('url'), - 'ext': 'mp4', - 'format_id': format_id, 
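Above, the millisecond epoch embedded in `launchDate` is now reduced with `int_or_none(..., scale=1000)` instead of a manual division that could crash on missing data. A standalone check with an invented value:

```
import re
from youtube_dl.utils import int_or_none

launch_date = '/Date(1449187937000)/'  # invented sample
timestamp = int_or_none(
    re.search(r'/Date\((\d+)\)/', launch_date).group(1), scale=1000)
print(timestamp)  # 1449187937
```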
- 'preference': preference, - }) - - if not formats: - player_config = json.loads(self._html_search_regex(r'var player_config = ({.+?});', page, 'player config')) - formats = [{ - 'url': item['url'], - 'ext': 'mp4', - } for item in player_config['playlist'] if 'autoPlay' in item] - - self._sort_formats(formats) - - title = self._og_search_title(page, default='_', fatal=False) - description = self._og_search_description(page, default=None) - thumbnail = self._og_search_thumbnail(page) - uploader = self._html_search_regex(r'Upload by ([^<]+)</a>', page, 'uploader', fatal=False, default=None) - view_count = int_or_none( - self._html_search_regex(r'<strong>Views:</strong> (\d+) ', page, 'view count', fatal=False)) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'uploader': uploader, - 'view_count': view_count, - 'formats': formats, - 'age_limit': 18, - } diff --git a/youtube_dl/extractor/videofyme.py b/youtube_dl/extractor/videofyme.py index 94f9e9be9..cd3f50a63 100644 --- a/youtube_dl/extractor/videofyme.py +++ b/youtube_dl/extractor/videofyme.py @@ -2,8 +2,8 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( - find_xpath_attr, int_or_none, + parse_iso8601, ) @@ -18,33 +18,35 @@ class VideofyMeIE(InfoExtractor): 'id': '1100701', 'ext': 'mp4', 'title': 'This is VideofyMe', - 'description': None, + 'description': '', + 'upload_date': '20130326', + 'timestamp': 1364288959, 'uploader': 'VideofyMe', 'uploader_id': 'thisisvideofyme', 'view_count': int, + 'likes': int, + 'comment_count': int, }, - } def _real_extract(self, url): video_id = self._match_id(url) - config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id, - video_id) - video = config.find('video') - sources = video.find('sources') - url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key) - for key in ['on', 'av', 'off']] if node is not None) - video_url = url_node.find('url').text - view_count = int_or_none(self._search_regex( - r'([0-9]+)', video.find('views').text, 'view count', fatal=False)) + + config = self._download_json('http://vf-player-info-loader.herokuapp.com/%s.json' % video_id, video_id)['videoinfo'] + + video = config.get('video') + blog = config.get('blog', {}) return { 'id': video_id, - 'title': video.find('title').text, - 'url': video_url, - 'thumbnail': video.find('thumb').text, - 'description': video.find('description').text, - 'uploader': config.find('blog/name').text, - 'uploader_id': video.find('identifier').text, - 'view_count': view_count, + 'title': video['title'], + 'url': video['sources']['source']['url'], + 'thumbnail': video.get('thumb'), + 'description': video.get('description'), + 'timestamp': parse_iso8601(video.get('date')), + 'uploader': blog.get('name'), + 'uploader_id': blog.get('identifier'), + 'view_count': int_or_none(self._search_regex(r'([0-9]+)', video.get('views'), 'view count', fatal=False)), + 'likes': int_or_none(video.get('likes')), + 'comment_count': int_or_none(video.get('nrOfComments')), } diff --git a/youtube_dl/extractor/videolecturesnet.py b/youtube_dl/extractor/videolecturesnet.py deleted file mode 100644 index d6a7eb203..000000000 --- a/youtube_dl/extractor/videolecturesnet.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..utils import ( - find_xpath_attr, - int_or_none, - parse_duration, - unified_strdate, -) - - -class 
VideoLecturesNetIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/' - IE_NAME = 'videolectures.net' - - _TEST = { - 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/', - 'info_dict': { - 'id': 'promogram_igor_mekjavic_eng', - 'ext': 'mp4', - 'title': 'Automatics, robotics and biocybernetics', - 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', - 'upload_date': '20130627', - 'duration': 565, - 'thumbnail': 're:http://.*\.jpg', - }, - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - smil_url = 'http://videolectures.net/%s/video/1/smil.xml' % video_id - smil = self._download_xml(smil_url, video_id) - - title = find_xpath_attr(smil, './/meta', 'name', 'title').attrib['content'] - description_el = find_xpath_attr(smil, './/meta', 'name', 'abstract') - description = ( - None if description_el is None - else description_el.attrib['content']) - upload_date = unified_strdate( - find_xpath_attr(smil, './/meta', 'name', 'date').attrib['content']) - - switch = smil.find('.//switch') - duration = parse_duration(switch.attrib.get('dur')) - thumbnail_el = find_xpath_attr(switch, './image', 'type', 'thumbnail') - thumbnail = ( - None if thumbnail_el is None else thumbnail_el.attrib.get('src')) - - formats = [] - for v in switch.findall('./video'): - proto = v.attrib.get('proto') - if proto not in ['http', 'rtmp']: - continue - f = { - 'width': int_or_none(v.attrib.get('width')), - 'height': int_or_none(v.attrib.get('height')), - 'filesize': int_or_none(v.attrib.get('size')), - 'tbr': int_or_none(v.attrib.get('systemBitrate')) / 1000.0, - 'ext': v.attrib.get('ext'), - } - src = v.attrib['src'] - if proto == 'http': - if self._is_valid_url(src, video_id): - f['url'] = src - formats.append(f) - elif proto == 'rtmp': - f.update({ - 'url': v.attrib['streamer'], - 'play_path': src, - 'rtmp_real_time': True, - }) - formats.append(f) - self._sort_formats(formats) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'upload_date': upload_date, - 'duration': duration, - 'thumbnail': thumbnail, - 'formats': formats, - } diff --git a/youtube_dl/extractor/videomega.py b/youtube_dl/extractor/videomega.py index 78ff6310a..87aca327b 100644 --- a/youtube_dl/extractor/videomega.py +++ b/youtube_dl/extractor/videomega.py @@ -4,7 +4,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_urllib_request +from ..utils import sanitized_Request class VideoMegaIE(InfoExtractor): @@ -30,7 +30,7 @@ class VideoMegaIE(InfoExtractor): video_id = self._match_id(url) iframe_url = 'http://videomega.tv/cdn.php?ref=%s' % video_id - req = compat_urllib_request.Request(iframe_url) + req = sanitized_Request(iframe_url) req.add_header('Referer', url) req.add_header('Cookie', 'noadvtday=0') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/videoweed.py b/youtube_dl/extractor/videoweed.py deleted file mode 100644 index ca2e50935..000000000 --- a/youtube_dl/extractor/videoweed.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import unicode_literals - -from .novamov import NovaMovIE - - -class VideoWeedIE(NovaMovIE): - IE_NAME = 'videoweed' - IE_DESC = 'VideoWeed' - - _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'} - - _HOST = 'www.videoweed.es' - - _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' - _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>' - - 
_TEST = { - 'url': 'http://www.videoweed.es/file/b42178afbea14', - 'md5': 'abd31a2132947262c50429e1d16c1bfd', - 'info_dict': { - 'id': 'b42178afbea14', - 'ext': 'flv', - 'title': 'optical illusion dissapeared image magic illusion', - 'description': '' - }, - } diff --git a/youtube_dl/extractor/vidme.py b/youtube_dl/extractor/vidme.py index e0b55078b..3d63ed4f0 100644 --- a/youtube_dl/extractor/vidme.py +++ b/youtube_dl/extractor/vidme.py @@ -1,10 +1,12 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..compat import compat_HTTPError from ..utils import ( + ExtractorError, int_or_none, float_or_none, - str_to_int, + parse_iso8601, ) @@ -18,49 +20,185 @@ class VidmeIE(InfoExtractor): 'ext': 'mp4', 'title': 'Fishing for piranha - the easy way', 'description': 'source: https://www.facebook.com/photo.php?v=312276045600871', - 'duration': 119.92, + 'thumbnail': 're:^https?://.*\.jpg', 'timestamp': 1406313244, 'upload_date': '20140725', + 'age_limit': 0, + 'duration': 119.92, + 'view_count': int, + 'like_count': int, + 'comment_count': int, + }, + }, { + 'url': 'https://vid.me/Gc6M', + 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82', + 'info_dict': { + 'id': 'Gc6M', + 'ext': 'mp4', + 'title': 'O Mere Dil ke chain - Arnav and Khushi VM', 'thumbnail': 're:^https?://.*\.jpg', + 'timestamp': 1441211642, + 'upload_date': '20150902', + 'uploader': 'SunshineM', + 'uploader_id': '3552827', + 'age_limit': 0, + 'duration': 223.72, + 'view_count': int, + 'like_count': int, + 'comment_count': int, + }, + 'params': { + 'skip_download': True, }, }, { - # From http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching + # tests uploader field + 'url': 'https://vid.me/4Iib', + 'info_dict': { + 'id': '4Iib', + 'ext': 'mp4', + 'title': 'The Carver', + 'description': 'md5:e9c24870018ae8113be936645b93ba3c', + 'thumbnail': 're:^https?://.*\.jpg', + 'timestamp': 1433203629, + 'upload_date': '20150602', + 'uploader': 'Thomas', + 'uploader_id': '109747', + 'age_limit': 0, + 'duration': 97.859999999999999, + 'view_count': int, + 'like_count': int, + 'comment_count': int, + }, + 'params': { + 'skip_download': True, + }, + }, { + # nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching 'url': 'https://vid.me/e/Wmur', + 'info_dict': { + 'id': 'Wmur', + 'ext': 'mp4', + 'title': 'naked smoking & stretching', + 'thumbnail': 're:^https?://.*\.jpg', + 'timestamp': 1430931613, + 'upload_date': '20150506', + 'uploader': 'naked-yogi', + 'uploader_id': '1638622', + 'age_limit': 18, + 'duration': 653.26999999999998, + 'view_count': int, + 'like_count': int, + 'comment_count': int, + }, + 'params': { + 'skip_download': True, + }, + }, { + # nsfw, user-disabled + 'url': 'https://vid.me/dzGJ', + 'only_matching': True, + }, { + # suspended + 'url': 'https://vid.me/Ox3G', + 'only_matching': True, + }, { + # deleted + 'url': 'https://vid.me/KTPm', 'only_matching': True, + }, { + # no formats in the API response + 'url': 'https://vid.me/e5g', + 'info_dict': { + 'id': 'e5g', + 'ext': 'mp4', + 'title': 'Video upload (e5g)', + 'thumbnail': 're:^https?://.*\.jpg', + 'timestamp': 1401480195, + 'upload_date': '20140530', + 'uploader': None, + 'uploader_id': None, + 'age_limit': 0, + 'duration': 483, + 'view_count': int, + 'like_count': int, + 'comment_count': int, + }, + 'params': { + 'skip_download': True, + }, }] def _real_extract(self, url): - url = url.replace('vid.me/e/', 'vid.me/') video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - 
video_url = self._html_search_regex( - r'<source src="([^"]+)"', webpage, 'video URL') + try: + response = self._download_json( + 'https://api.vid.me/videoByUrl/%s' % video_id, video_id) + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: + response = self._parse_json(e.cause.read(), video_id) + else: + raise + + error = response.get('error') + if error: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, error), expected=True) + + video = response['video'] + + if video.get('state') == 'deleted': + raise ExtractorError( + 'Vidme said: Sorry, this video has been deleted.', + expected=True) + + if video.get('state') in ('user-disabled', 'suspended'): + raise ExtractorError( + 'Vidme said: This video has been suspended either due to a copyright claim, ' + 'or for violating the terms of use.', + expected=True) + + formats = [{ + 'format_id': f.get('type'), + 'url': f['uri'], + 'width': int_or_none(f.get('width')), + 'height': int_or_none(f.get('height')), + 'preference': 0 if f.get('type', '').endswith('clip') else 1, + } for f in video.get('formats', []) if f.get('uri')] + + if not formats and video.get('complete_url'): + formats.append({ + 'url': video.get('complete_url'), + 'width': int_or_none(video.get('width')), + 'height': int_or_none(video.get('height')), + }) + + self._sort_formats(formats) - title = self._og_search_title(webpage) - description = self._og_search_description(webpage, default='') - thumbnail = self._og_search_thumbnail(webpage) - timestamp = int_or_none(self._og_search_property('updated_time', webpage, fatal=False)) - width = int_or_none(self._og_search_property('video:width', webpage, fatal=False)) - height = int_or_none(self._og_search_property('video:height', webpage, fatal=False)) - duration = float_or_none(self._html_search_regex( - r'data-duration="([^"]+)"', webpage, 'duration', fatal=False)) - view_count = str_to_int(self._html_search_regex( - r'<(?:li|span) class="video_views">\s*([\d,\.]+)\s*plays?', webpage, 'view count', fatal=False)) - like_count = str_to_int(self._html_search_regex( - r'class="score js-video-vote-score"[^>]+data-score="([\d,\.\s]+)">', - webpage, 'like count', fatal=False)) + title = video['title'] + description = video.get('description') + thumbnail = video.get('thumbnail_url') + timestamp = parse_iso8601(video.get('date_created'), ' ') + uploader = video.get('user', {}).get('username') + uploader_id = video.get('user', {}).get('user_id') + age_limit = 18 if video.get('nsfw') is True else 0 + duration = float_or_none(video.get('duration')) + view_count = int_or_none(video.get('view_count')) + like_count = int_or_none(video.get('likes_count')) + comment_count = int_or_none(video.get('comment_count')) return { 'id': video_id, - 'url': video_url, - 'title': title, + 'title': title or 'Video upload (%s)' % video_id, 'description': description, 'thumbnail': thumbnail, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'age_limit': age_limit, 'timestamp': timestamp, - 'width': width, - 'height': height, 'duration': duration, 'view_count': view_count, 'like_count': like_count, + 'comment_count': comment_count, + 'formats': formats, } diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py index 08a5a7b8d..2ba9f31df 100644 --- a/youtube_dl/extractor/vidzi.py +++ b/youtube_dl/extractor/vidzi.py @@ -20,8 +20,14 @@ class VidziIE(InfoExtractor): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - video_url = self._html_search_regex( - 
r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url') + video_host = self._html_search_regex( + r'id=\'vplayer\'><img src="http://(.*?)/i', webpage, + 'video host') + video_hash = self._html_search_regex( + r'\|([a-z0-9]+)\|hls\|type', webpage, 'video_hash') + ext = self._html_search_regex( + r'\|tracks\|([a-z0-9]+)\|', webpage, 'video ext') + video_url = 'http://' + video_host + '/' + video_hash + '/v.' + ext title = self._html_search_regex( r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title') diff --git a/youtube_dl/extractor/vier.py b/youtube_dl/extractor/vier.py index 15377097e..c76c20614 100644 --- a/youtube_dl/extractor/vier.py +++ b/youtube_dl/extractor/vier.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import re +import itertools from .common import InfoExtractor @@ -91,31 +92,27 @@ class VierVideosIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) program = mobj.group('program') - webpage = self._download_webpage(url, program) - page_id = mobj.group('page') if page_id: page_id = int(page_id) start_page = page_id - last_page = start_page + 1 playlist_id = '%s-page%d' % (program, page_id) else: start_page = 0 - last_page = int(self._search_regex( - r'videos\?page=(\d+)">laatste</a>', - webpage, 'last page', default=0)) + 1 playlist_id = program entries = [] - for current_page_id in range(start_page, last_page): + for current_page_id in itertools.count(start_page): current_page = self._download_webpage( 'http://www.vier.be/%s/videos?page=%d' % (program, current_page_id), program, - 'Downloading page %d' % (current_page_id + 1)) if current_page_id != page_id else webpage + 'Downloading page %d' % (current_page_id + 1)) page_entries = [ self.url_result('http://www.vier.be' + video_url, 'Vier') for video_url in re.findall( r'<h3><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)] entries.extend(page_entries) + if page_id or '>Meer<' not in current_page: + break return self.playlist_result(entries, playlist_id) diff --git a/youtube_dl/extractor/viewster.py b/youtube_dl/extractor/viewster.py index 6ef36290b..185b1c119 100644 --- a/youtube_dl/extractor/viewster.py +++ b/youtube_dl/extractor/viewster.py @@ -3,25 +3,29 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( - compat_urllib_request, + compat_HTTPError, compat_urllib_parse, + compat_urllib_parse_unquote, ) from ..utils import ( determine_ext, + ExtractorError, int_or_none, parse_iso8601, + sanitized_Request, + HEADRequest, ) class ViewsterIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?viewster\.com/(?:serie|movie)/(?P<id>\d+-\d+-\d+)' + _VALID_URL = r'https?://(?:www\.)?viewster\.com/(?:serie|movie)/(?P<id>\d+-\d+-\d+)' _TESTS = [{ # movie, Type=Movie 'url': 'http://www.viewster.com/movie/1140-11855-000/the-listening-project/', - 'md5': '14d3cfffe66d57b41ae2d9c873416f01', + 'md5': 'e642d1b27fcf3a4ffa79f194f5adde36', 'info_dict': { 'id': '1140-11855-000', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'The listening Project', 'description': 'md5:bac720244afd1a8ea279864e67baa071', 'timestamp': 1214870400, @@ -31,10 +35,10 @@ class ViewsterIE(InfoExtractor): }, { # series episode, Type=Episode 'url': 'http://www.viewster.com/serie/1284-19427-001/the-world-and-a-wall/', - 'md5': 'd5434c80fcfdb61651cc2199a88d6ba3', + 'md5': '9243079a8531809efe1b089db102c069', 'info_dict': { 'id': '1284-19427-001', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'The World and a Wall', 'description': 'md5:24814cf74d3453fdf5bfef9716d073e3', 'timestamp': 1428192000, @@ -59,19 
+63,30 @@ class ViewsterIE(InfoExtractor):
             'description': 'md5:e7097a8fc97151e25f085c9eb7a1cdb1',
         },
         'playlist_mincount': 16,
+    }, {
+        # geo restricted series
+        'url': 'https://www.viewster.com/serie/1280-18794-002/',
+        'only_matching': True,
+    }, {
+        # geo restricted video
+        'url': 'https://www.viewster.com/serie/1280-18794-002/what-is-extraterritoriality-lawo/',
+        'only_matching': True,
     }]
 
     _ACCEPT_HEADER = 'application/json, text/javascript, */*; q=0.01'
-    _AUTH_TOKEN = '/YqhSYsx8EaU9Bsta3ojlA=='
 
     def _download_json(self, url, video_id, note='Downloading JSON metadata', fatal=True):
-        request = compat_urllib_request.Request(url)
+        request = sanitized_Request(url)
         request.add_header('Accept', self._ACCEPT_HEADER)
         request.add_header('Auth-token', self._AUTH_TOKEN)
         return super(ViewsterIE, self)._download_json(request, video_id, note, fatal=fatal)
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        # Get 'api_token' cookie
+        self._request_webpage(HEADRequest('http://www.viewster.com/'), video_id)
+        cookies = self._get_cookies('http://www.viewster.com/')
+        self._AUTH_TOKEN = compat_urllib_parse_unquote(cookies['api_token'].value)
 
         info = self._download_json(
             'https://public-api.viewster.com/search/%s' % video_id,
@@ -80,10 +95,16 @@ class ViewsterIE(InfoExtractor):
 
         entry_id = info.get('Id') or info['id']
 
         # an unfinished series has no Type
-        if info.get('Type') in ['Serie', None]:
-            episodes = self._download_json(
-                'https://public-api.viewster.com/series/%s/episodes' % entry_id,
-                video_id, 'Downloading series JSON')
+        if info.get('Type') in ('Serie', None):
+            try:
+                episodes = self._download_json(
+                    'https://public-api.viewster.com/series/%s/episodes' % entry_id,
+                    video_id, 'Downloading series JSON')
+            except ExtractorError as e:
+                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
+                    self.raise_geo_restricted()
+                else:
+                    raise
         entries = [
             self.url_result(
                 'http://www.viewster.com/movie/%s' % episode['OriginId'], 'Viewster')
@@ -93,7 +114,7 @@ class ViewsterIE(InfoExtractor):
             return self.playlist_result(entries, video_id, title, description)
 
         formats = []
-        for media_type in ('application/f4m+xml', 'application/x-mpegURL'):
+        for media_type in ('application/f4m+xml', 'application/x-mpegURL', 'video/mp4'):
             media = self._download_json(
                 'https://public-api.viewster.com/movies/%s/video?mediaType=%s'
                 % (entry_id, compat_urllib_parse.quote(media_type)),
@@ -110,14 +131,28 @@ class ViewsterIE(InfoExtractor):
                 formats.extend(self._extract_f4m_formats(
                     video_url, video_id, f4m_id='hds'))
             elif ext == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(
+                m3u8_formats = self._extract_m3u8_formats(
                     video_url, video_id, 'mp4', m3u8_id='hls',
-                    fatal=False  # m3u8 sometimes fail
-                ))
+                    fatal=False)  # the m3u8 download sometimes fails
+                if m3u8_formats:
+                    formats.extend(m3u8_formats)
             else:
-                formats.append({
+                format_id = media.get('Bitrate')
+                f = {
                     'url': video_url,
-                })
+                    'format_id': 'mp4-%s' % format_id,
+                    'height': int_or_none(media.get('Height')),
+                    'width': int_or_none(media.get('Width')),
+                    'preference': 1,
+                }
+                if format_id and not f['height']:
+                    f['height'] = int_or_none(self._search_regex(
+                        r'^(\d+)[pP]$', format_id, 'height', default=None))
+                formats.append(f)
+
+        if not formats and not info.get('LanguageSets') and not info.get('VODSettings'):
+            self.raise_geo_restricted()
+
         self._sort_formats(formats)
 
         synopsis = info.get('Synopsis', {})
diff --git a/youtube_dl/extractor/viidea.py b/youtube_dl/extractor/viidea.py
new file mode 100644
index 000000000..525e303d4
--- /dev/null
+++ 
b/youtube_dl/extractor/viidea.py @@ -0,0 +1,188 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urlparse, + compat_str, +) +from ..utils import ( + parse_duration, + js_to_json, + parse_iso8601, +) + + +class ViideaIE(InfoExtractor): + _VALID_URL = r'''(?x)http://(?:www\.)?(?: + videolectures\.net| + flexilearn\.viidea\.net| + presentations\.ocwconsortium\.org| + video\.travel-zoom\.si| + video\.pomp-forum\.si| + tv\.nil\.si| + video\.hekovnik.com| + video\.szko\.si| + kpk\.viidea\.com| + inside\.viidea\.net| + video\.kiberpipa\.org| + bvvideo\.si| + kongres\.viidea\.net| + edemokracija\.viidea\.com + )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$''' + + _TESTS = [{ + 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/', + 'info_dict': { + 'id': '20171', + 'display_id': 'promogram_igor_mekjavic_eng', + 'ext': 'mp4', + 'title': 'Automatics, robotics and biocybernetics', + 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1372349289, + 'upload_date': '20130627', + 'duration': 565, + }, + }, { + # video with invalid direct format links (HTTP 403) + 'url': 'http://videolectures.net/russir2010_filippova_nlp/', + 'info_dict': { + 'id': '14891', + 'display_id': 'russir2010_filippova_nlp', + 'ext': 'flv', + 'title': 'NLP at Google', + 'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1284375600, + 'upload_date': '20100913', + 'duration': 5352, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + # event playlist + 'url': 'http://videolectures.net/deeplearning2015_montreal/', + 'info_dict': { + 'id': '23181', + 'title': 'Deep Learning Summer School, Montreal 2015', + 'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1438560000, + }, + 'playlist_count': 30, + }, { + # multi part lecture + 'url': 'http://videolectures.net/mlss09uk_bishop_ibi/', + 'info_dict': { + 'id': '9737', + 'display_id': 'mlss09uk_bishop_ibi', + 'title': 'Introduction To Bayesian Inference', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1251622800, + }, + 'playlist': [{ + 'info_dict': { + 'id': '9737_part1', + 'display_id': 'mlss09uk_bishop_ibi_part1', + 'ext': 'wmv', + 'title': 'Introduction To Bayesian Inference (Part 1)', + 'thumbnail': 're:http://.*\.jpg', + 'duration': 4622, + 'timestamp': 1251622800, + 'upload_date': '20090830', + }, + }, { + 'info_dict': { + 'id': '9737_part2', + 'display_id': 'mlss09uk_bishop_ibi_part2', + 'ext': 'wmv', + 'title': 'Introduction To Bayesian Inference (Part 2)', + 'thumbnail': 're:http://.*\.jpg', + 'duration': 5641, + 'timestamp': 1251622800, + 'upload_date': '20090830', + }, + }], + 'playlist_count': 2, + }] + + def _real_extract(self, url): + lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups() + + webpage = self._download_webpage(url, lecture_slug) + + cfg = self._parse_json(self._search_regex( + [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function', + r'cfg\s*:\s*({[^}]+})'], + webpage, 'cfg'), lecture_slug, js_to_json) + + lecture_id = compat_str(cfg['obj_id']) + + base_url = self._proto_relative_url(cfg['livepipe'], 'http:') + + lecture_data = self._download_json( + '%s/site/api/lecture/%s?format=json' % (base_url, lecture_id), + lecture_id)['lecture'][0] + + lecture_info = { + 'id': lecture_id, + 'display_id': lecture_slug, + 'title': 
lecture_data['title'], + 'timestamp': parse_iso8601(lecture_data.get('time')), + 'description': lecture_data.get('description_wiki'), + 'thumbnail': lecture_data.get('thumb'), + } + + playlist_entries = [] + lecture_type = lecture_data.get('type') + parts = [compat_str(video) for video in cfg.get('videos', [])] + if parts: + multipart = len(parts) > 1 + + def extract_part(part_id): + smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id) + smil = self._download_smil(smil_url, lecture_id) + info = self._parse_smil(smil, smil_url, lecture_id) + info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id) + info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id) + if multipart: + info['title'] += ' (Part %s)' % part_id + switch = smil.find('.//switch') + if switch is not None: + info['duration'] = parse_duration(switch.attrib.get('dur')) + item_info = lecture_info.copy() + item_info.update(info) + return item_info + + if explicit_part_id or not multipart: + result = extract_part(explicit_part_id or parts[0]) + else: + result = { + '_type': 'multi_video', + 'entries': [extract_part(part) for part in parts], + } + result.update(lecture_info) + + # Immediately return explicitly requested part or non event item + if explicit_part_id or lecture_type != 'evt': + return result + + playlist_entries.append(result) + + # It's probably a playlist + if not parts or lecture_type == 'evt': + playlist_webpage = self._download_webpage( + '%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id) + entries = [ + self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea') + for _, video_url in re.findall( + r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)] + playlist_entries.extend(entries) + + playlist = self.playlist_result(playlist_entries, lecture_id) + playlist.update(lecture_info) + return playlist diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py index ddbd395c8..a63c23617 100644 --- a/youtube_dl/extractor/viki.py +++ b/youtube_dl/extractor/viki.py @@ -7,14 +7,14 @@ import hmac import hashlib import itertools +from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_age_limit, parse_iso8601, + sanitized_Request, ) -from ..compat import compat_urllib_request -from .common import InfoExtractor class VikiBaseIE(InfoExtractor): @@ -43,7 +43,7 @@ class VikiBaseIE(InfoExtractor): hashlib.sha1 ).hexdigest() url = self._API_URL_TEMPLATE % (query, sig) - return compat_urllib_request.Request( + return sanitized_Request( url, json.dumps(post_data).encode('utf-8')) if post_data else url def _call_api(self, path, video_id, note, timestamp=None, post_data=None): diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index 10d6745af..ce08e6955 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -8,27 +8,29 @@ import itertools from .common import InfoExtractor from ..compat import ( compat_HTTPError, - compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( + encode_dict, ExtractorError, InAdvancePagedList, int_or_none, RegexNotFoundError, + sanitized_Request, smuggle_url, std_headers, unified_strdate, unsmuggle_url, urlencode_postdata, unescapeHTML, + parse_filesize, ) class VimeoBaseInfoExtractor(InfoExtractor): _NETRC_MACHINE = 'vimeo' _LOGIN_REQUIRED = False + _LOGIN_URL = 'https://vimeo.com/log_in' def _login(self): (username, password) = self._get_login_info() @@ 
-37,21 +39,33 @@ class VimeoBaseInfoExtractor(InfoExtractor): raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True) return self.report_login() - login_url = 'https://vimeo.com/log_in' - webpage = self._download_webpage(login_url, None, False) - token = self._search_regex(r'xsrft":"(.*?)"', webpage, 'login token') - data = urlencode_postdata({ + webpage = self._download_webpage(self._LOGIN_URL, None, False) + token, vuid = self._extract_xsrft_and_vuid(webpage) + data = urlencode_postdata(encode_dict({ + 'action': 'login', 'email': username, 'password': password, - 'action': 'login', 'service': 'vimeo', 'token': token, - }) - login_request = compat_urllib_request.Request(login_url, data) + })) + login_request = sanitized_Request(self._LOGIN_URL, data) login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') - login_request.add_header('Cookie', 'xsrft=%s' % token) + login_request.add_header('Referer', self._LOGIN_URL) + self._set_vimeo_cookie('vuid', vuid) self._download_webpage(login_request, None, False, 'Wrong login info') + def _extract_xsrft_and_vuid(self, webpage): + xsrft = self._search_regex( + r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)', + webpage, 'login token', group='xsrft') + vuid = self._search_regex( + r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1', + webpage, 'vuid', group='vuid') + return xsrft, vuid + + def _set_vimeo_cookie(self, name, value): + self._set_cookie('vimeo.com', name, value) + class VimeoIE(VimeoBaseInfoExtractor): """Information extractor for vimeo.com.""" @@ -75,12 +89,12 @@ class VimeoIE(VimeoBaseInfoExtractor): 'info_dict': { 'id': '56015672', 'ext': 'mp4', - "upload_date": "20121220", - "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", - "uploader_id": "user7108434", - "uploader": "Filippo Valsorda", - "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", - "duration": 10, + 'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", + 'description': 'md5:2d3305bad981a06ff79f027f19865021', + 'upload_date': '20121220', + 'uploader_id': 'user7108434', + 'uploader': 'Filippo Valsorda', + 'duration': 10, }, }, { @@ -93,7 +107,7 @@ class VimeoIE(VimeoBaseInfoExtractor): 'uploader_id': 'openstreetmapus', 'uploader': 'OpenStreetMap US', 'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography', - 'description': 'md5:380943ec71b89736ff4bf27183233d09', + 'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30', 'duration': 1595, }, }, @@ -123,7 +137,7 @@ class VimeoIE(VimeoBaseInfoExtractor): 'uploader_id': 'user18948128', 'uploader': 'Jaime Marquínez Ferrándiz', 'duration': 10, - 'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.', + 'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026', }, 'params': { 'videopassword': 'youtube-dl', @@ -147,7 +161,6 @@ class VimeoIE(VimeoBaseInfoExtractor): }, { 'url': 'http://vimeo.com/76979871', - 'md5': '3363dd6ffebe3784d56f4132317fd446', 'note': 'Video with subtitles', 'info_dict': { 'id': '76979871', @@ -172,6 +185,29 @@ class VimeoIE(VimeoBaseInfoExtractor): 'uploader_id': 'user28849593', }, }, + { + # contains original format + 'url': 
'https://vimeo.com/33951933', + 'md5': '53c688fa95a55bf4b7293d37a89c5c53', + 'info_dict': { + 'id': '33951933', + 'ext': 'mp4', + 'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute', + 'uploader': 'The DMCI', + 'uploader_id': 'dmci', + 'upload_date': '20111220', + 'description': 'md5:ae23671e82d05415868f7ad1aec21147', + }, + }, + { + 'url': 'https://vimeo.com/109815029', + 'note': 'Video not completely processed, "failed" seed status', + 'only_matching': True, + }, + { + 'url': 'https://vimeo.com/groups/travelhd/videos/22439234', + 'only_matching': True, + }, ] @staticmethod @@ -193,17 +229,18 @@ class VimeoIE(VimeoBaseInfoExtractor): password = self._downloader.params.get('videopassword', None) if password is None: raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True) - token = self._search_regex(r'xsrft[\s=:"\']+([^"\']+)', webpage, 'login token') - data = urlencode_postdata({ + token, vuid = self._extract_xsrft_and_vuid(webpage) + data = urlencode_postdata(encode_dict({ 'password': password, 'token': token, - }) + })) if url.startswith('http://'): # vimeo only supports https now, but the user can give an http url url = url.replace('http://', 'https://') - password_request = compat_urllib_request.Request(url + '/password', data) + password_request = sanitized_Request(url + '/password', data) password_request.add_header('Content-Type', 'application/x-www-form-urlencoded') - password_request.add_header('Cookie', 'xsrft=%s' % token) + password_request.add_header('Referer', url) + self._set_vimeo_cookie('vuid', vuid) return self._download_webpage( password_request, video_id, 'Verifying the password', 'Wrong password') @@ -212,9 +249,9 @@ class VimeoIE(VimeoBaseInfoExtractor): password = self._downloader.params.get('videopassword', None) if password is None: raise ExtractorError('This video is protected by a password, use the --video-password option') - data = compat_urllib_parse.urlencode({'password': password}) + data = urlencode_postdata(encode_dict({'password': password})) pass_url = url + '/check-password' - password_request = compat_urllib_request.Request(pass_url, data) + password_request = sanitized_Request(pass_url, data) password_request.add_header('Content-Type', 'application/x-www-form-urlencoded') return self._download_json( password_request, video_id, @@ -243,7 +280,7 @@ class VimeoIE(VimeoBaseInfoExtractor): url = 'https://vimeo.com/' + video_id # Retrieve video webpage to extract further information - request = compat_urllib_request.Request(url, None, headers) + request = sanitized_Request(url, None, headers) try: webpage = self._download_webpage(request, video_id) except ExtractorError as ee: @@ -263,20 +300,30 @@ class VimeoIE(VimeoBaseInfoExtractor): self.report_extraction(video_id) vimeo_config = self._search_regex( - r'vimeo\.config\s*=\s*({.+?});', webpage, + r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage, 'vimeo config', default=None) if vimeo_config: seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {}) if seed_status.get('state') == 'failed': raise ExtractorError( - '%s returned error: %s' % (self.IE_NAME, seed_status['title']), + '%s said: %s' % (self.IE_NAME, seed_status['title']), expected=True) # Extract the config JSON try: try: config_url = self._html_search_regex( - r' data-config-url="(.+?)"', webpage, 'config URL') + r' data-config-url="(.+?)"', webpage, + 'config URL', default=None) + if not config_url: + # Sometimes new react-based page is 
served instead of old one that requires
+                # a different config URL extraction approach (see
+                # https://github.com/rg3/youtube-dl/pull/7209)
+                vimeo_clip_page_config = self._search_regex(
+                    r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage,
+                    'vimeo clip page config')
+                config_url = self._parse_json(
+                    vimeo_clip_page_config, video_id)['player']['config_url']
                 config_json = self._download_webpage(config_url, video_id)
                 config = json.loads(config_json)
             except RegexNotFoundError:
@@ -359,41 +406,44 @@ class VimeoIE(VimeoBaseInfoExtractor):
             like_count = None
             comment_count = None
 
-        # Vimeo specific: extract request signature and timestamp
-        sig = config['request']['signature']
-        timestamp = config['request']['timestamp']
-
-        # Vimeo specific: extract video codec and quality information
-        # First consider quality, then codecs, then take everything
-        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
-        files = {'hd': [], 'sd': [], 'other': []}
-        config_files = config["video"].get("files") or config["request"].get("files")
-        for codec_name, codec_extension in codecs:
-            for quality in config_files.get(codec_name, []):
-                format_id = '-'.join((codec_name, quality)).lower()
-                key = quality if quality in files else 'other'
-                video_url = None
-                if isinstance(config_files[codec_name], dict):
-                    file_info = config_files[codec_name][quality]
-                    video_url = file_info.get('url')
-                else:
-                    file_info = {}
-                if video_url is None:
-                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-                        % (video_id, sig, timestamp, quality, codec_name.upper())
-
-                files[key].append({
-                    'ext': codec_extension,
-                    'url': video_url,
-                    'format_id': format_id,
-                    'width': file_info.get('width'),
-                    'height': file_info.get('height'),
-                })
         formats = []
-        for key in ('other', 'sd', 'hd'):
-            formats += files[key]
-        if len(formats) == 0:
-            raise ExtractorError('No known codec found')
+        download_request = sanitized_Request('https://vimeo.com/%s?action=load_download_config' % video_id, headers={
+            'X-Requested-With': 'XMLHttpRequest'})
+        download_data = self._download_json(download_request, video_id, fatal=False)
+        if download_data:
+            source_file = download_data.get('source_file')
+            if source_file and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
+                formats.append({
+                    'url': source_file['download_url'],
+                    'ext': source_file['extension'].lower(),
+                    'width': int_or_none(source_file.get('width')),
+                    'height': int_or_none(source_file.get('height')),
+                    'filesize': parse_filesize(source_file.get('size')),
+                    'format_id': source_file.get('public_name', 'Original'),
+                    'preference': 1,
+                })
+        config_files = config['video'].get('files') or config['request'].get('files', {})
+        for f in config_files.get('progressive', []):
+            video_url = f.get('url')
+            if not video_url:
+                continue
+            formats.append({
+                'url': video_url,
+                'format_id': 'http-%s' % f.get('quality'),
+                'width': int_or_none(f.get('width')),
+                'height': int_or_none(f.get('height')),
+                'fps': int_or_none(f.get('fps')),
+                'tbr': int_or_none(f.get('bitrate')),
+            })
+        m3u8_url = config_files.get('hls', {}).get('url')
+        if m3u8_url:
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+            if m3u8_formats:
+                formats.extend(m3u8_formats)
+        # Bitrates are completely broken. A single m3u8 may contain entries in kbps and bps
+        # at the same time without actual units specified. This leads to wrong sorting.
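+        # Hence tbr is left out of the preference order below; resolution and fps
+        # are the more reliable sort keys here.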
+ self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'format_id')) subtitles = {} text_tracks = config['request'].get('text_tracks') @@ -422,10 +472,11 @@ class VimeoIE(VimeoBaseInfoExtractor): } -class VimeoChannelIE(InfoExtractor): +class VimeoChannelIE(VimeoBaseInfoExtractor): IE_NAME = 'vimeo:channel' _VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])' _MORE_PAGES_INDICATOR = r'<a.+?rel="next"' + _TITLE = None _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"' _TESTS = [{ 'url': 'https://vimeo.com/channels/tributes', @@ -440,7 +491,7 @@ class VimeoChannelIE(InfoExtractor): return '%s/videos/page:%d/' % (base_url, pagenum) def _extract_list_title(self, webpage): - return self._html_search_regex(self._TITLE_RE, webpage, 'list title') + return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title') def _login_list_password(self, page_url, list_id, webpage): login_form = self._search_regex( @@ -453,23 +504,23 @@ class VimeoChannelIE(InfoExtractor): if password is None: raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True) fields = self._hidden_inputs(login_form) - token = self._search_regex(r'xsrft[\s=:"\']+([^"\']+)', webpage, 'login token') + token, vuid = self._extract_xsrft_and_vuid(webpage) fields['token'] = token fields['password'] = password - post = urlencode_postdata(fields) + post = urlencode_postdata(encode_dict(fields)) password_path = self._search_regex( r'action="([^"]+)"', login_form, 'password URL') password_url = compat_urlparse.urljoin(page_url, password_path) - password_request = compat_urllib_request.Request(password_url, post) + password_request = sanitized_Request(password_url, post) password_request.add_header('Content-type', 'application/x-www-form-urlencoded') - self._set_cookie('vimeo.com', 'xsrft', token) + self._set_vimeo_cookie('vuid', vuid) + self._set_vimeo_cookie('xsrft', token) return self._download_webpage( password_request, list_id, 'Verifying the password', 'Wrong password') - def _extract_videos(self, list_id, base_url): - video_ids = [] + def _title_and_entries(self, list_id, base_url): for pagenum in itertools.count(1): page_url = self._page_url(base_url, pagenum) webpage = self._download_webpage( @@ -478,18 +529,18 @@ class VimeoChannelIE(InfoExtractor): if pagenum == 1: webpage = self._login_list_password(page_url, list_id, webpage) + yield self._extract_list_title(webpage) + + for video_id in re.findall(r'id="clip_(\d+?)"', webpage): + yield self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo') - video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage)) if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None: break - entries = [self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo') - for video_id in video_ids] - return {'_type': 'playlist', - 'id': list_id, - 'title': self._extract_list_title(webpage), - 'entries': entries, - } + def _extract_videos(self, list_id, base_url): + title_and_entries = self._title_and_entries(list_id, base_url) + list_title = next(title_and_entries) + return self.playlist_result(title_and_entries, list_id, list_title) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -499,7 +550,7 @@ class VimeoChannelIE(InfoExtractor): class VimeoUserIE(VimeoChannelIE): IE_NAME = 'vimeo:user' - _VALID_URL = r'https://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)' + _VALID_URL = 
r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)' _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>' _TESTS = [{ 'url': 'https://vimeo.com/nkistudio/videos', @@ -550,7 +601,7 @@ class VimeoAlbumIE(VimeoChannelIE): class VimeoGroupsIE(VimeoAlbumIE): IE_NAME = 'vimeo:group' - _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)' + _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)(?:/(?!videos?/\d+)|$)' _TESTS = [{ 'url': 'https://vimeo.com/groups/rolexawards', 'info_dict': { @@ -603,14 +654,14 @@ class VimeoReviewIE(InfoExtractor): return self.url_result(player_url, 'Vimeo', video_id) -class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE): +class VimeoWatchLaterIE(VimeoChannelIE): IE_NAME = 'vimeo:watchlater' IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)' - _VALID_URL = r'https://vimeo\.com/home/watchlater|:vimeowatchlater' + _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater' + _TITLE = 'Watch Later' _LOGIN_REQUIRED = True - _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<' _TESTS = [{ - 'url': 'https://vimeo.com/home/watchlater', + 'url': 'https://vimeo.com/watchlater', 'only_matching': True, }] @@ -619,14 +670,14 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE): def _page_url(self, base_url, pagenum): url = '%s/page:%d/' % (base_url, pagenum) - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) # Set the header to get a partial html page with the ids, # the normal page doesn't contain them. request.add_header('X-Requested-With', 'XMLHttpRequest') return request def _real_extract(self, url): - return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater') + return self._extract_videos('watchlater', 'https://vimeo.com/watchlater') class VimeoLikesIE(InfoExtractor): diff --git a/youtube_dl/extractor/vine.py b/youtube_dl/extractor/vine.py index c733a48fa..cb2a4b0b5 100644 --- a/youtube_dl/extractor/vine.py +++ b/youtube_dl/extractor/vine.py @@ -1,10 +1,14 @@ +# coding: utf-8 from __future__ import unicode_literals import re import itertools from .common import InfoExtractor -from ..utils import unified_strdate +from ..utils import ( + int_or_none, + unified_strdate, +) class VineIE(InfoExtractor): @@ -17,10 +21,12 @@ class VineIE(InfoExtractor): 'ext': 'mp4', 'title': 'Chicken.', 'alt_title': 'Vine by Jack Dorsey', - 'description': 'Chicken.', 'upload_date': '20130519', 'uploader': 'Jack Dorsey', 'uploader_id': '76', + 'like_count': int, + 'comment_count': int, + 'repost_count': int, }, }, { 'url': 'https://vine.co/v/MYxVapFvz2z', @@ -29,11 +35,13 @@ class VineIE(InfoExtractor): 'id': 'MYxVapFvz2z', 'ext': 'mp4', 'title': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14', - 'alt_title': 'Vine by Luna', - 'description': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14', + 'alt_title': 'Vine by Mars Ruiz', 'upload_date': '20140815', - 'uploader': 'Luna', + 'uploader': 'Mars Ruiz', 'uploader_id': '1102363502380728320', + 'like_count': int, + 'comment_count': int, + 'repost_count': int, }, }, { 'url': 'https://vine.co/v/bxVjBbZlPUH', @@ -43,14 +51,33 @@ class VineIE(InfoExtractor): 'ext': 'mp4', 'title': '#mw3 #ac130 #killcam #angelofdeath', 'alt_title': 'Vine by Z3k3', - 'description': '#mw3 #ac130 #killcam #angelofdeath', 'upload_date': '20130430', 'uploader': 'Z3k3', 'uploader_id': '936470460173008896', + 'like_count': int, + 'comment_count': 
int, + 'repost_count': int, }, }, { 'url': 'https://vine.co/oembed/MYxVapFvz2z.json', 'only_matching': True, + }, { + 'url': 'https://vine.co/v/e192BnZnZ9V', + 'info_dict': { + 'id': 'e192BnZnZ9V', + 'ext': 'mp4', + 'title': 'ยิ้ม~ เขิน~ อาย~ น่าร้ากอ้ะ >//< @n_whitewo @orlameena #lovesicktheseries #lovesickseason2', + 'alt_title': 'Vine by Pimry_zaa', + 'upload_date': '20150705', + 'uploader': 'Pimry_zaa', + 'uploader_id': '1135760698325307392', + 'like_count': int, + 'comment_count': int, + 'repost_count': int, + }, + 'params': { + 'skip_download': True, + }, }] def _real_extract(self, url): @@ -58,32 +85,33 @@ class VineIE(InfoExtractor): webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id) data = self._parse_json( - self._html_search_regex( - r'window\.POST_DATA = { %s: ({.+?}) };\s*</script>' % video_id, + self._search_regex( + r'window\.POST_DATA\s*=\s*{\s*%s\s*:\s*({.+?})\s*};\s*</script>' % video_id, webpage, 'vine data'), video_id) formats = [{ 'format_id': '%(format)s-%(rate)s' % f, - 'vcodec': f['format'], - 'quality': f['rate'], + 'vcodec': f.get('format'), + 'quality': f.get('rate'), 'url': f['videoUrl'], - } for f in data['videoUrls']] + } for f in data['videoUrls'] if f.get('videoUrl')] self._sort_formats(formats) + username = data.get('username') + return { 'id': video_id, - 'title': self._og_search_title(webpage), - 'alt_title': self._og_search_description(webpage, default=None), - 'description': data['description'], - 'thumbnail': data['thumbnailUrl'], - 'upload_date': unified_strdate(data['created']), - 'uploader': data['username'], - 'uploader_id': data['userIdStr'], - 'like_count': data['likes']['count'], - 'comment_count': data['comments']['count'], - 'repost_count': data['reposts']['count'], + 'title': data.get('description') or self._og_search_title(webpage), + 'alt_title': 'Vine by %s' % username if username else self._og_search_description(webpage, default=None), + 'thumbnail': data.get('thumbnailUrl'), + 'upload_date': unified_strdate(data.get('created')), + 'uploader': username, + 'uploader_id': data.get('userIdStr'), + 'like_count': int_or_none(data.get('likes', {}).get('count')), + 'comment_count': int_or_none(data.get('comments', {}).get('count')), + 'repost_count': int_or_none(data.get('reposts', {}).get('count')), 'formats': formats, } diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py index c30c5a8e5..90557fa61 100644 --- a/youtube_dl/extractor/vk.py +++ b/youtube_dl/extractor/vk.py @@ -8,15 +8,17 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, ) from ..utils import ( ExtractorError, orderedSet, + sanitized_Request, str_to_int, unescapeHTML, unified_strdate, ) +from .vimeo import VimeoIE +from .pladform import PladformIE class VKIE(InfoExtractor): @@ -163,6 +165,11 @@ class VKIE(InfoExtractor): # vk wrapper 'url': 'http://www.biqle.ru/watch/847655_160197695', 'only_matching': True, + }, + { + # pladform embed + 'url': 'https://vk.com/video-76116461_171554880', + 'only_matching': True, } ] @@ -181,7 +188,7 @@ class VKIE(InfoExtractor): 'pass': password.encode('cp1251'), }) - request = compat_urllib_request.Request( + request = sanitized_Request( 'https://login.vk.com/?act=login', compat_urllib_parse.urlencode(login_form).encode('utf-8')) login_page = self._download_webpage( @@ -249,10 +256,17 @@ class VKIE(InfoExtractor): if youtube_url: return self.url_result(youtube_url, 'Youtube') + vimeo_url = VimeoIE._extract_vimeo_url(url, info_page) + if 
vimeo_url is not None: + return self.url_result(vimeo_url) + + pladform_url = PladformIE._extract_url(info_page) + if pladform_url: + return self.url_result(pladform_url) + m_rutube = re.search( r'\ssrc="((?:https?:)?//rutube\.ru\\?/video\\?/embed(?:.*?))\\?"', info_page) if m_rutube is not None: - self.to_screen('rutube video detected') rutube_url = self._proto_relative_url( m_rutube.group(1).replace('\\', '')) return self.url_result(rutube_url) @@ -276,9 +290,13 @@ class VKIE(InfoExtractor): mobj.group(1) + ' ' + mobj.group(2) upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2)) - view_count = str_to_int(self._search_regex( - r'"mv_views_count_number"[^>]*>([\d,.]+) views<', - info_page, 'view count', fatal=False)) + view_count = None + views = self._html_search_regex( + r'"mv_views_count_number"[^>]*>(.+?\bviews?)<', + info_page, 'view count', fatal=False) + if views: + view_count = str_to_int(self._search_regex( + r'([\d,.]+)', views, 'view count', fatal=False)) formats = [{ 'format_id': k, diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py new file mode 100644 index 000000000..86c1cb5ef --- /dev/null +++ b/youtube_dl/extractor/vlive.py @@ -0,0 +1,86 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import hmac +from hashlib import sha1 +from base64 import b64encode +from time import time + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + determine_ext +) +from ..compat import compat_urllib_parse + + +class VLiveIE(InfoExtractor): + IE_NAME = 'vlive' + # www.vlive.tv/video/ links redirect to m.vlive.tv/video/ for mobile devices + _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://m.vlive.tv/video/1326', + 'md5': 'cc7314812855ce56de70a06a27314983', + 'info_dict': { + 'id': '1326', + 'ext': 'mp4', + 'title': '[V] Girl\'s Day\'s Broadcast', + 'creator': 'Girl\'s Day', + }, + } + _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH' + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + 'http://m.vlive.tv/video/%s' % video_id, + video_id, note='Download video page') + + title = self._og_search_title(webpage) + thumbnail = self._og_search_thumbnail(webpage) + creator = self._html_search_regex( + r'<span[^>]+class="name">([^<>]+)</span>', webpage, 'creator') + + url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' 
% video_id + msgpad = '%.0f' % (time() * 1000) + md = b64encode( + hmac.new(self._SECRET.encode('ascii'), + (url[:255] + msgpad).encode('ascii'), sha1).digest() + ) + url += '&' + compat_urllib_parse.urlencode({'msgpad': msgpad, 'md': md}) + playinfo = self._download_json(url, video_id, 'Downloading video json') + + if playinfo.get('message', '') != 'success': + raise ExtractorError(playinfo.get('message', 'JSON request unsuccessful')) + + if not playinfo.get('result'): + raise ExtractorError('No videos found.') + + formats = [] + for vid in playinfo['result'].get('videos', {}).get('list', []): + formats.append({ + 'url': vid['source'], + 'ext': 'mp4', + 'abr': vid.get('bitrate', {}).get('audio'), + 'vbr': vid.get('bitrate', {}).get('video'), + 'format_id': vid['encodingOption']['name'], + 'height': vid.get('height'), + 'width': vid.get('width'), + }) + self._sort_formats(formats) + + subtitles = {} + for caption in playinfo['result'].get('captions', {}).get('list', []): + subtitles[caption['language']] = [ + {'ext': determine_ext(caption['source'], default_ext='vtt'), + 'url': caption['source']}] + + return { + 'id': video_id, + 'title': title, + 'creator': creator, + 'thumbnail': thumbnail, + 'formats': formats, + 'subtitles': subtitles, + } diff --git a/youtube_dl/extractor/vodlocker.py b/youtube_dl/extractor/vodlocker.py index ccf1928b5..357594a11 100644 --- a/youtube_dl/extractor/vodlocker.py +++ b/youtube_dl/extractor/vodlocker.py @@ -2,14 +2,15 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, +from ..compat import compat_urllib_parse +from ..utils import ( + ExtractorError, + sanitized_Request, ) class VodlockerIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?P<id>[0-9a-zA-Z]+)(?:\..*?)?' + _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:\..*?)?' _TESTS = [{ 'url': 'http://vodlocker.com/e8wvyzz4sl42', @@ -26,12 +27,18 @@ class VodlockerIE(InfoExtractor): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) + if any(p in webpage for p in ( + '>THIS FILE WAS DELETED<', + '>File Not Found<', + 'The file you were looking for could not be found, sorry for any inconvenience.<')): + raise ExtractorError('Video %s does not exist' % video_id, expected=True) + fields = self._hidden_inputs(webpage) if fields['op'] == 'download1': self._sleep(3, video_id) # they do detect when requests happen too fast! 
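+        # Re-submit the hidden form fields; the page returned next is the one
+        # the actual media URL is extracted from.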
post = compat_urllib_parse.urlencode(fields) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage( req, video_id, 'Downloading video page') diff --git a/youtube_dl/extractor/voicerepublic.py b/youtube_dl/extractor/voicerepublic.py index 254383d6c..93d15a556 100644 --- a/youtube_dl/extractor/voicerepublic.py +++ b/youtube_dl/extractor/voicerepublic.py @@ -3,14 +3,12 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( ExtractorError, determine_ext, int_or_none, + sanitized_Request, ) @@ -37,7 +35,7 @@ class VoiceRepublicIE(InfoExtractor): def _real_extract(self, url): display_id = self._match_id(url) - req = compat_urllib_request.Request( + req = sanitized_Request( compat_urlparse.urljoin(url, '/talks/%s' % display_id)) # Older versions of Firefox get redirected to an "upgrade browser" page req.add_header('User-Agent', 'youtube-dl') diff --git a/youtube_dl/extractor/washingtonpost.py b/youtube_dl/extractor/washingtonpost.py index 72eb010f8..ec8b99998 100644 --- a/youtube_dl/extractor/washingtonpost.py +++ b/youtube_dl/extractor/washingtonpost.py @@ -19,25 +19,25 @@ class WashingtonPostIE(InfoExtractor): 'title': 'Sinkhole of bureaucracy', }, 'playlist': [{ - 'md5': '79132cc09ec5309fa590ae46e4cc31bc', + 'md5': 'b9be794ceb56c7267d410a13f99d801a', 'info_dict': { 'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f', 'ext': 'mp4', 'title': 'Breaking Points: The Paper Mine', - 'duration': 1287, + 'duration': 1290, 'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.', 'uploader': 'The Washington Post', 'timestamp': 1395527908, 'upload_date': '20140322', }, }, { - 'md5': 'e1d5734c06865cc504ad99dc2de0d443', + 'md5': '1fff6a689d8770966df78c8cb6c8c17c', 'info_dict': { 'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f', 'ext': 'mp4', 'title': 'The town bureaucracy sustains', 'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. 
We set out to find out what it\'s like to do paperwork 230 feet underground.', - 'duration': 2217, + 'duration': 2220, 'timestamp': 1395528005, 'upload_date': '20140322', 'uploader': 'The Washington Post', diff --git a/youtube_dl/extractor/wdr.py b/youtube_dl/extractor/wdr.py index b46802306..ef096cbd2 100644 --- a/youtube_dl/extractor/wdr.py +++ b/youtube_dl/extractor/wdr.py @@ -10,8 +10,8 @@ from ..compat import ( compat_urlparse, ) from ..utils import ( - determine_ext, unified_strdate, + qualities, ) @@ -33,6 +33,7 @@ class WDRIE(InfoExtractor): 'params': { 'skip_download': True, }, + 'skip': 'Page Not Found', }, { 'url': 'http://www1.wdr.de/themen/av/videomargaspiegelisttot101-videoplayer.html', @@ -47,6 +48,7 @@ class WDRIE(InfoExtractor): 'params': { 'skip_download': True, }, + 'skip': 'Page Not Found', }, { 'url': 'http://www1.wdr.de/themen/kultur/audioerlebtegeschichtenmargaspiegel100-audioplayer.html', @@ -71,6 +73,7 @@ class WDRIE(InfoExtractor): 'upload_date': '20140717', 'is_live': False }, + 'skip': 'Page Not Found', }, { 'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html', @@ -83,10 +86,10 @@ class WDRIE(InfoExtractor): 'url': 'http://www1.wdr.de/mediathek/video/livestream/index.html', 'info_dict': { 'id': 'mdb-103364', - 'title': 're:^WDR Fernsehen [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'title': 're:^WDR Fernsehen Live [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:ae2ff888510623bf8d4b115f95a9b7c9', 'ext': 'flv', - 'upload_date': '20150212', + 'upload_date': '20150101', 'is_live': True }, 'params': { @@ -150,25 +153,52 @@ class WDRIE(InfoExtractor): if upload_date: upload_date = unified_strdate(upload_date) + formats = [] + preference = qualities(['S', 'M', 'L', 'XL']) + if video_url.endswith('.f4m'): - video_url += '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18' - ext = 'flv' + f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18', page_id, f4m_id='hds', fatal=False) + if f4m_formats: + formats.extend(f4m_formats) elif video_url.endswith('.smil'): - fmt = self._extract_smil_formats(video_url, page_id)[0] - video_url = fmt['url'] - sep = '&' if '?' in video_url else '?' 
- video_url += sep - video_url += 'hdcore=3.3.0&plugin=aasp-3.3.0.99.43' - ext = fmt['ext'] + smil_formats = self._extract_smil_formats(video_url, page_id, False, { + 'hdcore': '3.3.0', + 'plugin': 'aasp-3.3.0.99.43', + }) + if smil_formats: + formats.extend(smil_formats) else: - ext = determine_ext(video_url) + formats.append({ + 'url': video_url, + 'http_headers': { + 'User-Agent': 'mobile', + }, + }) + + m3u8_url = self._search_regex(r'rel="adaptiv"[^>]+href="([^"]+)"', webpage, 'm3u8 url', default=None) + if m3u8_url: + m3u8_formats = self._extract_m3u8_formats(m3u8_url, page_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) + if m3u8_formats: + formats.extend(m3u8_formats) + + direct_urls = re.findall(r'rel="web(S|M|L|XL)"[^>]+href="([^"]+)"', webpage) + if direct_urls: + for quality, video_url in direct_urls: + formats.append({ + 'url': video_url, + 'preference': preference(quality), + 'http_headers': { + 'User-Agent': 'mobile', + }, + }) + + self._sort_formats(formats) description = self._html_search_meta('Description', webpage, 'description') return { 'id': page_id, - 'url': video_url, - 'ext': ext, + 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, diff --git a/youtube_dl/extractor/wimp.py b/youtube_dl/extractor/wimp.py index f69d46a28..041ff6c55 100644 --- a/youtube_dl/extractor/wimp.py +++ b/youtube_dl/extractor/wimp.py @@ -1,52 +1,50 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor from .youtube import YoutubeIE class WimpIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?wimp\.com/([^/]+)/' + _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.wimp.com/maruexhausted/', - 'md5': 'f1acced123ecb28d9bb79f2479f2b6a1', + 'md5': 'ee21217ffd66d058e8b16be340b74883', 'info_dict': { 'id': 'maruexhausted', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'Maru is exhausted.', 'description': 'md5:57e099e857c0a4ea312542b684a869b8', } }, { - # youtube video 'url': 'http://www.wimp.com/clowncar/', + 'md5': '4e2986c793694b55b37cf92521d12bb4', 'info_dict': { - 'id': 'cG4CEr2aiSg', + 'id': 'clowncar', 'ext': 'mp4', - 'title': 'Basset hound clown car...incredible!', - 'description': 'md5:8d228485e0719898c017203f900b3a35', - 'uploader': 'Gretchen Hoey', - 'uploader_id': 'gretchenandjeff1', - 'upload_date': '20140303', + 'title': 'It\'s like a clown car.', + 'description': 'md5:0e56db1370a6e49c5c1d19124c0d2fb2', }, - 'add_ie': ['Youtube'], }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group(1) + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) - video_url = self._search_regex( - [r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"], - webpage, 'video URL') - if YoutubeIE.suitable(video_url): - self.to_screen('Found YouTube video') + + youtube_id = self._search_regex( + r"videoId\s*:\s*[\"']([0-9A-Za-z_-]{11})[\"']", + webpage, 'video URL', default=None) + if youtube_id: return { '_type': 'url', - 'url': video_url, + 'url': youtube_id, 'ie_key': YoutubeIE.ie_key(), } + video_url = self._search_regex( + r'<video[^>]+>\s*<source[^>]+src=(["\'])(?P<url>.+?)\1', + webpage, 'video URL', group='url') + return { 'id': video_id, 'url': video_url, diff --git a/youtube_dl/extractor/wistia.py b/youtube_dl/extractor/wistia.py index 13a079151..fdb16d91c 100644 --- a/youtube_dl/extractor/wistia.py +++ b/youtube_dl/extractor/wistia.py @@ -1,8 +1,10 @@ from __future__ import unicode_literals from 
.common import InfoExtractor -from ..compat import compat_urllib_request -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + sanitized_Request, +) class WistiaIE(InfoExtractor): @@ -23,7 +25,7 @@ class WistiaIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - request = compat_urllib_request.Request(self._API_URL.format(video_id)) + request = sanitized_Request(self._API_URL.format(video_id)) request.add_header('Referer', url) # Some videos require this. data_json = self._download_json(request, video_id) if data_json.get('error'): diff --git a/youtube_dl/extractor/wsj.py b/youtube_dl/extractor/wsj.py index 2ddf29a69..5a897371d 100644 --- a/youtube_dl/extractor/wsj.py +++ b/youtube_dl/extractor/wsj.py @@ -84,6 +84,5 @@ class WSJIE(InfoExtractor): 'duration': duration, 'upload_date': upload_date, 'title': title, - 'formats': formats, 'categories': categories, } diff --git a/youtube_dl/extractor/gorillavid.py b/youtube_dl/extractor/xfileshare.py index f006f0cb1..a3236e66c 100644 --- a/youtube_dl/extractor/gorillavid.py +++ b/youtube_dl/extractor/xfileshare.py @@ -1,24 +1,23 @@ -# -*- coding: utf-8 -*- +# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, + encode_dict, int_or_none, + sanitized_Request, ) -class GorillaVidIE(InfoExtractor): - IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net' +class XFileShareIE(InfoExtractor): + IE_DESC = 'XFileShare based sites: GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net, filehoot.com and vidto.me' _VALID_URL = r'''(?x) https?://(?P<host>(?:www\.)? - (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net))/ + (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com|vidto\.me))/ (?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)? 
''' @@ -67,13 +66,29 @@ class GorillaVidIE(InfoExtractor): }, { 'url': 'http://movpod.in/0wguyyxi1yca', 'only_matching': True, + }, { + 'url': 'http://filehoot.com/3ivfabn7573c.html', + 'info_dict': { + 'id': '3ivfabn7573c', + 'ext': 'mp4', + 'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4', + 'thumbnail': 're:http://.*\.jpg', + } + }, { + 'url': 'http://vidto.me/ku5glz52nqe1.html', + 'info_dict': { + 'id': 'ku5glz52nqe1', + 'ext': 'mp4', + 'title': 'test' + } }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id) + url = 'http://%s/%s' % (mobj.group('host'), video_id) + webpage = self._download_webpage(url, video_id) if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None: raise ExtractorError('Video %s does not exist' % video_id, expected=True) @@ -87,20 +102,25 @@ class GorillaVidIE(InfoExtractor): if countdown: self._sleep(countdown, video_id) - post = compat_urllib_parse.urlencode(fields) + post = compat_urllib_parse.urlencode(encode_dict(fields)) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage(req, video_id, 'Downloading video page') - title = self._search_regex( - [r'style="z-index: [0-9]+;">([^<]+)</span>', r'>Watch (.+) '], - webpage, 'title', default=None) or self._og_search_title(webpage) + title = (self._search_regex( + [r'style="z-index: [0-9]+;">([^<]+)</span>', + r'<td nowrap>([^<]+)</td>', + r'>Watch (.+) ', + r'<h2 class="video-page-head">([^<]+)</h2>'], + webpage, 'title', default=None) or self._og_search_title(webpage)).strip() video_url = self._search_regex( - r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url') + [r'file\s*:\s*["\'](http[^"\']+)["\'],', + r'file_link\s*=\s*\'(https?:\/\/[0-9a-zA-z.\/\-_]+)'], + webpage, 'file url') thumbnail = self._search_regex( - r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', fatal=False) + r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', default=None) formats = [{ 'format_id': 'sd', diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py index b4ad513a0..8938c0e45 100644 --- a/youtube_dl/extractor/xhamster.py +++ b/youtube_dl/extractor/xhamster.py @@ -4,7 +4,6 @@ import re from .common import InfoExtractor from ..utils import ( - ExtractorError, unified_strdate, str_to_int, int_or_none, @@ -22,7 +21,7 @@ class XHamsterIE(InfoExtractor): 'ext': 'mp4', 'title': 'FemaleAgent Shy beauty takes the bait', 'upload_date': '20121014', - 'uploader_id': 'Ruseful2011', + 'uploader': 'Ruseful2011', 'duration': 893, 'age_limit': 18, } @@ -34,7 +33,7 @@ class XHamsterIE(InfoExtractor): 'ext': 'mp4', 'title': 'Britney Spears Sexy Booty', 'upload_date': '20130914', - 'uploader_id': 'jojo747400', + 'uploader': 'jojo747400', 'duration': 200, 'age_limit': 18, } @@ -46,12 +45,12 @@ class XHamsterIE(InfoExtractor): ] def _real_extract(self, url): - def extract_video_url(webpage): - mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage) - if mp4 is None: - raise ExtractorError('Unable to extract media URL') - else: - return mp4.group(1) + def extract_video_url(webpage, name): + return self._search_regex( + [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''', + r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''', + r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''], + webpage, name, 
group='mp4') def is_hd(webpage): return '<div class=\'icon iconHD\'' in webpage @@ -64,7 +63,9 @@ class XHamsterIE(InfoExtractor): mrss_url = '%s://xhamster.com/movies/%s/%s.html' % (proto, video_id, seo) webpage = self._download_webpage(mrss_url, video_id) - title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage, 'title') + title = self._html_search_regex( + [r'<title>(?P<title>.+?)(?:, (?:[^,]+? )?Porn: xHamster| - xHamster\.com)</title>', + r'<h1>([^<]+)</h1>'], webpage, 'title') # Only a few videos have a description mobj = re.search(r'<span>Description: </span>([^<]+)', webpage) @@ -75,10 +76,14 @@ class XHamsterIE(InfoExtractor): if upload_date: upload_date = unified_strdate(upload_date) - uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)', - webpage, 'uploader id', default='anonymous') + uploader = self._html_search_regex( + r"<a href='[^']+xhamster\.com/user/[^>]+>(?P<uploader>[^<]+)", + webpage, 'uploader', default='anonymous') - thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False) + thumbnail = self._search_regex( + [r'''thumb\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''', + r'''<video[^>]+poster=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''], + webpage, 'thumbnail', fatal=False, group='thumbnail') duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>', webpage, 'duration', fatal=False)) @@ -97,7 +102,9 @@ class XHamsterIE(InfoExtractor): hd = is_hd(webpage) - video_url = extract_video_url(webpage) + format_id = 'hd' if hd else 'sd' + + video_url = extract_video_url(webpage, format_id) formats = [{ 'url': video_url, 'format_id': 'hd' if hd else 'sd', @@ -108,7 +115,7 @@ class XHamsterIE(InfoExtractor): mrss_url = self._search_regex(r'<link rel="canonical" href="([^"]+)', webpage, 'mrss_url') webpage = self._download_webpage(mrss_url + '?hd', video_id, note='Downloading HD webpage') if is_hd(webpage): - video_url = extract_video_url(webpage) + video_url = extract_video_url(webpage, 'hd') formats.append({ 'url': video_url, 'format_id': 'hd', @@ -122,7 +129,7 @@ class XHamsterIE(InfoExtractor): 'title': title, 'description': description, 'upload_date': upload_date, - 'uploader_id': uploader_id, + 'uploader': uploader, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py index 779e4f46a..a1fe24050 100644 --- a/youtube_dl/extractor/xtube.py +++ b/youtube_dl/extractor/xtube.py @@ -3,12 +3,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse_unquote, -) +from ..compat import compat_urllib_parse_unquote from ..utils import ( parse_duration, + sanitized_Request, str_to_int, ) @@ -32,7 +30,7 @@ class XTubeIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/xuite.py b/youtube_dl/extractor/xuite.py index 5aac8adb3..8bbac54e2 100644 --- a/youtube_dl/extractor/xuite.py +++ b/youtube_dl/extractor/xuite.py @@ -19,7 +19,7 @@ class XuiteIE(InfoExtractor): _TESTS = [{ # Audio 'url': 'http://vlog.xuite.net/play/RGkzc1ZULTM4NjA5MTQuZmx2', - 'md5': '63a42c705772aa53fd4c1a0027f86adf', + 'md5':
'e79284c87b371424885448d11f6398c8', 'info_dict': { 'id': '3860914', 'ext': 'mp3', diff --git a/youtube_dl/extractor/xvideos.py b/youtube_dl/extractor/xvideos.py index 5dcf2fdd1..710ad5041 100644 --- a/youtube_dl/extractor/xvideos.py +++ b/youtube_dl/extractor/xvideos.py @@ -3,14 +3,12 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_unquote, - compat_urllib_request, -) +from ..compat import compat_urllib_parse_unquote from ..utils import ( clean_html, ExtractorError, determine_ext, + sanitized_Request, ) @@ -48,7 +46,7 @@ class XVideosIE(InfoExtractor): 'url': video_url, }] - android_req = compat_urllib_request.Request(url) + android_req = sanitized_Request(url) android_req.add_header('User-Agent', self._ANDROID_USER_AGENT) android_webpage = self._download_webpage(android_req, video_id, fatal=False) diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py index f9afbdbab..fca5ddc69 100644 --- a/youtube_dl/extractor/yahoo.py +++ b/youtube_dl/extractor/yahoo.py @@ -101,7 +101,7 @@ class YahooIE(InfoExtractor): } }, { 'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html', - 'md5': '67010fdf3a08d290e060a4dd96baa07b', + 'md5': '88e209b417f173d86186bef6e4d1f160', 'info_dict': { 'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521', 'ext': 'mp4', @@ -144,6 +144,17 @@ class YahooIE(InfoExtractor): }, { 'url': 'https://tw.news.yahoo.com/-100120367.html', 'only_matching': True, + }, { + # Query result is embedded in the webpage, but an explicit request to the video API fails with geo restriction + 'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html', + 'md5': '4fbafb9c9b6f07aa8f870629f6671b35', + 'info_dict': { + 'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504', + 'ext': 'mp4', + 'title': 'Communitary - Community Episode 1: Ladders', + 'description': 'md5:8fc39608213295748e1e289807838c97', + 'duration': 1646, + }, } ] @@ -171,6 +182,19 @@ class YahooIE(InfoExtractor): if nbc_sports_url: return self.url_result(nbc_sports_url, 'NBCSportsVPlayer') + # The query result is often embedded in the webpage as JSON. Sometimes an explicit + # request to the video API fails with a geo restriction error, so using the + # embedded query result when present sounds reasonable.
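(Aside: a minimal standalone sketch of the lookup order introduced in the hunk that continues below: embedded player config first, explicit API request only as a fallback. The regex and key path mirror the diff; `fetch_api` is a hypothetical callback, not a youtube-dl API.)

```python
import json
import re


def extract_sapi(webpage, fetch_api):
    # Prefer the query result embedded in the page: it is present even when
    # a direct API request would be rejected with a geo restriction error.
    mobj = re.search(
        r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*'
        r'"td-applet-videoplayer".*?});(?:</script>|$)',
        webpage)
    if mobj:
        try:
            config = json.loads(mobj.group(1))
        except ValueError:
            config = None
        if config:
            sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
            if sapi:
                return sapi
    # Only when nothing is embedded, fall back to the explicit API request.
    return fetch_api()
```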
+ config_json = self._search_regex( + r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)', + webpage, 'videoplayer applet', default=None) + if config_json: + config = self._parse_json(config_json, display_id, fatal=False) + if config: + sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi') + if sapi: + return self._extract_info(display_id, sapi, webpage) + items_json = self._search_regex( r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE, default=None) @@ -190,22 +214,10 @@ class YahooIE(InfoExtractor): video_id = info['id'] return self._get_info(video_id, display_id, webpage) - def _get_info(self, video_id, display_id, webpage): - region = self._search_regex( - r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"', - webpage, 'region', fatal=False, default='US') - data = compat_urllib_parse.urlencode({ - 'protocol': 'http', - 'region': region, - }) - query_url = ( - 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/' - '{id}?{data}'.format(id=video_id, data=data)) - query_result = self._download_json( - query_url, display_id, 'Downloading video info') - - info = query_result['query']['results']['mediaObj'][0] + def _extract_info(self, display_id, query, webpage): + info = query['query']['results']['mediaObj'][0] meta = info.get('meta') + video_id = info.get('id') if not meta: msg = info['status'].get('msg') @@ -231,6 +243,9 @@ class YahooIE(InfoExtractor): 'ext': 'flv', }) else: + if s.get('format') == 'm3u8_playlist': + format_info['protocol'] = 'm3u8_native' + format_info['ext'] = 'mp4' format_url = compat_urlparse.urljoin(host, path) format_info['url'] = format_url formats.append(format_info) @@ -264,6 +279,21 @@ class YahooIE(InfoExtractor): 'subtitles': subtitles, } + def _get_info(self, video_id, display_id, webpage): + region = self._search_regex( + r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"', + webpage, 'region', fatal=False, default='US') + data = compat_urllib_parse.urlencode({ + 'protocol': 'http', + 'region': region, + }) + query_url = ( + 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/' + '{id}?{data}'.format(id=video_id, data=data)) + query_result = self._download_json( + query_url, display_id, 'Downloading video info') + return self._extract_info(display_id, query_result, webpage) + class YahooSearchIE(SearchInfoExtractor): IE_DESC = 'Yahoo screen search' diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py index f4c0f5702..d3cc1a29f 100644 --- a/youtube_dl/extractor/yandexmusic.py +++ b/youtube_dl/extractor/yandexmusic.py @@ -1,18 +1,38 @@ -# coding=utf-8 +# coding: utf-8 from __future__ import unicode_literals import re import hashlib from .common import InfoExtractor -from ..compat import compat_str +from ..compat import ( + compat_str, + compat_urllib_parse, +) from ..utils import ( int_or_none, float_or_none, + sanitized_Request, ) -class YandexMusicBaseIE(InfoExtractor): +class YandexMusicTrackIE(InfoExtractor): + IE_NAME = 'yandexmusic:track' + IE_DESC = 'Яндекс.Музыка - Трек' + _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)' + + _TEST = { + 'url': 'http://music.yandex.ru/album/540508/track/4878838', + 'md5': 'f496818aa2f60b6c0062980d2e00dc20', + 'info_dict': { + 'id': '4878838', + 'ext': 'mp3', + 'title': 'Carlo Ambrosio - Gypsy Eyes 1', + 'filesize': 4628061, + 'duration': 193.04, + } + } + def _get_track_url(self, storage_dir, track_id): data = self._download_json( 
'http://music.yandex.ru/api/v1.5/handlers/api-jsonp.jsx?action=getTrackSrc&p=download-info/%s' @@ -26,6 +46,12 @@ class YandexMusicBaseIE(InfoExtractor): % (data['host'], key, data['ts'] + data['path'], storage[1])) def _get_track_info(self, track): + thumbnail = None + cover_uri = track.get('albums', [{}])[0].get('coverUri') + if cover_uri: + thumbnail = cover_uri.replace('%%', 'orig') + if not thumbnail.startswith('http'): + thumbnail = 'http://' + thumbnail return { 'id': track['id'], 'ext': 'mp3', @@ -33,26 +59,9 @@ class YandexMusicBaseIE(InfoExtractor): 'title': '%s - %s' % (track['artists'][0]['name'], track['title']), 'filesize': int_or_none(track.get('fileSize')), 'duration': float_or_none(track.get('durationMs'), 1000), + 'thumbnail': thumbnail, } - -class YandexMusicTrackIE(YandexMusicBaseIE): - IE_NAME = 'yandexmusic:track' - IE_DESC = 'Яндекс.Музыка - Трек' - _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)' - - _TEST = { - 'url': 'http://music.yandex.ru/album/540508/track/4878838', - 'md5': 'f496818aa2f60b6c0062980d2e00dc20', - 'info_dict': { - 'id': '4878838', - 'ext': 'mp3', - 'title': 'Carlo Ambrosio - Gypsy Eyes 1', - 'filesize': 4628061, - 'duration': 193.04, - } - } - def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) album_id, track_id = mobj.group('album_id'), mobj.group('id') @@ -64,7 +73,15 @@ class YandexMusicTrackIE(YandexMusicBaseIE): return self._get_track_info(track) -class YandexMusicAlbumIE(YandexMusicBaseIE): +class YandexMusicPlaylistBaseIE(InfoExtractor): + def _build_playlist(self, tracks): + return [ + self.url_result( + 'http://music.yandex.ru/album/%s/track/%s' % (track['albums'][0]['id'], track['id'])) + for track in tracks if track.get('albums') and isinstance(track.get('albums'), list)] + + +class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE): IE_NAME = 'yandexmusic:album' IE_DESC = 'Яндекс.Музыка - Альбом' _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<id>\d+)/?(\?|$)' @@ -85,7 +102,7 @@ class YandexMusicAlbumIE(YandexMusicBaseIE): 'http://music.yandex.ru/handlers/album.jsx?album=%s' % album_id, album_id, 'Downloading album JSON') - entries = [self._get_track_info(track) for track in album['volumes'][0]] + entries = self._build_playlist(album['volumes'][0]) title = '%s - %s' % (album['artists'][0]['name'], album['title']) year = album.get('year') @@ -95,12 +112,12 @@ class YandexMusicAlbumIE(YandexMusicBaseIE): return self.playlist_result(entries, compat_str(album['id']), title) -class YandexMusicPlaylistIE(YandexMusicBaseIE): +class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE): IE_NAME = 'yandexmusic:playlist' IE_DESC = 'Яндекс.Музыка - Плейлист' _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/users/[^/]+/playlists/(?P<id>\d+)' - _TEST = { + _TESTS = [{ 'url': 'http://music.yandex.ru/users/music.partners/playlists/1245', 'info_dict': { 'id': '1245', @@ -108,20 +125,54 @@ class YandexMusicPlaylistIE(YandexMusicBaseIE): 'description': 'md5:3b9f27b0efbe53f2ee1e844d07155cc9', }, 'playlist_count': 6, - } + }, { + # playlist exceeding the limit of 150 tracks shipped with webpage (see + # https://github.com/rg3/youtube-dl/issues/6666) + 'url': 'https://music.yandex.ru/users/ya.playlist/playlists/1036', + 'info_dict': { + 'id': '1036', + 'title': 'Музыка 90-х', + }, + 'playlist_count': 310, + }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) - playlist = self._parse_json( + mu = self._parse_json( 
self._search_regex( r'var\s+Mu\s*=\s*({.+?});\s*</script>', webpage, 'player'), - playlist_id)['pageData']['playlist'] - - entries = [self._get_track_info(track) for track in playlist['tracks']] + playlist_id) + + playlist = mu['pageData']['playlist'] + tracks, track_ids = playlist['tracks'], playlist['trackIds'] + + # tracks dictionary shipped with webpage is limited to 150 tracks, + # missing tracks should be retrieved manually. + if len(tracks) < len(track_ids): + present_track_ids = set([compat_str(track['id']) for track in tracks if track.get('id')]) + missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids) + request = sanitized_Request( + 'https://music.yandex.ru/handlers/track-entries.jsx', + compat_urllib_parse.urlencode({ + 'entries': ','.join(missing_track_ids), + 'lang': mu.get('settings', {}).get('lang', 'en'), + 'external-domain': 'music.yandex.ru', + 'overembed': 'false', + 'sign': mu.get('authData', {}).get('user', {}).get('sign'), + 'strict': 'true', + }).encode('utf-8')) + request.add_header('Referer', url) + request.add_header('X-Requested-With', 'XMLHttpRequest') + + missing_tracks = self._download_json( + request, playlist_id, 'Downloading missing tracks JSON', fatal=False) + if missing_tracks: + tracks.extend(missing_tracks) return self.playlist_result( - entries, compat_str(playlist_id), + self._build_playlist(tracks), + compat_str(playlist_id), playlist['title'], playlist.get('description')) diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py index 78caeb8b3..3a3432be8 100644 --- a/youtube_dl/extractor/youku.py +++ b/youtube_dl/extractor/youku.py @@ -4,12 +4,13 @@ from __future__ import unicode_literals import base64 from .common import InfoExtractor -from ..utils import ExtractorError - from ..compat import ( compat_urllib_parse, compat_ord, - compat_urllib_request, +) +from ..utils import ( + ExtractorError, + sanitized_Request, ) @@ -24,8 +25,8 @@ class YoukuIE(InfoExtractor): ''' _TESTS = [{ + # MD5 is unstable 'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html', - 'md5': '5f3af4192eabacc4501508d54a8cabd7', 'info_dict': { 'id': 'XMTc1ODE5Njcy_part1', 'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.', @@ -41,6 +42,7 @@ class YoukuIE(InfoExtractor): 'title': '武媚娘传奇 85', }, 'playlist_count': 11, + 'skip': 'Available in China only', }, { 'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html', 'info_dict': { @@ -48,10 +50,20 @@ class YoukuIE(InfoExtractor): 'title': '花千骨 04', }, 'playlist_count': 13, - 'skip': 'Available in China only', + }, { + 'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html', + 'note': 'Video protected with password', + 'info_dict': { + 'id': 'XNjA1NzA2Njgw', + 'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起', + }, + 'playlist_count': 19, + 'params': { + 'videopassword': '100600', + }, }] - def construct_video_urls(self, data1, data2): + def construct_video_urls(self, data): # get sid, token def yk_t(s1, s2): ls = list(range(256)) @@ -69,34 +81,24 @@ class YoukuIE(InfoExtractor): return bytes(s) sid, token = yk_t( - b'becaf9be', base64.b64decode(data2['ep'].encode('ascii')) + b'becaf9be', base64.b64decode(data['security']['encrypt_string'].encode('ascii')) ).decode('ascii').split('_') # get oip - oip = data2['ip'] - - # get fileid - string_ls = list( - 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890') - shuffled_string_ls = [] - seed = data1['seed'] - N = len(string_ls) - for ii in range(N): - seed = (seed * 0xd3 + 0x754f) % 0x10000 - idx = seed * len(string_ls) // 0x10000 - 
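(Aside: the 150-track workaround above in isolation. A sketch under the assumption that `download_entries` wraps the POST to the `track-entries.jsx` handler shown in the hunk; it is a hypothetical callback.)

```python
def complete_tracks(tracks, track_ids, download_entries):
    # The webpage ships at most 150 track objects, while trackIds lists the
    # whole playlist; fetch whatever is missing by ID.
    present = set(str(track['id']) for track in tracks if track.get('id'))
    missing = set(str(track_id) for track_id in track_ids) - present
    if missing:
        extra = download_entries(','.join(sorted(missing)))
        if extra:
            tracks.extend(extra)
    return tracks
```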
shuffled_string_ls.append(string_ls[idx]) - del string_ls[idx] + oip = data['security']['ip'] fileid_dict = {} - for format in data1['streamtypes']: - streamfileid = [ - int(i) for i in data1['streamfileids'][format].strip('*').split('*')] - fileid = ''.join( - [shuffled_string_ls[i] for i in streamfileid]) - fileid_dict[format] = fileid[:8] + '%s' + fileid[10:] + for stream in data['stream']: + format = stream.get('stream_type') + fileid = stream['stream_fileid'] + fileid_dict[format] = fileid def get_fileid(format, n): - fileid = fileid_dict[format] % hex(int(n))[2:].upper().zfill(2) + number = hex(int(str(n), 10))[2:].upper() + if len(number) == 1: + number = '0' + number + streamfileids = fileid_dict[format] + fileid = streamfileids[0:8] + number + streamfileids[10:] return fileid # get ep @@ -111,15 +113,15 @@ class YoukuIE(InfoExtractor): # generate video_urls video_urls_dict = {} - for format in data1['streamtypes']: + for stream in data['stream']: + format = stream.get('stream_type') video_urls = [] - for dt in data1['segs'][format]: - n = str(int(dt['no'])) + for dt in stream['segs']: + n = str(stream['segs'].index(dt)) param = { - 'K': dt['k'], + 'K': dt['key'], 'hd': self.get_hd(format), 'myp': 0, - 'ts': dt['seconds'], 'ypp': 0, 'ctype': 12, 'ev': 1, @@ -130,7 +132,7 @@ class YoukuIE(InfoExtractor): video_url = \ 'http://k.youku.com/player/getFlvPath/' + \ 'sid/' + sid + \ - '_' + str(int(n) + 1).zfill(2) + \ + '_00' + \ '/st/' + self.parse_ext_l(format) + \ '/fileid/' + get_fileid(format, n) + '?' + \ compat_urllib_parse.urlencode(param) @@ -141,23 +143,31 @@ class YoukuIE(InfoExtractor): def get_hd(self, fm): hd_id_dict = { + '3gp': '0', + '3gphd': '1', 'flv': '0', + 'flvhd': '0', 'mp4': '1', + 'mp4hd': '1', + 'mp4hd2': '1', + 'mp4hd3': '1', 'hd2': '2', 'hd3': '3', - '3gp': '0', - '3gphd': '1' } return hd_id_dict[fm] def parse_ext_l(self, fm): ext_dict = { + '3gp': 'flv', + '3gphd': 'mp4', 'flv': 'flv', + 'flvhd': 'flv', 'mp4': 'mp4', + 'mp4hd': 'mp4', + 'mp4hd2': 'flv', + 'mp4hd3': 'flv', 'hd2': 'flv', 'hd3': 'flv', - '3gp': 'flv', - '3gphd': 'mp4' } return ext_dict[fm] @@ -166,9 +176,13 @@ class YoukuIE(InfoExtractor): '3gp': 'h6', '3gphd': 'h5', 'flv': 'h4', + 'flvhd': 'h4', 'mp4': 'h3', + 'mp4hd': 'h3', + 'mp4hd2': 'h4', + 'mp4hd3': 'h4', 'hd2': 'h2', - 'hd3': 'h1' + 'hd3': 'h1', } return _dict[fm] @@ -176,39 +190,46 @@ class YoukuIE(InfoExtractor): video_id = self._match_id(url) def retrieve_data(req_url, note): - req = compat_urllib_request.Request(req_url) + headers = { + 'Referer': req_url, + } + self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com') + req = sanitized_Request(req_url, headers=headers) cn_verification_proxy = self._downloader.params.get('cn_verification_proxy') if cn_verification_proxy: req.add_header('Ytdl-request-proxy', cn_verification_proxy) raw_data = self._download_json(req, video_id, note=note) - return raw_data['data'][0] + + return raw_data['data'] + + video_password = self._downloader.params.get('videopassword', None) # request basic data - data1 = retrieve_data( - 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id, - 'Downloading JSON metadata 1') - data2 = retrieve_data( - 'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id, - 'Downloading JSON metadata 2') - - error_code = data1.get('error_code') - if error_code: - error = data1.get('error') - if error is not None and '因版权原因无法观看此视频' in error: + basic_data_url = "http://play.youku.com/play/get.json?vid=%s&ct=12" % video_id + if 
video_password: + basic_data_url += '&pwd=%s' % video_password + + data = retrieve_data(basic_data_url, 'Downloading JSON metadata') + + error = data.get('error') + if error: + error_note = error.get('note') + if error_note is not None and '因版权原因无法观看此视频' in error_note: raise ExtractorError( 'Youku said: Sorry, this video is available in China only', expected=True) else: - msg = 'Youku server reported error %i' % error_code - if error is not None: - msg += ': ' + error + msg = 'Youku server reported error %i' % error.get('code') + if error_note is not None: + msg += ': ' + error_note raise ExtractorError(msg) - title = data1['title'] + # get video title + title = data['video']['title'] # generate video_urls_dict - video_urls_dict = self.construct_video_urls(data1, data2) + video_urls_dict = self.construct_video_urls(data) # construct info entries = [{ @@ -217,10 +238,11 @@ class YoukuIE(InfoExtractor): 'formats': [], # some formats are not available for all parts, we have to detect # which one has all - } for i in range(max(len(v) for v in data1['segs'].values()))] - for fm in data1['streamtypes']: + } for i in range(max(len(v.get('segs')) for v in data['stream']))] + for stream in data['stream']: + fm = stream.get('stream_type') video_urls = video_urls_dict[fm] - for video_url, seg, entry in zip(video_urls, data1['segs'][fm], entries): + for video_url, seg, entry in zip(video_urls, stream['segs'], entries): entry['formats'].append({ 'url': video_url, 'format_id': self.get_format_name(fm), diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py index 4ba7c36db..dd724085a 100644 --- a/youtube_dl/extractor/youporn.py +++ b/youtube_dl/extractor/youporn.py @@ -1,121 +1,171 @@ from __future__ import unicode_literals - -import json import re -import sys from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_urlparse, - compat_urllib_request, -) from ..utils import ( - ExtractorError, + int_or_none, + sanitized_Request, + str_to_int, unescapeHTML, unified_strdate, ) -from ..aes import ( - aes_decrypt_text -) +from ..aes import aes_decrypt_text class YouPornIE(InfoExtractor): - _VALID_URL = r'^(?P<proto>https?://)(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)' + _TESTS = [{ 'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/', + 'md5': '71ec5fcfddacf80f495efa8b6a8d9a89', 'info_dict': { 'id': '505835', + 'display_id': 'sex-ed-is-it-safe-to-masturbate-daily', 'ext': 'mp4', - 'upload_date': '20101221', + 'title': 'Sex Ed: Is It Safe To Masturbate Daily?', 'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?', + 'thumbnail': 're:^https?://.*\.jpg$', 'uploader': 'Ask Dan And Jennifer', - 'title': 'Sex Ed: Is It Safe To Masturbate Daily?', + 'upload_date': '20101221', + 'average_rating': int, + 'view_count': int, + 'comment_count': int, + 'categories': list, + 'tags': list, 'age_limit': 18, - } - } + }, + }, { + # Anonymous User uploader + 'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4', + 'info_dict': { + 'id': '561726', + 'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show', + 'ext': 'mp4', + 'title': 'Big Tits Awesome Brunette On amazing webcam show', + 'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4', 
+ 'thumbnail': 're:^https?://.*\.jpg$', + 'uploader': 'Anonymous User', + 'upload_date': '20111125', + 'average_rating': int, + 'view_count': int, + 'comment_count': int, + 'categories': list, + 'tags': list, + 'age_limit': 18, + }, + 'params': { + 'skip_download': True, + }, + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('videoid') - url = mobj.group('proto') + 'www.' + mobj.group('url') + video_id = mobj.group('id') + display_id = mobj.group('display_id') - req = compat_urllib_request.Request(url) - req.add_header('Cookie', 'age_verified=1') - webpage = self._download_webpage(req, video_id) - age_limit = self._rta_search(webpage) + request = sanitized_Request(url) + request.add_header('Cookie', 'age_verified=1') + webpage = self._download_webpage(request, display_id) + + title = self._search_regex( + [r'(?:video_titles|videoTitle)\s*[:=]\s*(["\'])(?P<title>.+?)\1', + r'<h1[^>]+class=["\']heading\d?["\'][^>]*>(?P<title>[^<]+)<'], + webpage, 'title', group='title') - # Get JSON parameters - json_params = self._search_regex( - [r'videoJa?son\s*=\s*({.+})', - r'var\s+currentVideo\s*=\s*new\s+Video\((.+?)\)[,;]'], - webpage, 'JSON parameters') - try: - params = json.loads(json_params) - except ValueError: - raise ExtractorError('Invalid JSON') - - self.report_extraction(video_id) - try: - video_title = params['title'] - upload_date = unified_strdate(params['release_date_f']) - video_description = params['description'] - video_uploader = params['submitted_by'] - thumbnail = params['thumbnails'][0]['image'] - except KeyError: - raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1]) - - # Get all of the links from the page - DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>' - download_list_html = self._search_regex(DOWNLOAD_LIST_RE, - webpage, 'download list').strip() - LINK_RE = r'<a href="([^"]+)">' - links = re.findall(LINK_RE, download_list_html) - - # Get all encrypted links - encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage) - for encrypted_link in encrypted_links: - link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8') + links = [] + + sources = self._search_regex( + r'sources\s*:\s*({.+?})', webpage, 'sources', default=None) + if sources: + for _, link in re.findall(r'[^:]+\s*:\s*(["\'])(http.+?)\1', sources): + links.append(link) + + # Fallback #1 + for _, link in re.findall( + r'(?:videoUrl|videoSrc|videoIpadUrl|html5PlayerSrc)\s*[:=]\s*(["\'])(http.+?)\1', webpage): + links.append(link) + + # Fallback #2, this also contains an extra low-quality 180p format + for _, link in re.findall(r'<a[^>]+href=(["\'])(http.+?)\1[^>]+title=["\']Download [Vv]ideo', webpage): links.append(link) + # Fallback #3, encrypted links + for _, encrypted_link in re.findall( + r'encryptedQuality\d{3,4}URL\s*=\s*(["\'])([\da-zA-Z+/=]+)\1', webpage): + links.append(aes_decrypt_text(encrypted_link, title, 32).decode('utf-8')) + formats = [] - for link in links: - # A link looks like this: - # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0 - # A path looks like this: - # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4 - video_url = unescapeHTML(link) - path = compat_urllib_parse_urlparse(video_url).path - format_parts = path.split('/')[4].split('_')[:2] -
dn = compat_urllib_parse_urlparse(video_url).netloc.partition('.')[0] - - resolution = format_parts[0] - height = int(resolution[:-len('p')]) - bitrate = int(format_parts[1][:-len('k')]) - format = '-'.join(format_parts) + '-' + dn - - formats.append({ + for video_url in set(unescapeHTML(link) for link in links): + f = { 'url': video_url, - 'format': format, - 'format_id': format, - 'height': height, - 'tbr': bitrate, - 'resolution': resolution, - }) - + } + # Video URL's path looks like this: + # /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4 + # We will benefit from it by extracting some metadata + mobj = re.search(r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+/', video_url) + if mobj: + height = int(mobj.group('height')) + bitrate = int(mobj.group('bitrate')) + f.update({ + 'format_id': '%dp-%dk' % (height, bitrate), + 'height': height, + 'tbr': bitrate, + }) + formats.append(f) self._sort_formats(formats) - if not formats: - raise ExtractorError('ERROR: no known formats available for video') + description = self._html_search_regex( + r'(?s)<div[^>]+class=["\']video-description["\'][^>]*>(.+?)</div>', + webpage, 'description', default=None) + thumbnail = self._search_regex( + r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1', + webpage, 'thumbnail', fatal=False, group='thumbnail') + + uploader = self._html_search_regex( + r'(?s)<div[^>]+class=["\']videoInfoBy["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>', + webpage, 'uploader', fatal=False) + upload_date = unified_strdate(self._html_search_regex( + r'(?s)<div[^>]+class=["\']videoInfoTime["\'][^>]*>(.+?)</div>', + webpage, 'upload date', fatal=False)) + + age_limit = self._rta_search(webpage) + + average_rating = int_or_none(self._search_regex( + r'<div[^>]+class=["\']videoInfoRating["\'][^>]*>\s*<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>', + webpage, 'average rating', fatal=False)) + + view_count = str_to_int(self._search_regex( + r'(?s)<div[^>]+class=["\']videoInfoViews["\'][^>]*>.*?([\d,.]+)\s*</div>', + webpage, 'view count', fatal=False)) + comment_count = str_to_int(self._search_regex( + r'>All [Cc]omments? 
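(Aside: the URL-path parsing used above, runnable on its own; the pattern is the one from the hunk, and the sample path is shortened from the diff's own example.)

```python
import re


def metadata_from_path(video_url):
    # Paths look like /201012/17/505835/720p_1500k_505835/...mp4, where the
    # '720p_1500k' component carries the height and the bitrate.
    mobj = re.search(r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+/', video_url)
    if not mobj:
        return {}
    height = int(mobj.group('height'))
    bitrate = int(mobj.group('bitrate'))
    return {'format_id': '%dp-%dk' % (height, bitrate), 'height': height, 'tbr': bitrate}


assert metadata_from_path('/201012/17/505835/720p_1500k_505835/x.mp4')['height'] == 720
```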
\(([\d,.]+)\)', + webpage, 'comment count', fatal=False)) + + def extract_tag_box(title): + tag_box = self._search_regex( + (r'<div[^>]+class=["\']tagBoxTitle["\'][^>]*>\s*%s\b.*?</div>\s*' + '<div[^>]+class=["\']tagBoxContent["\']>(.+?)</div>') % re.escape(title), + webpage, '%s tag box' % title, default=None) + if not tag_box: + return [] + return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box) + + categories = extract_tag_box('Category') + tags = extract_tag_box('Tags') return { 'id': video_id, - 'uploader': video_uploader, - 'upload_date': upload_date, - 'title': video_title, + 'display_id': display_id, + 'title': title, + 'description': description, 'thumbnail': thumbnail, - 'description': video_description, + 'uploader': uploader, + 'upload_date': upload_date, + 'average_rating': average_rating, + 'view_count': view_count, + 'comment_count': comment_count, + 'categories': categories, + 'tags': tags, 'age_limit': age_limit, 'formats': formats, } diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 323681960..4aac2cc03 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -19,21 +19,29 @@ from ..compat import ( compat_urllib_parse, compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, - compat_urllib_request, + compat_urllib_parse_urlparse, compat_urlparse, compat_str, ) from ..utils import ( clean_html, + encode_dict, + error_to_compat_str, ExtractorError, float_or_none, get_element_by_attribute, get_element_by_id, int_or_none, orderedSet, + parse_duration, + remove_quotes, + remove_start, + sanitized_Request, + smuggle_url, str_to_int, unescapeHTML, unified_strdate, + unsmuggle_url, uppercase_escape, ISO3166Utils, ) @@ -42,7 +50,7 @@ from ..utils import ( class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' - _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor' + _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge' _NETRC_MACHINE = 'youtube' # If True it will raise an error if no login info is provided _LOGIN_REQUIRED = False @@ -106,12 +114,9 @@ class YoutubeBaseInfoExtractor(InfoExtractor): 'hl': 'en_US', } - # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode - # chokes on unicode - login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items()) - login_data = compat_urllib_parse.urlencode(login_form).encode('ascii') + login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii') - req = compat_urllib_request.Request(self._LOGIN_URL, login_data) + req = sanitized_Request(self._LOGIN_URL, login_data) login_results = self._download_webpage( req, None, note='Logging in', errnote='unable to log in', fatal=False) @@ -124,44 +129,27 @@ class YoutubeBaseInfoExtractor(InfoExtractor): # Two-Factor # TODO add SMS and phone call support - these require making a request and then prompting the user - if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None: - tfa_code = self._get_tfa_info() + if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None: + tfa_code = self._get_tfa_info('2-step verification code') - if tfa_code is None: - self._downloader.report_warning('Two-factor authentication required. 
Provide it with --twofactor <code>') - self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)') + if not tfa_code: + self._downloader.report_warning( + 'Two-factor authentication required. Provide it either interactively or with --twofactor <code> ' + '(Note that only TOTP (Google Authenticator App) codes work at this time.)') return False - # Unlike the first login form, secTok and timeStmp are both required for the TFA form - - match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U) - if match is None: - self._downloader.report_warning('Failed to get secTok - did the page structure change?') - secTok = match.group(1) - match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U) - if match is None: - self._downloader.report_warning('Failed to get timeStmp - did the page structure change?') - timeStmp = match.group(1) - - tfa_form_strs = { - 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1', - 'smsToken': '', - 'smsUserPin': tfa_code, - 'smsVerifyPin': 'Verify', - - 'PersistentCookie': 'yes', - 'checkConnection': '', - 'checkedDomains': 'youtube', - 'pstMsg': '1', - 'secTok': secTok, - 'timeStmp': timeStmp, - 'service': 'youtube', - 'hl': 'en_US', - } - tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items()) - tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii') + tfa_code = remove_start(tfa_code, 'G-') + + tfa_form_strs = self._form_hidden_inputs('challenge', login_results) + + tfa_form_strs.update({ + 'Pin': tfa_code, + 'TrustDevice': 'on', + }) - tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data) + tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii') + + tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data) tfa_results = self._download_webpage( tfa_req, None, note='Submitting TFA code', errnote='unable to submit tfa', fatal=False) @@ -169,8 +157,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor): if tfa_results is False: return False - if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None: - self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.') + if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None: + self._downloader.report_warning('Two-factor code expired or invalid.
Please try again, or use a one-use backup code instead.') return False if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None: self._downloader.report_warning('unable to log in - did the page structure change?') @@ -192,6 +180,69 @@ class YoutubeBaseInfoExtractor(InfoExtractor): return +class YoutubeEntryListBaseInfoExtractor(InfoExtractor): + # Extract entries from page with "Load more" button + def _entries(self, page, playlist_id): + more_widget_html = content_html = page + for page_num in itertools.count(1): + for entry in self._process_page(content_html): + yield entry + + mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) + if not mobj: + break + + more = self._download_json( + 'https://youtube.com/%s' % mobj.group('more'), playlist_id, + 'Downloading page #%s' % page_num, + transform_source=uppercase_escape) + content_html = more['content_html'] + if not content_html.strip(): + # Some webpages show a "Load more" button but they don't + # have more videos + break + more_widget_html = more['load_more_widget_html'] + + +class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor): + def _process_page(self, content): + for video_id, video_title in self.extract_videos_from_page(content): + yield self.url_result(video_id, 'Youtube', video_id, video_title) + + def extract_videos_from_page(self, page): + ids_in_page = [] + titles_in_page = [] + for mobj in re.finditer(self._VIDEO_RE, page): + # The link with index 0 is not the first video of the playlist (not sure if this is still the case) + if 'index' in mobj.groupdict() and mobj.group('id') == '0': + continue + video_id = mobj.group('id') + video_title = unescapeHTML(mobj.group('title')) + if video_title: + video_title = video_title.strip() + try: + idx = ids_in_page.index(video_id) + if video_title and not titles_in_page[idx]: + titles_in_page[idx] = video_title + except ValueError: + ids_in_page.append(video_id) + titles_in_page.append(video_title) + return zip(ids_in_page, titles_in_page) + + +class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor): + def _process_page(self, content): + for playlist_id in re.findall(r'href="/?playlist\?list=(.+?)"', content): + yield self.url_result( + 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist') + + def _real_extract(self, url): + playlist_id = self._match_id(url) + webpage = self._download_webpage(url, playlist_id) + title = self._og_search_title(webpage, fatal=False) + return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title) + + class YoutubeIE(YoutubeBaseInfoExtractor): IE_DESC = 'YouTube.com' _VALID_URL = r"""(?x)^ @@ -209,11 +260,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor): |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! - (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx) + (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY) v= ) )) - |youtu\.be/ # just youtu.be/xxxx + |(?: + youtu\.be| # just youtu.be/xxxx + vid\.plus # or vid.plus/xxxx + )/ |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId= ) )?
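(Aside: the "Load more" pagination factored out into `_entries` above, reduced to its skeleton. `process_page` and `download_json` are hypothetical stand-ins for the extractor hook and the `_download_json` helper.)

```python
import itertools
import re


def entries(first_page, process_page, download_json):
    # Each continuation response carries a batch of content plus a widget
    # that may or may not point at yet another continuation URL.
    more_widget_html = content_html = first_page
    for page_num in itertools.count(1):
        for entry in process_page(content_html):
            yield entry
        mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
        if not mobj:
            break
        more = download_json('https://youtube.com/%s' % mobj.group('more'), page_num)
        content_html = more['content_html']
        if not content_html.strip():
            # A "Load more" button may be rendered even when nothing remains.
            break
        more_widget_html = more['load_more_widget_html']
```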
# all until now is optional -> you can pass the naked ID @@ -279,13 +333,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor): '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'}, # Dash webm - '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, - '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, - '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, - '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, - '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, - '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40}, - '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'}, + '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40}, + '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40}, + '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40}, + '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40}, + '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40}, + '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40}, + '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'}, '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, @@ -294,12 +348,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor): '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + # itag 272 videos are either 3840x2160 (e.g. 
RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug) '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, - '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'}, - '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'}, - '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'}, - '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'}, - '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'}, + '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'}, + '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'}, + '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'}, + '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'}, + '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'}, # Dash webm audio '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50}, @@ -317,7 +372,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): IE_NAME = 'youtube' _TESTS = [ { - 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc', + 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9', 'info_dict': { 'id': 'BaW_jenozKc', 'ext': 'mp4', @@ -327,8 +382,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'upload_date': '20121002', 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .', 'categories': ['Science & Technology'], + 'tags': ['youtube-dl'], 'like_count': int, 'dislike_count': int, + 'start_time': 1, + 'end_time': 9, } }, { @@ -339,9 +397,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'upload_date': '20120506', 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]', - 'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f', + 'alt_title': 'I Love It (feat. 
Charli XCX)', + 'description': 'md5:782e8651347686cba06e58f71ab51773', + 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli', + 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop', + 'iconic ep', 'iconic', 'love', 'it'], 'uploader': 'Icona Pop', 'uploader_id': 'IconaPop', + 'creator': 'Icona Pop', } }, { @@ -352,9 +415,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'upload_date': '20130703', 'title': 'Justin Timberlake - Tunnel Vision (Explicit)', + 'alt_title': 'Tunnel Vision', 'description': 'md5:64249768eec3bc4276236606ea996373', 'uploader': 'justintimberlakeVEVO', 'uploader_id': 'justintimberlakeVEVO', + 'creator': 'Justin Timberlake', + 'age_limit': 18, } }, { @@ -367,10 +433,31 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012', 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7', 'uploader': 'SET India', - 'uploader_id': 'setindia' + 'uploader_id': 'setindia', + 'age_limit': 18, } }, { + 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY', + 'note': 'Use the first video ID in the URL', + 'info_dict': { + 'id': 'BaW_jenozKc', + 'ext': 'mp4', + 'title': 'youtube-dl test video "\'/\\ä↭𝕐', + 'uploader': 'Philipp Hagemeister', + 'uploader_id': 'phihag', + 'upload_date': '20121002', + 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .', + 'categories': ['Science & Technology'], + 'tags': ['youtube-dl'], + 'like_count': int, + 'dislike_count': int, + }, + 'params': { + 'skip_download': True, + }, + }, + { 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I', 'note': '256k DASH audio (format 141) via DASH manifest', 'info_dict': { @@ -411,10 +498,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'id': 'nfWlot6h_JM', 'ext': 'm4a', 'title': 'Taylor Swift - Shake It Off', - 'description': 'md5:2acfda1b285bdd478ccec22f9918199d', + 'alt_title': 'Shake It Off', + 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3', 'uploader': 'TaylorSwiftVEVO', 'uploader_id': 'TaylorSwiftVEVO', 'upload_date': '20140818', + 'creator': 'Taylor Swift', }, 'params': { 'youtube_include_dash_manifest': True, @@ -445,6 +534,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'uploader': 'The Witcher', 'uploader_id': 'WitcherGame', 'upload_date': '20140605', + 'age_limit': 18, }, }, # Age-gate video with encrypted signature @@ -458,6 +548,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'uploader': 'LloydVEVO', 'uploader_id': 'LloydVEVO', 'upload_date': '20110629', + 'age_limit': 18, }, }, # video_info is None (https://github.com/rg3/youtube-dl/issues/4421) @@ -468,9 +559,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'upload_date': '20100430', 'uploader_id': 'deadmau5', + 'creator': 'deadmau5', 'description': 'md5:12c56784b8032162bb936a5f76d55360', 'uploader': 'deadmau5', 'title': 'Deadmau5 - Some Chords (HD)', + 'alt_title': 'Some Chords', }, 'expected_warnings': [ 'DASH manifest missing', @@ -482,7 +575,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'info_dict': { 'id': 'lqQg6PlCWgI', 'ext': 'mp4', - 'upload_date': '20120731', + 'upload_date': '20150827', 'uploader_id': 'olympic', 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games', 'uploader': 'Olympics', @@ -511,7 +604,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'url': 'qEJwOuvDf7I', 'info_dict': { 
'id': 'qEJwOuvDf7I', - 'ext': 'mp4', + 'ext': 'webm', 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге', 'description': '', 'upload_date': '20150404', @@ -554,6 +647,106 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'format': '135', # bestvideo } }, + { + # Multifeed videos (multiple cameras), URL is for Main Camera + 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs', + 'info_dict': { + 'id': 'jqWvoWXjCVs', + 'title': 'teamPGP: Rocket League Noob Stream', + 'description': 'md5:dc7872fb300e143831327f1bae3af010', + }, + 'playlist': [{ + 'info_dict': { + 'id': 'jqWvoWXjCVs', + 'ext': 'mp4', + 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)', + 'description': 'md5:dc7872fb300e143831327f1bae3af010', + 'upload_date': '20150721', + 'uploader': 'Beer Games Beer', + 'uploader_id': 'beergamesbeer', + }, + }, { + 'info_dict': { + 'id': '6h8e8xoXJzg', + 'ext': 'mp4', + 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)', + 'description': 'md5:dc7872fb300e143831327f1bae3af010', + 'upload_date': '20150721', + 'uploader': 'Beer Games Beer', + 'uploader_id': 'beergamesbeer', + }, + }, { + 'info_dict': { + 'id': 'PUOgX5z9xZw', + 'ext': 'mp4', + 'title': 'teamPGP: Rocket League Noob Stream (grizzle)', + 'description': 'md5:dc7872fb300e143831327f1bae3af010', + 'upload_date': '20150721', + 'uploader': 'Beer Games Beer', + 'uploader_id': 'beergamesbeer', + }, + }, { + 'info_dict': { + 'id': 'teuwxikvS5k', + 'ext': 'mp4', + 'title': 'teamPGP: Rocket League Noob Stream (zim)', + 'description': 'md5:dc7872fb300e143831327f1bae3af010', + 'upload_date': '20150721', + 'uploader': 'Beer Games Beer', + 'uploader_id': 'beergamesbeer', + }, + }], + 'params': { + 'skip_download': True, + }, + }, + { + 'url': 'http://vid.plus/FlRa-iH7PGw', + 'only_matching': True, + }, + { + # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468) + 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg', + 'info_dict': { + 'id': 'lsguqyKfVQg', + 'ext': 'mp4', + 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21', + 'alt_title': 'Dark Walk', + 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a', + 'upload_date': '20151119', + 'uploader_id': 'IronSoulElf', + 'uploader': 'IronSoulElf', + 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan', + }, + 'params': { + 'skip_download': True, + }, + }, + { + # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468) + 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8', + 'only_matching': True, + }, + { + # Video with yt:stretch=17:0 + 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM', + 'info_dict': { + 'id': 'Q39EVAstoRM', + 'ext': 'mp4', + 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4', + 'description': 'md5:ee18a25c350637c8faff806845bddee9', + 'upload_date': '20151107', + 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA', + 'uploader': 'CH GAMER DROID', + }, + 'params': { + 'skip_download': True, + }, + }, + { + 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY', + 'only_matching': True, + } ] def __init__(self, *args, **kwargs): @@ -582,7 +775,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): def _extract_signature_function(self, video_id, player_url, example_sig): id_m = re.match( - r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$', + r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$', player_url) if not id_m: raise ExtractorError('Cannot identify player %r' % player_url) @@ -711,7 
+904,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id, video_id, note=False) except ExtractorError as err: - self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err)) + self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err)) return {} sub_lang_list = {} @@ -737,16 +930,33 @@ class YoutubeIE(YoutubeBaseInfoExtractor): return {} return sub_lang_list + def _get_ytplayer_config(self, video_id, webpage): + patterns = ( + # User data may contain arbitrary character sequences that may break + # JSON extraction with a regex, e.g. when '};' appears the second + # regex won't capture the whole JSON. We work around this by trying the + # more specific regex first; proper quoted-string handling, to be + # implemented in the future, will replace this workaround (see + # https://github.com/rg3/youtube-dl/issues/7468, + # https://github.com/rg3/youtube-dl/pull/7599) + r';ytplayer\.config\s*=\s*({.+?});ytplayer', + r';ytplayer\.config\s*=\s*({.+?});', + ) + config = self._search_regex( + patterns, webpage, 'ytplayer.config', default=None) + if config: + return self._parse_json( + uppercase_escape(config), video_id, fatal=False) + def _get_automatic_captions(self, video_id, webpage): """We need the webpage for getting the captions url, pass it as an argument to speed up the process.""" self.to_screen('%s: Looking for automatic captions' % video_id) - mobj = re.search(r';ytplayer.config = ({.*?});', webpage) + player_config = self._get_ytplayer_config(video_id, webpage) err_msg = 'Couldn\'t find automatic captions for %s' % video_id - if mobj is None: + if not player_config: self._downloader.report_warning(err_msg) return {} - player_config = json.loads(mobj.group(1)) try: args = player_config['args'] caption_url = args['ttsurl'] @@ -885,10 +1095,24 @@ class YoutubeIE(YoutubeBaseInfoExtractor): return formats def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + proto = ( 'http' if self._downloader.params.get('prefer_insecure', False) else 'https') + start_time = None + end_time = None + parsed_url = compat_urllib_parse_urlparse(url) + for component in [parsed_url.fragment, parsed_url.query]: + query = compat_parse_qs(component) + if start_time is None and 't' in query: + start_time = parse_duration(query['t'][0]) + if start_time is None and 'start' in query: + start_time = parse_duration(query['start'][0]) + if end_time is None and 'end' in query: + end_time = parse_duration(query['end'][0]) + # Extract original video URL from URL with redirection, like age verification, using next_url parameter mobj = re.search(self._NEXT_URL_RE, url) if mobj: @@ -939,10 +1163,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): age_gate = False video_info = None # Try looking directly into the video webpage - mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage) - if mobj: - json_code = uppercase_escape(mobj.group(1)) - ytplayer_config = json.loads(json_code) + ytplayer_config = self._get_ytplayer_config(video_id, video_webpage) + if ytplayer_config: args = ytplayer_config['args'] if args.get('url_encoded_fmt_stream_map'): # Convert to the same format returned by compat_parse_qs @@ -972,12 +1194,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor): if not video_info: video_info = get_video_info if 'token' in get_video_info: + # Different get_video_info requests may report different results, e.g.
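(Aside: the failure mode described in the `_get_ytplayer_config` comment above, demonstrated on a made-up page snippet with '};' inside the title; only the ordering of the two patterns from that hunk is exercised.)

```python
import re

page = ';ytplayer.config = {"args": {"title": "{dark walk};"}};ytplayer.load();'

# The generic lazy pattern stops at the first '};', here inside the title,
# and returns truncated, unparseable JSON.
generic = re.search(r';ytplayer\.config\s*=\s*({.+?});', page).group(1)

# Anchoring on the trailing ';ytplayer' keeps matching past the embedded '};'.
specific = re.search(r';ytplayer\.config\s*=\s*({.+?});ytplayer', page).group(1)

assert generic == '{"args": {"title": "{dark walk}'
assert specific == '{"args": {"title": "{dark walk};"}}'
```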
+ # some may report video unavailability, but some may serve it without + # any complaint (see https://github.com/rg3/youtube-dl/issues/7362, + # the original webpage as well as el=info and el=embedded get_video_info + # requests report video unavailability due to geo restriction while + # el=detailpage succeeds and returns valid data). This is probably + # due to YouTube measures against IP ranges of hosting providers. + # We work around this by preferring the first successful video_info containing + # the token if no such video_info has been found yet. + if 'token' not in video_info: + video_info = get_video_info break if 'token' not in video_info: if 'reason' in video_info: if 'The uploader has not made this video available in your country.' in video_info['reason']: regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None) - if regions_allowed is not None: + if regions_allowed: raise ExtractorError('YouTube said: This video is available in %s only' % ( ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))), expected=True) @@ -989,6 +1222,55 @@ class YoutubeIE(YoutubeBaseInfoExtractor): '"token" parameter not in video info for unknown reason', video_id=video_id) + # title + if 'title' in video_info: + video_title = video_info['title'][0] + else: + self._downloader.report_warning('Unable to extract video title') + video_title = '_' + + # description + video_description = get_element_by_id("eow-description", video_webpage) + if video_description: + video_description = re.sub(r'''(?x) + <a\s+ + (?:[a-zA-Z-]+="[^"]+"\s+)*? + title="([^"]+)"\s+ + (?:[a-zA-Z-]+="[^"]+"\s+)*? + class="yt-uix-redirect-link"\s*> + [^<]+ + </a> + ''', r'\1', video_description) + video_description = clean_html(video_description) + else: + fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage) + if fd_mobj: + video_description = unescapeHTML(fd_mobj.group(1)) + else: + video_description = '' + + if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False): + if not self._downloader.params.get('noplaylist'): + entries = [] + feed_ids = [] + multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0]) + for feed in multifeed_metadata_list.split(','): + feed_data = compat_parse_qs(feed) + entries.append({ + '_type': 'url_transparent', + 'ie_key': 'Youtube', + 'url': smuggle_url( + '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]), + {'force_singlefeed': True}), + 'title': '%s (%s)' % (video_title, feed_data['title'][0]), + }) + feed_ids.append(feed_data['id'][0]) + self.to_screen( + 'Downloading multifeed video (%s) - add --no-playlist to just download video %s' + % (', '.join(feed_ids), video_id)) + return self.playlist_result(entries, video_id, video_title, video_description) + self.to_screen('Downloading just video %s because of --no-playlist' % video_id) + if 'view_count' in video_info: view_count = int(video_info['view_count'][0]) else: @@ -1014,13 +1296,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): else: self._downloader.report_warning('unable to extract uploader nickname') - # title - if 'title' in video_info: - video_title = video_info['title'][0] - else: - self._downloader.report_warning('Unable to extract video title') - video_title = '_' - # thumbnail image # We try first to get a high quality image: m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">', @@ -1045,6 +1320,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor): upload_date = '
'.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) upload_date = unified_strdate(upload_date) + m_music = re.search( + r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li', + video_webpage) + if m_music: + video_alt_title = remove_quotes(unescapeHTML(m_music.group('title'))) + video_creator = clean_html(m_music.group('creator')) + else: + video_alt_title = video_creator = None + m_cat_container = self._search_regex( r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>', video_webpage, 'categories', default=None) @@ -1056,25 +1340,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): else: video_categories = None - # description - video_description = get_element_by_id("eow-description", video_webpage) - if video_description: - video_description = re.sub(r'''(?x) - <a\s+ - (?:[a-zA-Z-]+="[^"]+"\s+)*? - title="([^"]+)"\s+ - (?:[a-zA-Z-]+="[^"]+"\s+)*? - class="yt-uix-redirect-link"\s*> - [^<]+ - </a> - ''', r'\1', video_description) - video_description = clean_html(video_description) - else: - fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage) - if fd_mobj: - video_description = unescapeHTML(fd_mobj.group(1)) - else: - video_description = '' + video_tags = [ + unescapeHTML(m.group('content')) + for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)] def _extract_count(count_name): return str_to_int(self._search_regex( @@ -1125,7 +1393,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0] if 'rtmpe%3Dyes' in encoded_url_map: raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True) - url_map = {} + formats = [] for url_data_str in encoded_url_map.split(','): url_data = compat_parse_qs(url_data_str) if 'itag' not in url_data or 'url' not in url_data: @@ -1171,7 +1439,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): player_desc = 'flash player %s' % player_version else: player_version = self._search_regex( - r'html5player-([^/]+?)(?:/html5player)?\.js', + [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'], player_url, 'html5 player', fatal=False) player_desc = 'html5 player %s' % player_version @@ -1185,12 +1453,57 @@ class YoutubeIE(YoutubeBaseInfoExtractor): url += '&signature=' + signature if 'ratebypass' not in url: url += '&ratebypass=yes' - url_map[format_id] = url - formats = _map_to_format_list(url_map) + + # Some itags are not included in DASH manifest thus corresponding formats will + # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993). + # Trying to extract metadata from url_encoded_fmt_stream_map entry. 
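# [Editor's note] A minimal, self-contained sketch (not part of the patch) of the
# metadata parsing performed just below on a url_encoded_fmt_stream_map entry:
# the 'size' field carries 'WIDTHxHEIGHT' and the 'type' field carries a MIME
# type plus an optional codecs parameter. The sample values are hypothetical and
# the codecs regex here is a simplification of the patch's own.
import re


def parse_size(size):
    mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', size or '')
    return (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)


def parse_type(type_):
    kind_ext, _, params = type_.partition(';')
    kind, _, ext = kind_ext.partition('/')
    mobj = re.search(r'codecs=["\']?(?P<val>[^"\']+)', params)
    codecs = [c.strip() for c in mobj.group('val').split(',')] if mobj else []
    return kind, ext, codecs


print(parse_size('1280x720'))                                    # (1280, 720)
print(parse_type('video/mp4; codecs="avc1.42001E, mp4a.40.2"'))  # ('video', 'mp4', ['avc1.42001E', 'mp4a.40.2'])
# The patch's own, more defensive implementation follows.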
+ mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0]) + width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None) + dct = { + 'format_id': format_id, + 'url': url, + 'player_url': player_url, + 'filesize': int_or_none(url_data.get('clen', [None])[0]), + 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000), + 'width': width, + 'height': height, + 'fps': int_or_none(url_data.get('fps', [None])[0]), + 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0], + } + type_ = url_data.get('type', [None])[0] + if type_: + type_split = type_.split(';') + kind_ext = type_split[0].split('/') + if len(kind_ext) == 2: + kind, ext = kind_ext + dct['ext'] = ext + if kind in ('audio', 'video'): + codecs = None + for mobj in re.finditer( + r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_): + if mobj.group('key') == 'codecs': + codecs = mobj.group('val') + break + if codecs: + codecs = codecs.split(',') + if len(codecs) == 2: + acodec, vcodec = codecs[0], codecs[1] + else: + acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0]) + dct.update({ + 'acodec': acodec, + 'vcodec': vcodec, + }) + if format_id in self._formats: + dct.update(self._formats[format_id]) + formats.append(dct) elif video_info.get('hlsvp'): manifest_url = video_info['hlsvp'][0] url_map = self._extract_from_m3u8(manifest_url, video_id) formats = _map_to_format_list(url_map) + # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming + for a_format in formats: + a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True' else: raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info') @@ -1228,10 +1541,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor): r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">', video_webpage) if stretched_m: - ratio = float(stretched_m.group('w')) / float(stretched_m.group('h')) - for f in formats: - if f.get('vcodec') != 'none': - f['stretched_ratio'] = ratio + w = float(stretched_m.group('w')) + h = float(stretched_m.group('h')) + # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0). + # We will only process correct ratios. + if w > 0 and h > 0: + ratio = w / h + for f in formats: + if f.get('vcodec') != 'none': + f['stretched_ratio'] = ratio self._sort_formats(formats) @@ -1240,10 +1558,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'uploader': video_uploader, 'uploader_id': video_uploader_id, 'upload_date': upload_date, + 'creator': video_creator, 'title': video_title, + 'alt_title': video_alt_title, 'thumbnail': video_thumbnail, 'description': video_description, 'categories': video_categories, + 'tags': video_tags, 'subtitles': video_subtitles, 'automatic_captions': automatic_captions, 'duration': video_duration, @@ -1256,10 +1577,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]), 'formats': formats, 'is_live': is_live, + 'start_time': start_time, + 'end_time': end_time, } -class YoutubePlaylistIE(YoutubeBaseInfoExtractor): +class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com playlists' _VALID_URL = r"""(?x)(?: (?:https?://)? 
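# [Editor's note, between hunks] A hedged, stand-alone sketch (not part of the
# patch) of the start/end time handling added to YoutubeIE._real_extract above:
# 't', 'start' and 'end' are read from both the URL fragment and the query
# string, and the patch then feeds the raw values through parse_duration().
# The example URL is only illustrative.
try:
    from urllib.parse import urlparse, parse_qs  # Python 3
except ImportError:
    from urlparse import urlparse, parse_qs  # Python 2


def extract_times(url):
    parsed = urlparse(url)
    start_time = end_time = None
    for component in (parsed.fragment, parsed.query):
        query = parse_qs(component)
        if start_time is None and 't' in query:
            start_time = query['t'][0]
        if start_time is None and 'start' in query:
            start_time = query['start'][0]
        if end_time is None and 'end' in query:
            end_time = query['end'][0]
    return start_time, end_time


print(extract_times('https://www.youtube.com/watch?v=BaW_jenozKc#t=1m30s'))  # ('1m30s', None)
# (The YoutubePlaylistIE._VALID_URL pattern interrupted here continues in the next hunk.)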
@@ -1267,7 +1590,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): youtube\.com/ (?: (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries) - \? (?:.*?&)*? (?:p|a|list)= + \? (?:.*?[&;])*? (?:p|a|list)= | p/ ) ( @@ -1280,7 +1603,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,}) )""" _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s' - _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)' + _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?' IE_NAME = 'youtube:playlist' _TESTS = [{ 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re', @@ -1397,37 +1720,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): else: self.report_warning('Youtube gives an alert message: ' + match) - # Extract the video ids from the playlist pages - def _entries(): - more_widget_html = content_html = page - for page_num in itertools.count(1): - matches = re.finditer(self._VIDEO_RE, content_html) - # We remove the duplicates and the link with index 0 - # (it's not the first video of the playlist) - new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0') - for vid_id in new_ids: - yield self.url_result(vid_id, 'Youtube', video_id=vid_id) - - mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) - if not mobj: - break - - more = self._download_json( - 'https://youtube.com/%s' % mobj.group('more'), playlist_id, - 'Downloading page #%s' % page_num, - transform_source=uppercase_escape) - content_html = more['content_html'] - if not content_html.strip(): - # Some webpages show a "Load more" button but they don't - # have more videos - break - more_widget_html = more['load_more_widget_html'] - playlist_title = self._html_search_regex( - r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>', + r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>', page, 'title') - return self.playlist_result(_entries(), playlist_id, playlist_title) + return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title) def _real_extract(self, url): # Extract playlist id @@ -1453,35 +1750,34 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): return self._extract_playlist(playlist_id) -class YoutubeChannelIE(InfoExtractor): +class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com channels' _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)' _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos' + _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?' 
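# [Editor's note] A small runnable sketch (not part of the patch) showing how the
# _VIDEO_RE-style patterns above, with optional named groups, scrape video ids
# and titles out of page HTML; the sample markup is hypothetical.
import re

PLAYLIST_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'

sample = ('<a href="/watch?v=BaW_jenozKc&list=PLx&index=1" '
          'class="pl-video-title-link">youtube-dl test video</a>')
for mobj in re.finditer(PLAYLIST_VIDEO_RE, sample):
    # group('title') is None whenever the optional trailing group did not match
    print(mobj.group('id'), mobj.group('index'), mobj.group('title'))
# -> BaW_jenozKc 1 youtube-dl test video
# (The YoutubeChannelIE class body continues below.)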
IE_NAME = 'youtube:channel' _TESTS = [{ 'note': 'paginated channel', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'playlist_mincount': 91, 'info_dict': { - 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', + 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w', + 'title': 'Uploads from lex will', } + }, { + 'note': 'Age restricted channel', + # from https://www.youtube.com/user/DeusExOfficial + 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w', + 'playlist_mincount': 64, + 'info_dict': { + 'id': 'UUs0ifCMCm1icqRbqhUINa0w', + 'title': 'Uploads from Deus Ex', + }, }] - @staticmethod - def extract_videos_from_page(page): - ids_in_page = [] - titles_in_page = [] - for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?', page): - video_id = mobj.group('id') - video_title = unescapeHTML(mobj.group('title')) - try: - idx = ids_in_page.index(video_id) - if video_title and not titles_in_page[idx]: - titles_in_page[idx] = video_title - except ValueError: - ids_in_page.append(video_id) - titles_in_page.append(video_title) - return zip(ids_in_page, titles_in_page) + @classmethod + def suitable(cls, url): + return False if YoutubePlaylistsIE.suitable(url) else super(YoutubeChannelIE, cls).suitable(url) def _real_extract(self, url): channel_id = self._match_id(url) @@ -1494,12 +1790,15 @@ class YoutubeChannelIE(InfoExtractor): channel_page = self._download_webpage( url + '?view=57', channel_id, 'Downloading channel page', fatal=False) - channel_playlist_id = self._html_search_meta( - 'channelId', channel_page, 'channel id', default=None) - if not channel_playlist_id: - channel_playlist_id = self._search_regex( - r'data-channel-external-id="([^"]+)"', - channel_page, 'channel id', default=None) + if channel_page is False: + channel_playlist_id = False + else: + channel_playlist_id = self._html_search_meta( + 'channelId', channel_page, 'channel id', default=None) + if not channel_playlist_id: + channel_playlist_id = self._search_regex( + r'data-(?:channel-external-|yt)id="([^"]+)"', + channel_page, 'channel id', default=None) if channel_playlist_id and channel_playlist_id.startswith('UC'): playlist_id = 'UU' + channel_playlist_id[2:] return self.url_result( @@ -1522,29 +1821,7 @@ class YoutubeChannelIE(InfoExtractor): for video_id, video_title in self.extract_videos_from_page(channel_page)] return self.playlist_result(entries, channel_id) - def _entries(): - more_widget_html = content_html = channel_page - for pagenum in itertools.count(1): - - for video_id, video_title in self.extract_videos_from_page(content_html): - yield self.url_result( - video_id, 'Youtube', video_id=video_id, - video_title=video_title) - - mobj = re.search( - r'data-uix-load-more-href="/?(?P<more>[^"]+)"', - more_widget_html) - if not mobj: - break - - more = self._download_json( - 'https://youtube.com/%s' % mobj.group('more'), channel_id, - 'Downloading page #%s' % (pagenum + 1), - transform_source=uppercase_escape) - content_html = more['content_html'] - more_widget_html = more['load_more_widget_html'] - - return self.playlist_result(_entries(), channel_id) + return self.playlist_result(self._entries(channel_page, channel_id), channel_id) class YoutubeUserIE(YoutubeChannelIE): @@ -1575,6 +1852,36 @@ class YoutubeUserIE(YoutubeChannelIE): return super(YoutubeUserIE, cls).suitable(url) +class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor): + IE_DESC = 'YouTube.com user/channel playlists' + _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists' 
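# [Editor's note] A hedged illustration (not part of the patch) of the mapping
# YoutubeChannelIE._real_extract relies on above: a channel id starting with
# 'UC' corresponds to the auto-generated uploads playlist whose id starts with
# 'UU', which is also why the channel tests now expect 'UU...' playlist ids.
def uploads_playlist_id(channel_id):
    if channel_id.startswith('UC'):
        return 'UU' + channel_id[2:]
    return None


assert uploads_playlist_id('UCKfVa3S1e4PHvxWcwyMMg8w') == 'UUKfVa3S1e4PHvxWcwyMMg8w'
# (The YoutubePlaylistsIE class body interrupted here continues below.)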
+ IE_NAME = 'youtube:playlists' + + _TESTS = [{ + 'url': 'http://www.youtube.com/user/ThirstForScience/playlists', + 'playlist_mincount': 4, + 'info_dict': { + 'id': 'ThirstForScience', + 'title': 'Thirst for Science', + }, + }, { + # with "Load more" button + 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd', + 'playlist_mincount': 70, + 'info_dict': { + 'id': 'igorkle1', + 'title': 'Игорь Клейнер', + }, + }, { + 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists', + 'playlist_mincount': 17, + 'info_dict': { + 'id': 'UCiU1dHvZObB2iP6xkJ__Icw', + 'title': 'Chem Player', + }, + }] + + class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE): IE_DESC = 'YouTube.com searches' # there doesn't appear to be a real limit, for example if you search for @@ -1648,7 +1955,7 @@ class YoutubeSearchURLIE(InfoExtractor): r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML') part_codes = re.findall( - r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code) + r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code) entries = [] for part_code in part_codes: part_title = self._html_search_regex( @@ -1670,13 +1977,13 @@ class YoutubeSearchURLIE(InfoExtractor): } -class YoutubeShowIE(InfoExtractor): +class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor): IE_DESC = 'YouTube.com (multi-season) shows' _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)' IE_NAME = 'youtube:show' _TESTS = [{ - 'url': 'http://www.youtube.com/show/airdisasters', - 'playlist_mincount': 3, + 'url': 'https://www.youtube.com/show/airdisasters', + 'playlist_mincount': 5, 'info_dict': { 'id': 'airdisasters', 'title': 'Air Disasters', @@ -1684,26 +1991,9 @@ class YoutubeShowIE(InfoExtractor): }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - playlist_id = mobj.group('id') - webpage = self._download_webpage( - url, playlist_id, 'Downloading show webpage') - # There's one playlist for each season of the show - m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage)) - self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons))) - entries = [ - self.url_result( - 'https://www.youtube.com' + season.group(1), 'YoutubePlaylist') - for season in m_seasons - ] - title = self._og_search_title(webpage, fatal=False) - - return { - '_type': 'playlist', - 'id': playlist_id, - 'title': title, - 'entries': entries, - } + playlist_id = self._match_id(url) + return super(YoutubeShowIE, self)._real_extract( + 'https://www.youtube.com/show/%s/playlists' % playlist_id) class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor): @@ -1810,6 +2100,7 @@ class YoutubeTruncatedURLIE(InfoExtractor): annotation_id=annotation_[^&]+| x-yt-cl=[0-9]+| hl=[^&]*| + t=[0-9]+ )? 
| attribution_link\?a=[^&]+ @@ -1832,6 +2123,9 @@ class YoutubeTruncatedURLIE(InfoExtractor): }, { 'url': 'https://www.youtube.com/watch?hl=en-GB', 'only_matching': True, + }, { + 'url': 'https://www.youtube.com/watch?t=2372', + 'only_matching': True, }] def _real_extract(self, url): diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py index 98f15177b..a795f56b3 100644 --- a/youtube_dl/extractor/zdf.py +++ b/youtube_dl/extractor/zdf.py @@ -9,6 +9,7 @@ from ..utils import ( int_or_none, unified_strdate, OnDemandPagedList, + xpath_text, ) @@ -19,13 +20,11 @@ def extract_from_xml_url(ie, video_id, xml_url): errnote='Failed to download video info') title = doc.find('.//information/title').text - description = doc.find('.//information/detail').text - duration = int(doc.find('.//details/lengthSec').text) - uploader_node = doc.find('.//details/originChannelTitle') - uploader = None if uploader_node is None else uploader_node.text - uploader_id_node = doc.find('.//details/originChannelId') - uploader_id = None if uploader_id_node is None else uploader_id_node.text - upload_date = unified_strdate(doc.find('.//details/airtime').text) + description = xpath_text(doc, './/information/detail', 'description') + duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration')) + uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader') + uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id') + upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date')) def xml_to_format(fnode): video_url = fnode.find('url').text @@ -40,15 +39,14 @@ def extract_from_xml_url(ie, video_id, xml_url): ext = format_m.group('container') proto = format_m.group('proto').lower() - quality = fnode.find('./quality').text - abr = int(fnode.find('./audioBitrate').text) // 1000 - vbr_node = fnode.find('./videoBitrate') - vbr = None if vbr_node is None else int(vbr_node.text) // 1000 + quality = xpath_text(fnode, './quality', 'quality') + abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000) + vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000) - width_node = fnode.find('./width') - width = None if width_node is None else int_or_none(width_node.text) - height_node = fnode.find('./height') - height = None if height_node is None else int_or_none(height_node.text) + width = int_or_none(xpath_text(fnode, './width', 'width')) + height = int_or_none(xpath_text(fnode, './height', 'height')) + + filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize')) format_note = '' if not format_note: @@ -64,12 +62,31 @@ def extract_from_xml_url(ie, video_id, xml_url): 'vbr': vbr, 'width': width, 'height': height, - 'filesize': int_or_none(fnode.find('./filesize').text), + 'filesize': filesize, 'format_note': format_note, 'protocol': proto, '_available': is_available, } + def xml_to_thumbnails(fnode): + thumbnails = [] + for node in fnode: + thumbnail_url = node.text + if not thumbnail_url: + continue + thumbnail = { + 'url': thumbnail_url, + } + if 'key' in node.attrib: + m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key']) + if m: + thumbnail['width'] = int(m.group(1)) + thumbnail['height'] = int(m.group(2)) + thumbnails.append(thumbnail) + return thumbnails + + thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage')) + format_nodes = doc.findall('.//formitaeten/formitaet') formats = list(filter( lambda f: f['_available'], @@ -81,6 +98,7 @@ def extract_from_xml_url(ie, video_id, xml_url): 'title': title, 
'description': description, 'duration': duration, + 'thumbnails': thumbnails, 'uploader': uploader, 'uploader_id': uploader_id, 'upload_date': upload_date, diff --git a/youtube_dl/extractor/zingmp3.py b/youtube_dl/extractor/zingmp3.py index 7dc1e2f2b..437eecb67 100644 --- a/youtube_dl/extractor/zingmp3.py +++ b/youtube_dl/extractor/zingmp3.py @@ -9,9 +9,11 @@ from ..utils import ExtractorError class ZingMp3BaseInfoExtractor(InfoExtractor): - def _extract_item(self, item): + def _extract_item(self, item, fatal=True): error_message = item.find('./errormessage').text if error_message: + if not fatal: + return raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error_message), expected=True) @@ -43,7 +45,9 @@ class ZingMp3BaseInfoExtractor(InfoExtractor): entries = [] for i, item in enumerate(items, 1): - entry = self._extract_item(item) + entry = self._extract_item(item, fatal=False) + if not entry: + continue entry['id'] = '%s-%d' % (id, i) entries.append(entry) @@ -85,7 +89,7 @@ class ZingMp3SongIE(ZingMp3BaseInfoExtractor): class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor): - _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html' + _VALID_URL = r'https?://mp3\.zing\.vn/(?:album|playlist)/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html' _TESTS = [{ 'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html', 'info_dict': { @@ -94,6 +98,9 @@ class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor): 'title': 'Lâu Đài Tình Ái - Bằng Kiều ft. Minh Tuyết | Album 320 lossless', }, 'playlist_count': 10, + }, { + 'url': 'http://mp3.zing.vn/playlist/Duong-Hong-Loan-apollobee/IWCAACCB.html', + 'only_matching': True, }] IE_NAME = 'zingmp3:album' IE_DESC = 'mp3.zing.vn albums' diff --git a/youtube_dl/jsinterp.py b/youtube_dl/jsinterp.py index 0e0c7d90d..a7440c582 100644 --- a/youtube_dl/jsinterp.py +++ b/youtube_dl/jsinterp.py @@ -214,7 +214,7 @@ class JSInterpreter(object): obj = {} obj_m = re.search( (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) + - r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' + + r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' + r'\}\s*;', self.code) fields = obj_m.group('fields') @@ -232,10 +232,10 @@ class JSInterpreter(object): def extract_function(self, funcname): func_m = re.search( r'''(?x) - (?:function\s+%s|[{;]%s\s*=\s*function)\s* + (?:function\s+%s|[{;,]%s\s*=\s*function|var\s+%s\s*=\s*function)\s* \((?P<args>[^)]*)\)\s* \{(?P<code>[^}]+)\}''' % ( - re.escape(funcname), re.escape(funcname)), + re.escape(funcname), re.escape(funcname), re.escape(funcname)), self.code) if func_m is None: raise ExtractorError('Could not find JS function %r' % funcname) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 9016e3498..c46e136bf 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals import os.path import optparse -import shlex import sys from .downloader.external import list_external_downloaders @@ -11,6 +10,7 @@ from .compat import ( compat_get_terminal_size, compat_getenv, compat_kwargs, + compat_shlex_split, ) from .utils import ( preferredencoding, @@ -28,7 +28,7 @@ def parseOpts(overrideArguments=None): try: res = [] for l in optionf: - res += shlex.split(l, comments=True) + res += compat_shlex_split(l, comments=True) finally: optionf.close() return res @@ -276,7 +276,7 @@ def parseOpts(overrideArguments=None): 'For example, to only match videos that have been liked more than ' '100 times and disliked 
less than 50 times (or the dislike ' 'functionality is not available at the given service), but who ' - 'also have a description, use --match-filter ' + 'also have a description, use --match-filter ' '"like_count > 100 & dislike_count <? 50 & description" .' )) selection.add_option( @@ -320,7 +320,7 @@ def parseOpts(overrideArguments=None): authentication.add_option( '--video-password', dest='videopassword', metavar='PASSWORD', - help='Video password (vimeo, smotri)') + help='Video password (vimeo, smotri, youku)') video_format = optparse.OptionGroup(parser, 'Video Format Options') video_format.add_option( @@ -338,7 +338,7 @@ def parseOpts(overrideArguments=None): video_format.add_option( '-F', '--list-formats', action='store_true', dest='listformats', - help='List all available formats') + help='List all available formats of requested videos') video_format.add_option( '--youtube-include-dash-manifest', action='store_true', dest='youtube_include_dash_manifest', default=True, @@ -363,7 +363,7 @@ def parseOpts(overrideArguments=None): subtitles.add_option( '--write-auto-sub', '--write-automatic-sub', action='store_true', dest='writeautomaticsub', default=False, - help='Write automatic subtitle file (YouTube only)') + help='Write automatically generated subtitle file (YouTube only)') subtitles.add_option( '--all-subs', action='store_true', dest='allsubtitles', default=False, @@ -602,7 +602,7 @@ def parseOpts(overrideArguments=None): filesystem.add_option( '-A', '--auto-number', action='store_true', dest='autonumber', default=False, - help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000') + help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000') filesystem.add_option( '-t', '--title', action='store_true', dest='usetitle', default=False, diff --git a/youtube_dl/postprocessor/common.py b/youtube_dl/postprocessor/common.py index 4191d040b..599dd1df2 100644 --- a/youtube_dl/postprocessor/common.py +++ b/youtube_dl/postprocessor/common.py @@ -4,6 +4,7 @@ import os from ..utils import ( PostProcessingError, + cli_configuration_args, encodeFilename, ) @@ -61,11 +62,7 @@ class PostProcessor(object): self._downloader.report_warning(errnote) def _configuration_args(self, default=[]): - pp_args = self._downloader.params.get('postprocessor_args') - if pp_args is None: - return default - assert isinstance(pp_args, list) - return pp_args + return cli_configuration_args(self._downloader.params, 'postprocessor_args', default) class AudioConversionError(PostProcessingError): diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py index 1f723908b..daca5d814 100644 --- a/youtube_dl/postprocessor/ffmpeg.py +++ b/youtube_dl/postprocessor/ffmpeg.py @@ -52,7 +52,7 @@ class FFmpegPostProcessor(PostProcessor): def _determine_executables(self): programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe'] - prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False) + prefer_ffmpeg = False self.basename = None self.probe_basename = None @@ -60,6 +60,7 @@ class FFmpegPostProcessor(PostProcessor): self._paths = None self._versions = None if self._downloader: + prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False) location = self._downloader.params.get('ffmpeg_location') if location is not None: if not os.path.exists(location): @@ -135,7 +136,10 @@ class FFmpegPostProcessor(PostProcessor): files_cmd = [] for path in input_paths: - files_cmd.extend([encodeArgument('-i'), 
encodeFilename(path, True)]) + files_cmd.extend([ + encodeArgument('-i'), + encodeFilename(self._ffmpeg_filename_argument(path), True) + ]) cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] + files_cmd + [encodeArgument(o) for o in opts] + @@ -155,10 +159,10 @@ class FFmpegPostProcessor(PostProcessor): self.run_ffmpeg_multiple_files([path], out_path, opts) def _ffmpeg_filename_argument(self, fn): - # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details - if fn.startswith('-'): - return './' + fn - return fn + # Always use 'file:' because the filename may contain ':' (ffmpeg + # interprets that as a protocol) or can start with '-' (-- is broken in + # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details) + return 'file:' + fn class FFmpegExtractAudioPP(FFmpegPostProcessor): @@ -269,7 +273,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor): return [], information try: - self._downloader.to_screen('[' + self.basename + '] Destination: ' + new_path) + self._downloader.to_screen('[ffmpeg] Destination: ' + new_path) self.run_ffmpeg(path, new_path, acodec, more_opts) except AudioConversionError as e: raise PostProcessingError( diff --git a/youtube_dl/update.py b/youtube_dl/update.py index fc7ac8305..995b8ed96 100644 --- a/youtube_dl/update.py +++ b/youtube_dl/update.py @@ -9,11 +9,8 @@ import subprocess import sys from zipimport import zipimporter -from .compat import ( - compat_str, - compat_urllib_request, -) -from .utils import make_HTTPS_handler +from .utils import encode_compat_str + from .version import __version__ @@ -47,7 +44,7 @@ def rsa_verify(message, signature, key): return True -def update_self(to_screen, verbose): +def update_self(to_screen, verbose, opener): """Update the program file with the latest version from the repository""" UPDATE_URL = "https://rg3.github.io/youtube-dl/update/" @@ -59,15 +56,12 @@ def update_self(to_screen, verbose): to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.') return - https_handler = make_HTTPS_handler({}) - opener = compat_urllib_request.build_opener(https_handler) - # Check if there is a new version try: newversion = opener.open(VERSION_URL).read().decode('utf-8').strip() except Exception: if verbose: - to_screen(compat_str(traceback.format_exc())) + to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: can\'t find the current version. Please try again later.') return if newversion == __version__: @@ -80,7 +74,7 @@ def update_self(to_screen, verbose): versions_info = json.loads(versions_info) except Exception: if verbose: - to_screen(compat_str(traceback.format_exc())) + to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: can\'t obtain versions info. 
Please try again later.') return if 'signature' not in versions_info: @@ -129,7 +123,7 @@ def update_self(to_screen, verbose): urlh.close() except (IOError, OSError): if verbose: - to_screen(compat_str(traceback.format_exc())) + to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to download latest version') return @@ -143,7 +137,7 @@ def update_self(to_screen, verbose): outf.write(newcontent) except (IOError, OSError): if verbose: - to_screen(compat_str(traceback.format_exc())) + to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to write the new version') return @@ -163,7 +157,7 @@ start /b "" cmd /c del "%%~f0"&exit /b" return # Do not show premature success messages except (IOError, OSError): if verbose: - to_screen(compat_str(traceback.format_exc())) + to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to overwrite current version') return @@ -175,7 +169,7 @@ start /b "" cmd /c del "%%~f0"&exit /b" urlh.close() except (IOError, OSError): if verbose: - to_screen(compat_str(traceback.format_exc())) + to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to download latest version') return @@ -189,7 +183,7 @@ start /b "" cmd /c del "%%~f0"&exit /b" outf.write(newcontent) except (IOError, OSError): if verbose: - to_screen(compat_str(traceback.format_exc())) + to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to overwrite current version') return diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index ae813099d..1737ac5f6 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals +import base64 import calendar import codecs import contextlib @@ -35,6 +36,7 @@ import zlib from .compat import ( compat_basestring, compat_chr, + compat_etree_fromstring, compat_html_entities, compat_http_client, compat_kwargs, @@ -139,21 +141,24 @@ def write_json_file(obj, fn): if sys.version_info >= (2, 7): - def find_xpath_attr(node, xpath, key, val): + def find_xpath_attr(node, xpath, key, val=None): """ Find the xpath xpath[@key=val] """ - assert re.match(r'^[a-zA-Z-]+$', key) - assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) - expr = xpath + "[@%s='%s']" % (key, val) + assert re.match(r'^[a-zA-Z_-]+$', key) + if val: + assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val) + expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) return node.find(expr) else: - def find_xpath_attr(node, xpath, key, val): + def find_xpath_attr(node, xpath, key, val=None): # Here comes the crazy part: In 2.6, if the xpath is a unicode, # .//node does not match if a node is a direct child of . ! 
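# [Editor's note] A minimal sketch (not from the patch) of what the relaxed
# find_xpath_attr signature above enables on Python 2.7+/3: with val=None the
# predicate becomes xpath[@key], matching any element that merely has the
# attribute. The sample XML is made up.
import xml.etree.ElementTree as ET

doc = ET.fromstring('<root><media url="http://a/1.mp4"/><media/></root>')
print(doc.find(".//media[@url]").attrib['url'])                  # http://a/1.mp4
print(doc.find(".//media[@url='http://a/1.mp4']") is not None)   # True
# (The Python 2.6 fallback, which walks the candidates by hand, continues below.)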
if isinstance(xpath, compat_str): xpath = xpath.encode('ascii') for f in node.findall(xpath): - if f.attrib.get(key) == val: + if key not in f.attrib: + continue + if val is None or f.attrib.get(key) == val: return f return None @@ -173,12 +178,21 @@ def xpath_with_ns(path, ns_map): return '/'.join(replaced) -def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): - if sys.version_info < (2, 7): # Crazy 2.6 - xpath = xpath.encode('ascii') +def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): + def _find_xpath(xpath): + if sys.version_info < (2, 7): # Crazy 2.6 + xpath = xpath.encode('ascii') + return node.find(xpath) - n = node.find(xpath) - if n is None or n.text is None: + if isinstance(xpath, (str, compat_str)): + n = _find_xpath(xpath) + else: + for xp in xpath: + n = _find_xpath(xp) + if n is not None: + break + + if n is None: if default is not NO_DEFAULT: return default elif fatal: @@ -186,9 +200,37 @@ def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): raise ExtractorError('Could not find XML element %s' % name) else: return None + return n + + +def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT): + n = xpath_element(node, xpath, name, fatal=fatal, default=default) + if n is None or n == default: + return n + if n.text is None: + if default is not NO_DEFAULT: + return default + elif fatal: + name = xpath if name is None else name + raise ExtractorError('Could not find XML element\'s text %s' % name) + else: + return None return n.text +def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): + n = find_xpath_attr(node, xpath, key) + if n is None: + if default is not NO_DEFAULT: + return default + elif fatal: + name = '%s[@%s]' % (xpath, key) if name is None else name + raise ExtractorError('Could not find XML attribute %s' % name) + else: + return None + return n.attrib[key] + + def get_element_by_id(id, html): """Return the content of the tag with the specified ID in the passed HTML document""" return get_element_by_attribute("id", id, html) @@ -324,13 +366,20 @@ def sanitize_path(s): if drive_or_unc: norm_path.pop(0) sanitized_path = [ - path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|\.$)', '#', path_part) + path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part) for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) return os.path.join(*sanitized_path) +# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of +# unwanted failures due to missing protocol +def sanitized_Request(url, *args, **kwargs): + return compat_urllib_request.Request( + 'http:%s' % url if url.startswith('//') else url, *args, **kwargs) + + def orderedSet(iterable): """ Remove all duplicates from the input iterable """ res = [] @@ -354,10 +403,14 @@ def _htmlentity_transform(entity): numstr = '0%s' % numstr else: base = 10 - return compat_chr(int(numstr, base)) + # See https://github.com/rg3/youtube-dl/issues/7518 + try: + return compat_chr(int(numstr, base)) + except ValueError: + pass # Unknown entity in name, return its literal representation - return ('&%s;' % entity) + return '&%s;' % entity def unescapeHTML(s): @@ -576,16 +629,19 @@ class ContentTooShortError(Exception): download is too small for what the server announced first, indicating the connection was probably interrupted. 
""" - # Both in bytes - downloaded = None - expected = None def __init__(self, downloaded, expected): + # Both in bytes self.downloaded = downloaded self.expected = expected def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): + # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting + # expected HTTP responses to meet HTTP/1.0 or later (see also + # https://github.com/rg3/youtube-dl/issues/6727) + if sys.version_info < (3, 0): + kwargs[b'strict'] = True hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: @@ -607,6 +663,16 @@ def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): return hc +def handle_youtubedl_headers(headers): + filtered_headers = headers + + if 'Youtubedl-no-compression' in filtered_headers: + filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') + del filtered_headers['Youtubedl-no-compression'] + + return filtered_headers + + class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """Handler for HTTP requests and responses. @@ -614,7 +680,7 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): the standard headers to every HTTP request and handles gzipped and deflated responses from web servers. If compression is to be avoided in a particular request, the original request in the program code only has - to include the HTTP header "Youtubedl-No-Compression", which will be + to include the HTTP header "Youtubedl-no-compression", which will be removed before making the real request. Part of this code was copied from: @@ -650,15 +716,33 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): return ret def http_request(self, req): + # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not + # always respected by websites, some tend to give out URLs with non percent-encoded + # non-ASCII characters (see telemb.py, ard.py [#3412]) + # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991) + # To work around aforementioned issue we will replace request's original URL with + # percent-encoded one + # Since redirects are also affected (e.g. 
http://www.southpark.de/alle-episoden/s18e09) + # the code of this workaround has been moved here from YoutubeDL.urlopen() + url = req.get_full_url() + url_escaped = escape_url(url) + + # Substitute URL if any change after escaping + if url != url_escaped: + req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request + new_req = req_type( + url_escaped, data=req.data, headers=req.headers, + origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) + new_req.timeout = req.timeout + req = new_req + for h, v in std_headers.items(): # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275 # The dict keys are capitalized because of this bug by urllib if h.capitalize() not in req.headers: req.add_header(h, v) - if 'Youtubedl-no-compression' in req.headers: - if 'Accept-encoding' in req.headers: - del req.headers['Accept-encoding'] - del req.headers['Youtubedl-no-compression'] + + req.headers = handle_youtubedl_headers(req.headers) if sys.version_info < (2, 7) and '#' in req.get_full_url(): # Python 2.6 is brain-dead when it comes to fragments @@ -694,6 +778,18 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): gz = io.BytesIO(self.deflate(resp.read())) resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg + # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see + # https://github.com/rg3/youtube-dl/issues/6457). + if 300 <= resp.code < 400: + location = resp.headers.get('Location') + if location: + # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 + if sys.version_info >= (3, 0): + location = location.encode('iso-8859-1').decode('utf-8') + location_escaped = escape_url(location) + if location != location_escaped: + del resp.headers['Location'] + resp.headers['Location'] = location_escaped return resp https_request = http_request @@ -717,15 +813,41 @@ class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): req, **kwargs) +class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): + def __init__(self, cookiejar=None): + compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) + + def http_response(self, request, response): + # Python 2 will choke on next HTTP request in row if there are non-ASCII + # characters in Set-Cookie HTTP header of last response (see + # https://github.com/rg3/youtube-dl/issues/6769). + # In order to at least prevent crashing we will percent encode Set-Cookie + # header before HTTPCookieProcessor starts processing it. 
+ # if sys.version_info < (3, 0) and response.headers: + # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): + # set_cookie = response.headers.get(set_cookie_header) + # if set_cookie: + # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") + # if set_cookie != set_cookie_escaped: + # del response.headers[set_cookie_header] + # response.headers[set_cookie_header] = set_cookie_escaped + return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) + + https_request = compat_urllib_request.HTTPCookieProcessor.http_request + https_response = http_response + + def parse_iso8601(date_str, delimiter='T', timezone=None): """ Return a UNIX timestamp from the given date """ if date_str is None: return None + date_str = re.sub(r'\.[0-9]+', '', date_str) + if timezone is None: m = re.search( - r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', + r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str) if not m: timezone = datetime.timedelta() @@ -738,9 +860,12 @@ def parse_iso8601(date_str, delimiter='T', timezone=None): timezone = datetime.timedelta( hours=sign * int(m.group('hours')), minutes=sign * int(m.group('minutes'))) - date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) - dt = datetime.datetime.strptime(date_str, date_format) - timezone - return calendar.timegm(dt.timetuple()) + try: + date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) + dt = datetime.datetime.strptime(date_str, date_format) - timezone + return calendar.timegm(dt.timetuple()) + except ValueError: + pass def unified_strdate(date_str, day_first=True): @@ -805,7 +930,8 @@ def unified_strdate(date_str, day_first=True): timetuple = email.utils.parsedate_tz(date_str) if timetuple: upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') - return upload_date + if upload_date is not None: + return compat_str(upload_date) def determine_ext(url, default_ext='unknown_video'): @@ -814,6 +940,21 @@ def determine_ext(url, default_ext='unknown_video'): guess = url.partition('?')[0].rpartition('.')[2] if re.match(r'^[A-Za-z0-9]+$', guess): return guess + elif guess.rstrip('/') in ( + 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', + 'flv', 'f4v', 'f4a', 'f4b', + 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', + 'mkv', 'mka', 'mk3d', + 'avi', 'divx', + 'mov', + 'asf', 'wmv', 'wma', + '3gp', '3g2', + 'mp3', + 'flac', + 'ape', + 'wav', + 'f4f', 'f4m', 'm3u8', 'smil'): + return guess.rstrip('/') else: return default_ext @@ -1265,6 +1406,15 @@ def remove_end(s, end): return s +def remove_quotes(s): + if s is None or len(s) < 2: + return s + for quote in ('"', "'", ): + if s[0] == quote and s[-1] == quote: + return s[1:-1] + return s + + def url_basename(url): path = compat_urlparse.urlparse(url).path return path.strip('/').split('/')[-1] @@ -1281,7 +1431,12 @@ def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): v = getattr(v, get_attr, None) if v == '': v = None - return default if v is None else (int(v) * invscale // scale) + if v is None: + return default + try: + return int(v) * invscale // scale + except ValueError: + return default def str_or_none(v, default=None): @@ -1297,7 +1452,12 @@ def str_to_int(int_str): def float_or_none(v, scale=1, invscale=1, default=None): - return default if v is None else (float(v) * invscale / scale) + if v is None: + return default + try: + return float(v) * invscale / scale + except ValueError: + return default def parse_duration(s): @@ -1546,27 
+1706,14 @@ def urlencode_postdata(*args, **kargs): return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') -try: - etree_iter = xml.etree.ElementTree.Element.iter -except AttributeError: # Python <=2.6 - etree_iter = lambda n: n.findall('.//*') +def encode_dict(d, encoding='utf-8'): + def encode(v): + return v.encode(encoding) if isinstance(v, compat_basestring) else v + return dict((encode(k), encode(v)) for k, v in d.items()) -def parse_xml(s): - class TreeBuilder(xml.etree.ElementTree.TreeBuilder): - def doctype(self, name, pubid, system): - pass # Ignore doctypes - - parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder()) - kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {} - tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs) - # Fix up XML parser in Python 2.x - if sys.version_info < (3, 0): - for n in etree_iter(tree): - if n.text is not None: - if not isinstance(n.text, compat_str): - n.text = n.text.decode('utf-8') - return tree +def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): + return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) US_RATINGS = { @@ -1596,8 +1743,8 @@ def js_to_json(code): if v in ('true', 'false', 'null'): return v if v.startswith('"'): - return v - if v.startswith("'"): + v = re.sub(r"\\'", "'", v[1:-1]) + elif v.startswith("'"): v = v[1:-1] v = re.sub(r"\\\\|\\'|\"", lambda m: { '\\\\': '\\\\', @@ -1663,6 +1810,15 @@ def args_to_str(args): return ' '.join(shlex_quote(a) for a in args) +def error_to_compat_str(err): + err_str = str(err) + # On python 2 error byte string must be decoded with proper + # encoding rather than ascii + if sys.version_info[0] < 3: + err_str = err_str.decode(preferredencoding()) + return err_str + + def mimetype2ext(mt): _, _, res = mt.rpartition('/') @@ -1691,6 +1847,10 @@ def urlhandle_detect_ext(url_handle): return mimetype2ext(getheader('Content-Type')) +def encode_data_uri(data, mime_type): + return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii')) + + def age_restricted(content_limit, age_limit): """ Returns True iff the content should be blocked """ @@ -1829,15 +1989,15 @@ def match_filter_func(filter_str): def parse_dfxp_time_expr(time_expr): if not time_expr: - return 0.0 + return mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) if mobj: return float(mobj.group('time_offset')) - mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr) + mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr) if mobj: - return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3)) + return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.')) def srt_subtitles_timecode(seconds): @@ -1865,7 +2025,7 @@ def dfxp2srt(dfxp_data): return out - dfxp = xml.etree.ElementTree.fromstring(dfxp_data.encode('utf-8')) + dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8')) out = [] paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p') @@ -1873,10 +2033,15 @@ def dfxp2srt(dfxp_data): raise ValueError('Invalid dfxp/TTML subtitle') for para, index in zip(paras, itertools.count(1)): - begin_time = parse_dfxp_time_expr(para.attrib['begin']) + begin_time = parse_dfxp_time_expr(para.attrib.get('begin')) end_time = parse_dfxp_time_expr(para.attrib.get('end')) + dur = parse_dfxp_time_expr(para.attrib.get('dur')) + if begin_time is None: + continue if not end_time: - end_time = begin_time + 
parse_dfxp_time_expr(para.attrib['dur'])
+ if not dur:
+ continue
+ end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
@@ -1886,6 +2051,32 @@
return ''.join(out)
+def cli_option(params, command_option, param):
+ param = params.get(param)
+ return [command_option, param] if param is not None else []
+
+
+def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
+ param = params.get(param)
+ assert isinstance(param, bool)
+ if separator:
+ return [command_option + separator + (true_value if param else false_value)]
+ return [command_option, true_value if param else false_value]
+
+
+def cli_valueless_option(params, command_option, param, expected_value=True):
+ param = params.get(param)
+ return [command_option] if param == expected_value else []
+
+
+def cli_configuration_args(params, param, default=[]):
+ ex_args = params.get(param)
+ if ex_args is None:
+ return default
+ assert isinstance(ex_args, list)
+ return ex_args
+
+
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 280afdd7f..01607693e 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
from __future__ import unicode_literals
-__version__ = '2015.07.21'
+__version__ = '2015.12.18'
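# [Editor's note] A self-contained usage sketch of the cli_* helpers introduced
# in utils.py above. The helper bodies are copied verbatim from the patch so the
# demo runs stand-alone; the params mapping and the option names passed to the
# helpers below are hypothetical.
def cli_option(params, command_option, param):
    param = params.get(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


params = {'proxy': 'http://127.0.0.1:3128', 'nocheckcertificate': True, 'quiet': True}
argv = (cli_option(params, '--proxy', 'proxy')
        + cli_bool_option(params, '--check-certificate', 'nocheckcertificate', 'false', 'true')
        + cli_valueless_option(params, '--silent', 'quiet'))
print(argv)  # ['--proxy', 'http://127.0.0.1:3128', '--check-certificate', 'false', '--silent']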