-rw-r--r--  .travis.yml  5
-rw-r--r--  AUTHORS  11
-rw-r--r--  CONTRIBUTING.md  10
-rw-r--r--  README.md  463
-rwxr-xr-x  devscripts/bash-completion.py  2
-rwxr-xr-x  devscripts/fish-completion.py  2
-rwxr-xr-x  devscripts/gh-pages/update-sites.py  2
-rw-r--r--  devscripts/make_supportedsites.py  2
-rw-r--r--  devscripts/prepare_manpage.py  31
-rwxr-xr-x  devscripts/zsh-completion.py  2
-rw-r--r--  docs/supportedsites.md  61
-rw-r--r--  test/helper.py  131
-rw-r--r--  test/test_InfoExtractor.py  8
-rw-r--r--  test/test_YoutubeDL.py  98
-rw-r--r--  test/test_compat.py  4
-rw-r--r--  test/test_download.py  4
-rw-r--r--  test/test_subtitles.py  13
-rw-r--r--  test/test_utils.py  86
-rw-r--r--  test/test_write_annotations.py  2
-rw-r--r--  test/test_youtube_lists.py  9
-rw-r--r--  tox.ini  2
-rwxr-xr-x  youtube_dl/YoutubeDL.py  434
-rw-r--r--  youtube_dl/__init__.py  6
-rwxr-xr-x  youtube_dl/__main__.py  2
-rw-r--r--  youtube_dl/compat.py  65
-rw-r--r--  youtube_dl/downloader/common.py  2
-rw-r--r--  youtube_dl/downloader/dash.py  2
-rw-r--r--  youtube_dl/downloader/external.py  45
-rw-r--r--  youtube_dl/downloader/f4m.py  123
-rw-r--r--  youtube_dl/downloader/fragment.py  111
-rw-r--r--  youtube_dl/downloader/hls.py  100
-rw-r--r--  youtube_dl/downloader/http.py  19
-rw-r--r--  youtube_dl/downloader/rtmp.py  2
-rw-r--r--  youtube_dl/extractor/__init__.py  67
-rw-r--r--  youtube_dl/extractor/abc.py  51
-rw-r--r--  youtube_dl/extractor/academicearth.py  2
-rw-r--r--  youtube_dl/extractor/adultswim.py  48
-rw-r--r--  youtube_dl/extractor/airmozilla.py  4
-rw-r--r--  youtube_dl/extractor/aljazeera.py  1
-rw-r--r--  youtube_dl/extractor/appletrailers.py  68
-rw-r--r--  youtube_dl/extractor/arte.py  12
-rw-r--r--  youtube_dl/extractor/bandcamp.py  12
-rw-r--r--  youtube_dl/extractor/bbc.py  904
-rw-r--r--  youtube_dl/extractor/bbccouk.py  379
-rw-r--r--  youtube_dl/extractor/beeg.py  68
-rw-r--r--  youtube_dl/extractor/bild.py  22
-rw-r--r--  youtube_dl/extractor/breakcom.py  1
-rw-r--r--  youtube_dl/extractor/canalplus.py  15
-rw-r--r--  youtube_dl/extractor/ceskatelevize.py  152
-rw-r--r--  youtube_dl/extractor/channel9.py  66
-rw-r--r--  youtube_dl/extractor/chaturbate.py  50
-rw-r--r--  youtube_dl/extractor/cinemassacre.py  7
-rw-r--r--  youtube_dl/extractor/clipfish.py  67
-rw-r--r--  youtube_dl/extractor/clubic.py  9
-rw-r--r--  youtube_dl/extractor/comcarcoff.py  2
-rw-r--r--  youtube_dl/extractor/comedycentral.py  7
-rw-r--r--  youtube_dl/extractor/common.py  395
-rw-r--r--  youtube_dl/extractor/condenast.py  50
-rw-r--r--  youtube_dl/extractor/criterion.py  4
-rw-r--r--  youtube_dl/extractor/crunchyroll.py  141
-rw-r--r--  youtube_dl/extractor/dailymotion.py  209
-rw-r--r--  youtube_dl/extractor/dcn.py  84
-rw-r--r--  youtube_dl/extractor/dhm.py  30
-rw-r--r--  youtube_dl/extractor/divxstage.py  27
-rw-r--r--  youtube_dl/extractor/dumpert.py  10
-rw-r--r--  youtube_dl/extractor/eagleplatform.py  27
-rw-r--r--  youtube_dl/extractor/engadget.py  2
-rw-r--r--  youtube_dl/extractor/eroprofile.py  3
-rw-r--r--  youtube_dl/extractor/esri.py  74
-rw-r--r--  youtube_dl/extractor/europa.py  93
-rw-r--r--  youtube_dl/extractor/expotv.py  31
-rw-r--r--  youtube_dl/extractor/facebook.py  6
-rw-r--r--  youtube_dl/extractor/fc2.py  15
-rw-r--r--  youtube_dl/extractor/fczenit.py  41
-rw-r--r--  youtube_dl/extractor/fivemin.py  84
-rw-r--r--  youtube_dl/extractor/fktv.py  89
-rw-r--r--  youtube_dl/extractor/folketinget.py  4
-rw-r--r--  youtube_dl/extractor/fourtube.py  30
-rw-r--r--  youtube_dl/extractor/foxnews.py  15
-rw-r--r--  youtube_dl/extractor/francetv.py  21
-rw-r--r--  youtube_dl/extractor/funnyordie.py  2
-rw-r--r--  youtube_dl/extractor/gdcvault.py  33
-rw-r--r--  youtube_dl/extractor/generic.py  171
-rw-r--r--  youtube_dl/extractor/globo.py  11
-rw-r--r--  youtube_dl/extractor/gorillavid.py  20
-rw-r--r--  youtube_dl/extractor/hostingbulk.py  80
-rw-r--r--  youtube_dl/extractor/iconosquare.py  24
-rw-r--r--  youtube_dl/extractor/imgur.py  27
-rw-r--r--  youtube_dl/extractor/indavideo.py  142
-rw-r--r--  youtube_dl/extractor/iqiyi.py  20
-rw-r--r--  youtube_dl/extractor/ir90tv.py  42
-rw-r--r--  youtube_dl/extractor/jeuxvideo.py  2
-rw-r--r--  youtube_dl/extractor/kaltura.py  34
-rw-r--r--  youtube_dl/extractor/keek.py  39
-rw-r--r--  youtube_dl/extractor/kontrtube.py  40
-rw-r--r--  youtube_dl/extractor/krasview.py  3
-rw-r--r--  youtube_dl/extractor/kuwo.py  6
-rw-r--r--  youtube_dl/extractor/lecture2go.py  62
-rw-r--r--  youtube_dl/extractor/letv.py  3
-rw-r--r--  youtube_dl/extractor/libsyn.py  30
-rw-r--r--  youtube_dl/extractor/limelight.py  229
-rw-r--r--  youtube_dl/extractor/lynda.py  34
-rw-r--r--  youtube_dl/extractor/mailru.py  2
-rw-r--r--  youtube_dl/extractor/mdr.py  2
-rw-r--r--  youtube_dl/extractor/megavideoz.py  56
-rw-r--r--  youtube_dl/extractor/mit.py  10
-rw-r--r--  youtube_dl/extractor/mitele.py  107
-rw-r--r--  youtube_dl/extractor/moniker.py  18
-rw-r--r--  youtube_dl/extractor/mtv.py  89
-rw-r--r--  youtube_dl/extractor/musicvault.py  63
-rw-r--r--  youtube_dl/extractor/mwave.py  58
-rw-r--r--  youtube_dl/extractor/nationalgeographic.py  37
-rw-r--r--  youtube_dl/extractor/naver.py  11
-rw-r--r--  youtube_dl/extractor/nbc.py  31
-rw-r--r--  youtube_dl/extractor/ndr.py  440
-rw-r--r--  youtube_dl/extractor/nextmedia.py  16
-rw-r--r--  youtube_dl/extractor/nfl.py  164
-rw-r--r--  youtube_dl/extractor/nhl.py  26
-rw-r--r--  youtube_dl/extractor/niconico.py  6
-rw-r--r--  youtube_dl/extractor/ninegag.py  95
-rw-r--r--  youtube_dl/extractor/nowness.py  166
-rw-r--r--  youtube_dl/extractor/nowtv.py  77
-rw-r--r--  youtube_dl/extractor/nowvideo.py  2
-rw-r--r--  youtube_dl/extractor/npo.py  1
-rw-r--r--  youtube_dl/extractor/nrk.py  30
-rw-r--r--  youtube_dl/extractor/odnoklassniki.py  71
-rw-r--r--  youtube_dl/extractor/openfilm.py  70
-rw-r--r--  youtube_dl/extractor/pbs.py  77
-rw-r--r--  youtube_dl/extractor/periscope.py  99
-rw-r--r--  youtube_dl/extractor/playtvak.py  181
-rw-r--r--  youtube_dl/extractor/playwire.py  2
-rw-r--r--  youtube_dl/extractor/pluralsight.py  207
-rw-r--r--  youtube_dl/extractor/porn91.py  2
-rw-r--r--  youtube_dl/extractor/pornhub.py  9
-rw-r--r--  youtube_dl/extractor/prosiebensat1.py  11
-rw-r--r--  youtube_dl/extractor/qqmusic.py  37
-rw-r--r--  youtube_dl/extractor/rai.py  22
-rw-r--r--  youtube_dl/extractor/rtbf.py  2
-rw-r--r--  youtube_dl/extractor/rte.py  12
-rw-r--r--  youtube_dl/extractor/rtl2.py  27
-rw-r--r--  youtube_dl/extractor/rtlnl.py  17
-rw-r--r--  youtube_dl/extractor/rtp.py  4
-rw-r--r--  youtube_dl/extractor/rts.py  31
-rw-r--r--  youtube_dl/extractor/rtve.py  6
-rw-r--r--  youtube_dl/extractor/rtvnh.py  47
-rw-r--r--  youtube_dl/extractor/rutube.py  1
-rw-r--r--  youtube_dl/extractor/ruutu.py  45
-rw-r--r--  youtube_dl/extractor/safari.py  5
-rw-r--r--  youtube_dl/extractor/screenwavemedia.py  113
-rw-r--r--  youtube_dl/extractor/sexykarma.py  1
-rw-r--r--  youtube_dl/extractor/shahid.py  107
-rw-r--r--  youtube_dl/extractor/shared.py  17
-rw-r--r--  youtube_dl/extractor/smotri.py  5
-rw-r--r--  youtube_dl/extractor/soundcloud.py  145
-rw-r--r--  youtube_dl/extractor/southpark.py  8
-rw-r--r--  youtube_dl/extractor/spankwire.py  60
-rw-r--r--  youtube_dl/extractor/spiegel.py  5
-rw-r--r--  youtube_dl/extractor/sportdeutschland.py  10
-rw-r--r--  youtube_dl/extractor/tagesschau.py  65
-rw-r--r--  youtube_dl/extractor/tapely.py  6
-rw-r--r--  youtube_dl/extractor/telecinco.py  82
-rw-r--r--  youtube_dl/extractor/telegraaf.py  35
-rw-r--r--  youtube_dl/extractor/theplatform.py  262
-rw-r--r--  youtube_dl/extractor/tubitv.py  4
-rw-r--r--  youtube_dl/extractor/tudou.py  45
-rw-r--r--  youtube_dl/extractor/tumblr.py  57
-rw-r--r--  youtube_dl/extractor/tvplay.py  1
-rw-r--r--  youtube_dl/extractor/tweakers.py  50
-rw-r--r--  youtube_dl/extractor/twitch.py  90
-rw-r--r--  youtube_dl/extractor/udemy.py  13
-rw-r--r--  youtube_dl/extractor/ustream.py  92
-rw-r--r--  youtube_dl/extractor/videobam.py  81
-rw-r--r--  youtube_dl/extractor/videolecturesnet.py  106
-rw-r--r--  youtube_dl/extractor/vidme.py  144
-rw-r--r--  youtube_dl/extractor/vier.py  13
-rw-r--r--  youtube_dl/extractor/viewster.py  60
-rw-r--r--  youtube_dl/extractor/viki.py  46
-rw-r--r--  youtube_dl/extractor/vimeo.py  88
-rw-r--r--  youtube_dl/extractor/vine.py  66
-rw-r--r--  youtube_dl/extractor/vk.py  5
-rw-r--r--  youtube_dl/extractor/vlive.py  86
-rw-r--r--  youtube_dl/extractor/washingtonpost.py  8
-rw-r--r--  youtube_dl/extractor/wimp.py  23
-rw-r--r--  youtube_dl/extractor/xhamster.py  39
-rw-r--r--  youtube_dl/extractor/xuite.py  2
-rw-r--r--  youtube_dl/extractor/yahoo.py  62
-rw-r--r--  youtube_dl/extractor/yandexmusic.py  113
-rw-r--r--  youtube_dl/extractor/youku.py  19
-rw-r--r--  youtube_dl/extractor/youtube.py  493
-rw-r--r--  youtube_dl/extractor/zingmp3.py  13
-rw-r--r--  youtube_dl/options.py  10
-rw-r--r--  youtube_dl/postprocessor/common.py  7
-rw-r--r--  youtube_dl/postprocessor/ffmpeg.py  13
-rw-r--r--  youtube_dl/utils.py  160
-rw-r--r--  youtube_dl/version.py  2
195 files changed, 8490 insertions, 3346 deletions
diff --git a/.travis.yml b/.travis.yml
index 511bee64c..cc21fae8f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,9 +5,8 @@ python:
- "3.2"
- "3.3"
- "3.4"
-before_install:
- - sudo apt-get update -qq
- - sudo apt-get install -yqq rtmpdump
+ - "3.5"
+sudo: false
script: nosetests test --verbose
notifications:
email:
diff --git a/AUTHORS b/AUTHORS
index 4fd65f46f..cc552bcb2 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -133,3 +133,14 @@ Remita Amine
Aurélio A. Heckert
Bernhard Minks
sceext
+Zach Bruggeman
+Tjark Saul
+slangangular
+Behrouz Abbasi
+ngld
+nyuszika7h
+Shaun Walbridge
+Lee Jenkins
+Anssi Hannula
+Lukáš Lalinský
+Qijiang Fan
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 588b15bde..32c2fd84c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,15 +16,15 @@ So please elaborate on what feature you are requesting, or what bug you want to
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
-For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
-If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
+If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
### Are you using the latest version?
-Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
### Is the issue already documented?
@@ -124,8 +124,8 @@ If you want to add support for a new site, you can follow this quick list (assum
}
```
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
-7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
diff --git a/README.md b/README.md
index ac54d7b67..cf4aebf3d 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,7 @@ youtube-dl - download videos from youtube.com or other video platforms
- [VIDEO SELECTION](#video-selection)
- [FAQ](#faq)
- [DEVELOPER INSTRUCTIONS](#developer-instructions)
+- [EMBEDDING YOUTUBE-DL](#embedding-youtube-dl)
- [BUGS](#bugs)
- [COPYRIGHT](#copyright)
@@ -34,7 +35,7 @@ You can also use pip:
sudo pip install youtube-dl
-Alternatively, refer to the developer instructions below for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
+Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
# DESCRIPTION
**youtube-dl** is a small command-line program to download videos from
@@ -48,110 +49,220 @@ which means you can modify it, redistribute it or use it however you like.
# OPTIONS
-h, --help Print this help text and exit
--version Print program version and exit
- -U, --update Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)
- -i, --ignore-errors Continue on download errors, for example to skip unavailable videos in a playlist
- --abort-on-error Abort downloading of further videos (in the playlist or the command line) if an error occurs
+ -U, --update Update this program to latest version. Make
+ sure that you have sufficient permissions
+ (run with sudo if needed)
+ -i, --ignore-errors Continue on download errors, for example to
+ skip unavailable videos in a playlist
+ --abort-on-error Abort downloading of further videos (in the
+ playlist or the command line) if an error
+ occurs
--dump-user-agent Display the current browser identification
--list-extractors List all supported extractors
- --extractor-descriptions Output descriptions of all supported extractors
- --force-generic-extractor Force extraction to use the generic extractor
- --default-search PREFIX Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple".
- Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The
- default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.
- --ignore-config Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: Do not read the user configuration
- in ~/.config/youtube-dl/config (%APPDATA%/youtube-dl/config.txt on Windows)
- --flat-playlist Do not extract the videos of a playlist, only list them.
+ --extractor-descriptions Output descriptions of all supported
+ extractors
+ --force-generic-extractor Force extraction to use the generic
+ extractor
+ --default-search PREFIX Use this prefix for unqualified URLs. For
+ example "gvsearch2:" downloads two videos
+ from google videos for youtube-dl "large
+ apple". Use the value "auto" to let
+ youtube-dl guess ("auto_warning" to emit a
+ warning when guessing). "error" just throws
+ an error. The default value "fixup_error"
+ repairs broken URLs, but emits an error if
+ this is not possible instead of searching.
+ --ignore-config Do not read configuration files. When given
+ in the global configuration file /etc
+ /youtube-dl.conf: Do not read the user
+ configuration in ~/.config/youtube-
+ dl/config (%APPDATA%/youtube-dl/config.txt
+ on Windows)
+ --flat-playlist Do not extract the videos of a playlist,
+ only list them.
--no-color Do not emit color codes in output
## Network Options:
- --proxy URL Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection
+ --proxy URL Use the specified HTTP/HTTPS proxy. Pass in
+ an empty string (--proxy "") for direct
+ connection
--socket-timeout SECONDS Time to wait before giving up, in seconds
- --source-address IP Client-side IP address to bind to (experimental)
- -4, --force-ipv4 Make all connections via IPv4 (experimental)
- -6, --force-ipv6 Make all connections via IPv6 (experimental)
- --cn-verification-proxy URL Use this proxy to verify the IP address for some Chinese sites. The default proxy specified by --proxy (or none, if the options is
- not present) is used for the actual downloading. (experimental)
+ --source-address IP Client-side IP address to bind to
+ (experimental)
+ -4, --force-ipv4 Make all connections via IPv4
+ (experimental)
+ -6, --force-ipv6 Make all connections via IPv6
+ (experimental)
+ --cn-verification-proxy URL Use this proxy to verify the IP address for
+ some Chinese sites. The default proxy
+ specified by --proxy (or none, if the
+ options is not present) is used for the
+ actual downloading. (experimental)
## Video Selection:
--playlist-start NUMBER Playlist video to start at (default is 1)
--playlist-end NUMBER Playlist video to end at (default is last)
- --playlist-items ITEM_SPEC Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8"
- if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will
- download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.
- --match-title REGEX Download only matching titles (regex or caseless sub-string)
- --reject-title REGEX Skip download for matching titles (regex or caseless sub-string)
+ --playlist-items ITEM_SPEC Playlist video items to download. Specify
+ indices of the videos in the playlist
+ separated by commas like: "--playlist-items
+ 1,2,5,8" if you want to download videos
+ indexed 1, 2, 5, 8 in the playlist. You can
+ specify range: "--playlist-items
+ 1-3,7,10-13", it will download the videos
+ at index 1, 2, 3, 7, 10, 11, 12 and 13.
+ --match-title REGEX Download only matching titles (regex or
+ caseless sub-string)
+ --reject-title REGEX Skip download for matching titles (regex or
+ caseless sub-string)
--max-downloads NUMBER Abort after downloading NUMBER files
- --min-filesize SIZE Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)
- --max-filesize SIZE Do not download any videos larger than SIZE (e.g. 50k or 44.6m)
+ --min-filesize SIZE Do not download any videos smaller than
+ SIZE (e.g. 50k or 44.6m)
+ --max-filesize SIZE Do not download any videos larger than SIZE
+ (e.g. 50k or 44.6m)
--date DATE Download only videos uploaded in this date
- --datebefore DATE Download only videos uploaded on or before this date (i.e. inclusive)
- --dateafter DATE Download only videos uploaded on or after this date (i.e. inclusive)
- --min-views COUNT Do not download any videos with less than COUNT views
- --max-views COUNT Do not download any videos with more than COUNT views
- --match-filter FILTER Generic video filter (experimental). Specify any key (see help for -o for a list of available keys) to match if the key is present,
- !key to check if the key is not present,key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare against
- a number, and & to require multiple matches. Values which are not known are excluded unless you put a question mark (?) after the
- operator.For example, to only match videos that have been liked more than 100 times and disliked less than 50 times (or the dislike
- functionality is not available at the given service), but who also have a description, use --match-filter "like_count > 100 &
+ --datebefore DATE Download only videos uploaded on or before
+ this date (i.e. inclusive)
+ --dateafter DATE Download only videos uploaded on or after
+ this date (i.e. inclusive)
+ --min-views COUNT Do not download any videos with less than
+ COUNT views
+ --max-views COUNT Do not download any videos with more than
+ COUNT views
+ --match-filter FILTER Generic video filter (experimental).
+ Specify any key (see help for -o for a list
+ of available keys) to match if the key is
+ present, !key to check if the key is not
+ present,key > NUMBER (like "comment_count >
+ 12", also works with >=, <, <=, !=, =) to
+ compare against a number, and & to require
+ multiple matches. Values which are not
+ known are excluded unless you put a
+ question mark (?) after the operator.For
+ example, to only match videos that have
+ been liked more than 100 times and disliked
+ less than 50 times (or the dislike
+ functionality is not available at the given
+ service), but who also have a description,
+ use --match-filter "like_count > 100 &
dislike_count <? 50 & description" .
- --no-playlist Download only the video, if the URL refers to a video and a playlist.
- --yes-playlist Download the playlist, if the URL refers to a video and a playlist.
- --age-limit YEARS Download only videos suitable for the given age
- --download-archive FILE Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.
- --include-ads Download advertisements as well (experimental)
+ --no-playlist Download only the video, if the URL refers
+ to a video and a playlist.
+ --yes-playlist Download the playlist, if the URL refers to
+ a video and a playlist.
+ --age-limit YEARS Download only videos suitable for the given
+ age
+ --download-archive FILE Download only videos not listed in the
+ archive file. Record the IDs of all
+ downloaded videos in it.
+ --include-ads Download advertisements as well
+ (experimental)
## Download Options:
- -r, --rate-limit LIMIT Maximum download rate in bytes per second (e.g. 50K or 4.2M)
- -R, --retries RETRIES Number of retries (default is 10), or "infinite".
- --buffer-size SIZE Size of download buffer (e.g. 1024 or 16K) (default is 1024)
- --no-resize-buffer Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.
+ -r, --rate-limit LIMIT Maximum download rate in bytes per second
+ (e.g. 50K or 4.2M)
+ -R, --retries RETRIES Number of retries (default is 10), or
+ "infinite".
+ --buffer-size SIZE Size of download buffer (e.g. 1024 or 16K)
+ (default is 1024)
+ --no-resize-buffer Do not automatically adjust the buffer
+ size. By default, the buffer size is
+ automatically resized from an initial value
+ of SIZE.
--playlist-reverse Download playlist videos in reverse order
- --xattr-set-filesize Set file xattribute ytdl.filesize with expected filesize (experimental)
- --hls-prefer-native Use the native HLS downloader instead of ffmpeg (experimental)
- --external-downloader COMMAND Use the specified external downloader. Currently supports aria2c,curl,httpie,wget
- --external-downloader-args ARGS Give these arguments to the external downloader
+ --xattr-set-filesize Set file xattribute ytdl.filesize with
+ expected filesize (experimental)
+ --hls-prefer-native Use the native HLS downloader instead of
+ ffmpeg (experimental)
+ --external-downloader COMMAND Use the specified external downloader.
+ Currently supports
+ aria2c,axel,curl,httpie,wget
+ --external-downloader-args ARGS Give these arguments to the external
+ downloader
## Filesystem Options:
- -a, --batch-file FILE File containing URLs to download ('-' for stdin)
+ -a, --batch-file FILE File containing URLs to download ('-' for
+ stdin)
--id Use only video ID in file name
- -o, --output TEMPLATE Output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader
- nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(format)s for
- the format description (like "22 - 1280x720" or "HD"), %(format_id)s for the unique id of the format (like YouTube's itags: "137"),
- %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id,
- %(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in,
- %(playlist_index)s for the position in the playlist. %(height)s and %(width)s for the width and height of the video format.
- %(resolution)s for a textual description of the resolution of the video format. %% for a literal percent. Use - to output to stdout.
- Can also be used to download to a different directory, for example with -o '/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
- --autonumber-size NUMBER Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given
- --restrict-filenames Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames
- -A, --auto-number [deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000
- -t, --title [deprecated] Use title in file name (default)
+ -o, --output TEMPLATE Output filename template. Use %(title)s to
+ get the title, %(uploader)s for the
+ uploader name, %(uploader_id)s for the
+ uploader nickname if different,
+ %(autonumber)s to get an automatically
+ incremented number, %(ext)s for the
+ filename extension, %(format)s for the
+ format description (like "22 - 1280x720" or
+ "HD"), %(format_id)s for the unique id of
+ the format (like YouTube's itags: "137"),
+ %(upload_date)s for the upload date
+ (YYYYMMDD), %(extractor)s for the provider
+ (youtube, metacafe, etc), %(id)s for the
+ video id, %(playlist_title)s,
+ %(playlist_id)s, or %(playlist)s (=title if
+ present, ID otherwise) for the playlist the
+ video is in, %(playlist_index)s for the
+ position in the playlist. %(height)s and
+ %(width)s for the width and height of the
+ video format. %(resolution)s for a textual
+ description of the resolution of the video
+ format. %% for a literal percent. Use - to
+ output to stdout. Can also be used to
+ download to a different directory, for
+ example with -o '/my/downloads/%(uploader)s
+ /%(title)s-%(id)s.%(ext)s' .
+ --autonumber-size NUMBER Specify the number of digits in
+ %(autonumber)s when it is present in output
+ filename template or --auto-number option
+ is given
+ --restrict-filenames Restrict filenames to only ASCII
+ characters, and avoid "&" and spaces in
+ filenames
+ -A, --auto-number [deprecated; use -o
+ "%(autonumber)s-%(title)s.%(ext)s" ] Number
+ downloaded files starting from 00000
+ -t, --title [deprecated] Use title in file name
+ (default)
-l, --literal [deprecated] Alias of --title
-w, --no-overwrites Do not overwrite files
- -c, --continue Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.
- --no-continue Do not resume partially downloaded files (restart from beginning)
- --no-part Do not use .part files - write directly into output file
- --no-mtime Do not use the Last-modified header to set the file modification time
- --write-description Write video description to a .description file
+ -c, --continue Force resume of partially downloaded files.
+ By default, youtube-dl will resume
+ downloads if possible.
+ --no-continue Do not resume partially downloaded files
+ (restart from beginning)
+ --no-part Do not use .part files - write directly
+ into output file
+ --no-mtime Do not use the Last-modified header to set
+ the file modification time
+ --write-description Write video description to a .description
+ file
--write-info-json Write video metadata to a .info.json file
- --write-annotations Write video annotations to a .annotations.xml file
- --load-info FILE JSON file containing the video information (created with the "--write-info-json" option)
- --cookies FILE File to read cookies from and dump cookie jar in
- --cache-dir DIR Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl
- or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may
- change.
+ --write-annotations Write video annotations to a
+ .annotations.xml file
+ --load-info FILE JSON file containing the video information
+ (created with the "--write-info-json"
+ option)
+ --cookies FILE File to read cookies from and dump cookie
+ jar in
+ --cache-dir DIR Location in the filesystem where youtube-dl
+ can store some downloaded information
+ permanently. By default $XDG_CACHE_HOME
+ /youtube-dl or ~/.cache/youtube-dl . At the
+ moment, only YouTube player files (for
+ videos with obfuscated signatures) are
+ cached, but that may change.
--no-cache-dir Disable filesystem caching
--rm-cache-dir Delete all filesystem cache files
## Thumbnail images:
--write-thumbnail Write thumbnail image to disk
--write-all-thumbnails Write all thumbnail image formats to disk
- --list-thumbnails Simulate and list all available thumbnail formats
+ --list-thumbnails Simulate and list all available thumbnail
+ formats
## Verbosity / Simulation Options:
-q, --quiet Activate quiet mode
--no-warnings Ignore warnings
- -s, --simulate Do not download the video and do not write anything to disk
+ -s, --simulate Do not download the video and do not write
+ anything to disk
--skip-download Do not download the video
-g, --get-url Simulate, quiet but print URL
-e, --get-title Simulate, quiet but print title
@@ -161,86 +272,150 @@ which means you can modify it, redistribute it or use it however you like.
--get-duration Simulate, quiet but print video length
--get-filename Simulate, quiet but print output filename
--get-format Simulate, quiet but print output format
- -j, --dump-json Simulate, quiet but print JSON information. See --output for a description of available keys.
- -J, --dump-single-json Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist
- information in a single line.
- --print-json Be quiet and print the video information as JSON (video is still being downloaded).
+ -j, --dump-json Simulate, quiet but print JSON information.
+ See --output for a description of available
+ keys.
+ -J, --dump-single-json Simulate, quiet but print JSON information
+ for each command-line argument. If the URL
+ refers to a playlist, dump the whole
+ playlist information in a single line.
+ --print-json Be quiet and print the video information as
+ JSON (video is still being downloaded).
--newline Output progress bar as new lines
--no-progress Do not print progress bar
--console-title Display progress in console titlebar
-v, --verbose Print various debugging information
- --dump-pages Print downloaded pages encoded using base64 to debug problems (very verbose)
- --write-pages Write downloaded intermediary pages to files in the current directory to debug problems
+ --dump-pages Print downloaded pages encoded using base64
+ to debug problems (very verbose)
+ --write-pages Write downloaded intermediary pages to
+ files in the current directory to debug
+ problems
--print-traffic Display sent and read HTTP traffic
-C, --call-home Contact the youtube-dl server for debugging
- --no-call-home Do NOT contact the youtube-dl server for debugging
+ --no-call-home Do NOT contact the youtube-dl server for
+ debugging
## Workarounds:
--encoding ENCODING Force the specified encoding (experimental)
--no-check-certificate Suppress HTTPS certificate validation
- --prefer-insecure Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)
+ --prefer-insecure Use an unencrypted connection to retrieve
+ information about the video. (Currently
+ supported only for YouTube)
--user-agent UA Specify a custom user agent
- --referer URL Specify a custom referer, use if the video access is restricted to one domain
- --add-header FIELD:VALUE Specify a custom HTTP header and its value, separated by a colon ':'. You can use this option multiple times
- --bidi-workaround Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH
- --sleep-interval SECONDS Number of seconds to sleep before each download.
+ --referer URL Specify a custom referer, use if the video
+ access is restricted to one domain
+ --add-header FIELD:VALUE Specify a custom HTTP header and its value,
+ separated by a colon ':'. You can use this
+ option multiple times
+ --bidi-workaround Work around terminals that lack
+ bidirectional text support. Requires bidiv
+ or fribidi executable in PATH
+ --sleep-interval SECONDS Number of seconds to sleep before each
+ download.
## Video Format Options:
- -f, --format FORMAT Video format code, see the "FORMAT SELECTION" for all the info
+ -f, --format FORMAT Video format code, see the "FORMAT
+ SELECTION" for all the info
--all-formats Download all available video formats
- --prefer-free-formats Prefer free video formats unless a specific one is requested
+ --prefer-free-formats Prefer free video formats unless a specific
+ one is requested
-F, --list-formats List all available formats
- --youtube-skip-dash-manifest Do not download the DASH manifests and related data on YouTube videos
- --merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no
- merge is required
+ --youtube-skip-dash-manifest Do not download the DASH manifests and
+ related data on YouTube videos
+ --merge-output-format FORMAT If a merge is required (e.g.
+ bestvideo+bestaudio), output to given
+ container format. One of mkv, mp4, ogg,
+ webm, flv. Ignored if no merge is required
## Subtitle Options:
--write-sub Write subtitle file
- --write-auto-sub Write automatic subtitle file (YouTube only)
- --all-subs Download all the available subtitles of the video
+ --write-auto-sub Write automatic subtitle file (YouTube
+ only)
+ --all-subs Download all the available subtitles of the
+ video
--list-subs List all available subtitles for the video
- --sub-format FORMAT Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"
- --sub-lang LANGS Languages of the subtitles to download (optional) separated by commas, use IETF language tags like 'en,pt'
+ --sub-format FORMAT Subtitle format, accepts formats
+ preference, for example: "srt" or
+ "ass/srt/best"
+ --sub-lang LANGS Languages of the subtitles to download
+ (optional) separated by commas, use IETF
+ language tags like 'en,pt'
## Authentication Options:
-u, --username USERNAME Login with this account ID
- -p, --password PASSWORD Account password. If this option is left out, youtube-dl will ask interactively.
+ -p, --password PASSWORD Account password. If this option is left
+ out, youtube-dl will ask interactively.
-2, --twofactor TWOFACTOR Two-factor auth code
-n, --netrc Use .netrc authentication data
- --video-password PASSWORD Video password (vimeo, smotri)
+ --video-password PASSWORD Video password (vimeo, smotri, youku)
## Post-processing Options:
- -x, --extract-audio Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)
- --audio-format FORMAT Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "best" by default
- --audio-quality QUALITY Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default
- 5)
- --recode-video FORMAT Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)
+ -x, --extract-audio Convert video files to audio-only files
+ (requires ffmpeg or avconv and ffprobe or
+ avprobe)
+ --audio-format FORMAT Specify audio format: "best", "aac",
+ "vorbis", "mp3", "m4a", "opus", or "wav";
+ "best" by default
+ --audio-quality QUALITY Specify ffmpeg/avconv audio quality, insert
+ a value between 0 (better) and 9 (worse)
+ for VBR or a specific bitrate like 128K
+ (default 5)
+ --recode-video FORMAT Encode the video to another format if
+ necessary (currently supported:
+ mp4|flv|ogg|webm|mkv|avi)
--postprocessor-args ARGS Give these arguments to the postprocessor
- -k, --keep-video Keep the video file on disk after the post-processing; the video is erased by default
- --no-post-overwrites Do not overwrite post-processed files; the post-processed files are overwritten by default
- --embed-subs Embed subtitles in the video (only for mkv and mp4 videos)
+ -k, --keep-video Keep the video file on disk after the post-
+ processing; the video is erased by default
+ --no-post-overwrites Do not overwrite post-processed files; the
+ post-processed files are overwritten by
+ default
+ --embed-subs Embed subtitles in the video (only for mkv
+ and mp4 videos)
--embed-thumbnail Embed thumbnail in the audio as cover art
--add-metadata Write metadata to the video file
- --metadata-from-title FORMAT Parse additional metadata like song title / artist from the video title. The format syntax is the same as --output, the parsed
- parameters replace existing values. Additional templates: %(album)s, %(artist)s. Example: --metadata-from-title "%(artist)s -
- %(title)s" matches a title like "Coldplay - Paradise"
- --xattrs Write metadata to the video file's xattrs (using dublin core and xdg standards)
- --fixup POLICY Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn (the default;
- fix file if we can, warn otherwise)
- --prefer-avconv Prefer avconv over ffmpeg for running the postprocessors (default)
- --prefer-ffmpeg Prefer ffmpeg over avconv for running the postprocessors
- --ffmpeg-location PATH Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.
- --exec CMD Execute a command on the file after downloading, similar to find's -exec syntax. Example: --exec 'adb push {} /sdcard/Music/ && rm
- {}'
- --convert-subtitles FORMAT Convert the subtitles to other format (currently supported: srt|ass|vtt)
+ --metadata-from-title FORMAT Parse additional metadata like song title /
+ artist from the video title. The format
+ syntax is the same as --output, the parsed
+ parameters replace existing values.
+ Additional templates: %(album)s,
+ %(artist)s. Example: --metadata-from-title
+ "%(artist)s - %(title)s" matches a title
+ like "Coldplay - Paradise"
+ --xattrs Write metadata to the video file's xattrs
+ (using dublin core and xdg standards)
+ --fixup POLICY Automatically correct known faults of the
+ file. One of never (do nothing), warn (only
+ emit a warning), detect_or_warn (the
+ default; fix file if we can, warn
+ otherwise)
+ --prefer-avconv Prefer avconv over ffmpeg for running the
+ postprocessors (default)
+ --prefer-ffmpeg Prefer ffmpeg over avconv for running the
+ postprocessors
+ --ffmpeg-location PATH Location of the ffmpeg/avconv binary;
+ either the path to the binary or its
+ containing directory.
+ --exec CMD Execute a command on the file after
+ downloading, similar to find's -exec
+ syntax. Example: --exec 'adb push {}
+ /sdcard/Music/ && rm {}'
+ --convert-subtitles FORMAT Convert the subtitles to other format
+ (currently supported: srt|ass|vtt)
# CONFIGURATION
-You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<user name>\youtube-dl.conf`.
+You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, the system wide configuration file is located at `/etc/youtube-dl.conf` and the user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`. For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime and use a proxy:
+```
+--extract-audio
+--no-mtime
+--proxy 127.0.0.1:3128
+```
+
+You can use `--ignore-config` if you want to disable the configuration file for a particular youtube-dl run.
### Authentication with `.netrc` file ###
-You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in shell command history. You can achieve this using [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on per extractor basis. For that you will need to create `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
+You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on per extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
```
touch $HOME/.netrc
chmod a-rwx,u+rw $HOME/.netrc
@@ -254,13 +429,13 @@ For example:
machine youtube login myaccount@gmail.com password my_youtube_password
machine twitch login my_twitch_account_name password my_twitch_password
```
-To activate authentication with `.netrc` file you should pass `--netrc` to youtube-dl or to place it in [configuration file](#configuration).
+To activate authentication with the `.netrc` file you should pass `--netrc` to youtube-dl or place it in the [configuration file](#configuration).
-On Windows you may also need to setup `%HOME%` environment variable manually.
+On Windows you may also need to setup the `%HOME%` environment variable manually.
# OUTPUT TEMPLATE
-The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parenthesis, followed by a lowercase S. Allowed names are:
+The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
- `id`: The sequence will be replaced by the video identifier.
- `url`: The sequence will be replaced by the video URL.
@@ -270,8 +445,10 @@ The `-o` option allows users to indicate a template for the output file names. T
- `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
- `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
- `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
- - `playlist`: The name or the id of the playlist that contains the video.
- - `playlist_index`: The index of the video in the playlist, a five-digit number.
+ - `playlist`: The sequence will be replaced by the name or the id of the playlist that contains the video.
+ - `playlist_index`: The sequence will be replaced by the index of the video in the playlist padded with leading zeros according to the total length of the playlist.
+ - `format_id`: The sequence will be replaced by the format code specified by `--format`.
+ - `duration`: The sequence will be replaced by the length of the video in seconds.
The current default template is `%(title)s-%(id)s.%(ext)s`.
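
For anyone embedding youtube-dl rather than calling it from the shell (see the EMBEDDING YOUTUBE-DL section added to the table of contents above), the same template syntax can be supplied programmatically. A minimal sketch, assuming the `outtmpl` parameter mirrors `-o` and using a placeholder URL:

```python
# Minimal sketch: pass an output template when embedding youtube-dl.
# 'outtmpl' is assumed to mirror the -o/--output command-line option.
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    'outtmpl': '%(uploader)s/%(title)s-%(id)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```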
@@ -286,18 +463,18 @@ youtube-dl_test_video_.mp4 # A simple file name
# FORMAT SELECTION
-By default youtube-dl tries to download the best quality, but sometimes you may want to download other format.
+By default youtube-dl tries to download the best quality, but sometimes you may want to download in a different format.
The simplest case is requesting a specific format, for example `-f 22`. You can get the list of available formats using `--list-formats`, you can also use a file extension (currently it supports aac, m4a, mp3, mp4, ogg, wav, webm) or the special names `best`, `bestvideo`, `bestaudio` and `worst`.
-If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`.
+If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`. Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
-Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
+Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
-If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
+If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
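
The selector strings described above can also be used when embedding youtube-dl; a minimal sketch, assuming the `format` parameter corresponds to `-f` and that `extract_info(..., download=False)` returns the info dict with the selected format's `format_id`:

```python
# Minimal sketch: 'format' is assumed to mirror the -f/--format selector syntax.
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # Best DASH video no taller than 1080p muxed with best audio, else fall back to 'best'.
    'format': 'bestvideo[height<=?1080]+bestaudio/best',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    # Resolve the format without downloading, then report what was chosen.
    info = ydl.extract_info('http://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    print(info.get('format_id'))
```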
# VIDEO SELECTION
-Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`, they accept dates in two formats:
+Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`. They accept dates in two formats:
- Absolute dates: Dates in the format `YYYYMMDD`.
- Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
@@ -311,7 +488,7 @@ $ youtube-dl --dateafter now-6months
# Download only the videos uploaded on January 1, 1970
$ youtube-dl --date 19700101
-$ # will only download the videos uploaded in the 200x decade
+$ # Download only the videos uploaded in the 200x decade
$ youtube-dl --dateafter 20000101 --datebefore 20091231
```
@@ -323,7 +500,7 @@ If you've followed [our manual installation instructions](http://rg3.github.io/y
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
-If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distributions serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
+If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
@@ -349,7 +526,7 @@ If you have installed youtube-dl with a package manager, pip, setup.py or a tarb
By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, the only option out of `-citw` that is regularly useful is `-i`.
-### Can you please put the -b option back?
+### Can you please put the `-b` option back?
Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
@@ -361,13 +538,13 @@ Apparently YouTube requires you to pass a CAPTCHA test if you download too much.
Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/).
-### I extracted a video URL with -g, but it does not play on another machine / in my webbrowser.
+### I extracted a video URL with `-g`, but it does not play on another machine / in my webbrowser.
It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl.
It may be beneficial to use IPv6; in some cases, the restrictions are only applied to IPv4. Some services (sometimes only for a subset of videos) do not restrict the video URL by IP address, cookie, or user-agent, but these are the exception rather than the rule.
-Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using -g, your own downloader must support these as well.
+Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using `-g`, your own downloader must support these as well.
If you want to play the video on a machine that is not running youtube-dl, you can relay the video content from the machine that runs youtube-dl. You can use `-o -` to let youtube-dl stream a video to stdout, or simply allow the player to download the files written by youtube-dl in turn.
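
For embedders, a minimal sketch of the `-g` equivalent through the Python API (hedged: it assumes a single-format result and a placeholder URL; the same IP/cookie caveats described above still apply):

```python
import youtube_dl

with youtube_dl.YoutubeDL() as ydl:
    # Fetch metadata only, without downloading the media itself
    info = ydl.extract_info(
        'http://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    # For a single-format result the direct media URL ends up in 'url';
    # merged/multi-format results expose 'requested_formats' instead.
    print(info.get('url'))
```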
@@ -381,7 +558,7 @@ YouTube requires an additional signature since September 2012 which is not suppo
### Video URL contains an ampersand and I'm getting some strange output `[1] 2839` or `'v' is not recognized as an internal or external command` ###
-That's actually the output from your shell. Since ampersand is one of the special shell characters it's interpreted by shell preventing you from passing the whole URL to youtube-dl. To disable your shell from interpreting the ampersands (or any other special characters) you have to either put the whole URL in quotes or escape them with a backslash (which approach will work depends on your shell).
+That's actually the output from your shell. Since the ampersand is one of the special shell characters, it's interpreted by the shell, preventing you from passing the whole URL to youtube-dl. To keep your shell from interpreting the ampersands (or any other special characters), you have to either put the whole URL in quotes or escape them with a backslash (which approach will work depends on your shell).
For example, if your URL is https://www.youtube.com/watch?t=4&v=BaW_jenozKc you should end up with the following command:
@@ -439,6 +616,12 @@ Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the opt
youtube-dl -- -wNyEUrxzFU
youtube-dl "http://www.youtube.com/watch?v=-wNyEUrxzFU"
+### How do I pass cookies to youtube-dl?
+
+Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`. Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have the correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to match your OS, namely `CRLF` (`\r\n`) for Windows, `LF` (`\n`) for Linux and `CR` (`\r`) for Mac OS. `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of an invalid newline format.
+
+Passing cookies to youtube-dl is a good way to work around login when a particular extractor does not implement it explicitly.
+
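
When youtube-dl is used as a library, the same cookies file can be supplied through the embedding API. A minimal sketch, assuming the `cookiefile` parameter documented in `youtube_dl/YoutubeDL.py` (the path and URL are placeholders):

```python
import youtube_dl

ydl_opts = {
    # Same Mozilla/Netscape-format cookies file as described above
    'cookiefile': '/path/to/cookies/file.txt',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```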
### Can you add support for this anime video site, or site which shows current movies for free?
As a matter of policy (as well as legality), youtube-dl does not include support for services that specialize in infringing copyright. As a rule of thumb, if you cannot easily find a video that the service is quite obviously allowed to distribute (i.e. that has been uploaded by the creator, the creator's distributor, or is published under a free license), the service is probably unfit for inclusion in youtube-dl.
@@ -537,8 +720,8 @@ If you want to add support for a new site, you can follow this quick list (assum
}
```
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
-7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
@@ -566,7 +749,7 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```
-Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L117-L265). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
@@ -627,15 +810,15 @@ So please elaborate on what feature you are requesting, or what bug you want to
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
-For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
-If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
+If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
### Are you using the latest version?
-Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
### Is the issue already documented?
diff --git a/devscripts/bash-completion.py b/devscripts/bash-completion.py
index cd26cc089..ce68f26f9 100755
--- a/devscripts/bash-completion.py
+++ b/devscripts/bash-completion.py
@@ -5,7 +5,7 @@ import os
from os.path import dirname as dirn
import sys
-sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
diff --git a/devscripts/fish-completion.py b/devscripts/fish-completion.py
index c2f238798..41629d87d 100755
--- a/devscripts/fish-completion.py
+++ b/devscripts/fish-completion.py
@@ -6,7 +6,7 @@ import os
from os.path import dirname as dirn
import sys
-sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
from youtube_dl.utils import shell_quote
diff --git a/devscripts/gh-pages/update-sites.py b/devscripts/gh-pages/update-sites.py
index d3ef5f0b5..503c1372f 100755
--- a/devscripts/gh-pages/update-sites.py
+++ b/devscripts/gh-pages/update-sites.py
@@ -6,7 +6,7 @@ import os
import textwrap
# We must be able to import youtube_dl
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import youtube_dl
diff --git a/devscripts/make_supportedsites.py b/devscripts/make_supportedsites.py
index 3df4385a6..8cb4a4638 100644
--- a/devscripts/make_supportedsites.py
+++ b/devscripts/make_supportedsites.py
@@ -9,7 +9,7 @@ import sys
# Import youtube_dl
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
-sys.path.append(ROOT_DIR)
+sys.path.insert(0, ROOT_DIR)
import youtube_dl
diff --git a/devscripts/prepare_manpage.py b/devscripts/prepare_manpage.py
index 7ece37754..776e6556e 100644
--- a/devscripts/prepare_manpage.py
+++ b/devscripts/prepare_manpage.py
@@ -8,6 +8,35 @@ import re
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
README_FILE = os.path.join(ROOT_DIR, 'README.md')
+
+def filter_options(readme):
+ ret = ''
+ in_options = False
+ for line in readme.split('\n'):
+ if line.startswith('# '):
+ if line[2:].startswith('OPTIONS'):
+ in_options = True
+ else:
+ in_options = False
+
+ if in_options:
+ if line.lstrip().startswith('-'):
+ option, description = re.split(r'\s{2,}', line.lstrip())
+ split_option = option.split(' ')
+
+ if not split_option[-1].startswith('-'): # metavar
+ option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])
+
+ # Pandoc's definition_lists. See http://pandoc.org/README.html
+ # for more information.
+ ret += '\n%s\n: %s\n' % (option, description)
+ else:
+ ret += line.lstrip() + '\n'
+ else:
+ ret += line + '\n'
+
+ return ret
+
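
As a rough usage sketch for the `filter_options` helper added above (the OPTIONS line below is a simplified stand-in for a real README entry, so spacing and wording are illustrative only):

```python
# Hypothetical input: a header plus one option line using the README's
# multi-space gap between the option and its description.
sample = '# OPTIONS\n--proxy URL    Use the specified HTTP/HTTPS proxy\n'
print(filter_options(sample))
# Expected output (roughly) - a Pandoc definition list with the metavar italicized:
#
# --proxy *URL*
# : Use the specified HTTP/HTTPS proxy
```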
with io.open(README_FILE, encoding='utf-8') as f:
readme = f.read()
@@ -26,6 +55,8 @@ readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
readme = PREFIX + readme
+readme = filter_options(readme)
+
if sys.version_info < (3, 0):
print(readme.encode('utf-8'))
else:
diff --git a/devscripts/zsh-completion.py b/devscripts/zsh-completion.py
index f200f2c80..04728e8e2 100755
--- a/devscripts/zsh-completion.py
+++ b/devscripts/zsh-completion.py
@@ -5,7 +5,7 @@ import os
from os.path import dirname as dirn
import sys
-sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 73445137f..47f7da86d 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -51,6 +51,7 @@
- **bambuser:channel**
- **Bandcamp**
- **Bandcamp:album**
+ - **bbc**: BBC
- **bbc.co.uk**: BBC iPlayer
- **BeatportPro**
- **Beeg**
@@ -80,12 +81,13 @@
- **CBSSports**
- **CeskaTelevize**
- **channel9**: Channel 9
+ - **Chaturbate**
- **Chilloutzone**
- **chirbit**
- **chirbit:profile**
- **Cinchcast**
- **Cinemassacre**
- - **clipfish**
+ - **Clipfish**
- **cliphunter**
- **Clipsyndicate**
- **Cloudy**
@@ -100,7 +102,7 @@
- **ComCarCoff**
- **ComedyCentral**
- **ComedyCentralShows**: The Daily Show / The Colbert Report
- - **CondeNast**: Condé Nast media group: Condé Nast, GQ, Glamour, Vanity Fair, Vogue, W Magazine, WIRED
+ - **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
- **Cracked**
- **Criterion**
- **CrooksAndLiars**
@@ -115,12 +117,12 @@
- **DailymotionCloud**
- **daum.net**
- **DBTV**
+ - **DCN**
- **DctpTv**
- **DeezerPlaylist**
- **defense.gouv.fr**
- **DHM**: Filmarchiv - Deutsches Historisches Museum
- **Discovery**
- - **divxstage**: DivxStage
- **Dotsub**
- **DouyuTV**: 斗鱼
- **dramafever**
@@ -148,6 +150,8 @@
- **EroProfile**
- **Escapist**
- **ESPN** (Currently broken)
+ - **EsriVideo**
+ - **Europa**
- **EveryonesMixtape**
- **exfm**: ex.fm
- **ExpoTV**
@@ -155,15 +159,15 @@
- **facebook**
- **faz.net**
- **fc2**
+ - **Fczenit**
- **fernsehkritik.tv**
- - **fernsehkritik.tv:postecke**
- **Firstpost**
- **FiveTV**
- **Flickr**
- **Folketinget**: Folketinget (ft.dk; Danish parliament)
- **FootyRoom**
- **Foxgay**
- - **FoxNews**
+ - **FoxNews**: Fox News and Fox Business Video
- **FoxSports**
- **france2.fr:generation-quoi**
- **FranceCulture**
@@ -192,7 +196,7 @@
- **GodTube**
- **GoldenMoustache**
- **Golem**
- - **GorillaVid**: GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net
+ - **GorillaVid**: GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net and filehoot.com
- **Goshgay**
- **Groupon**
- **Hark**
@@ -206,7 +210,6 @@
- **hitbox**
- **hitbox:live**
- **HornBunny**
- - **HostingBulk**
- **HotNewHipHop**
- **Howcast**
- **HowStuffWorks**
@@ -217,13 +220,17 @@
- **imdb**: Internet Movie Database trailers
- **imdb:list**: Internet Movie Database lists
- **Imgur**
+ - **ImgurAlbum**
- **Ina**
+ - **Indavideo**
+ - **IndavideoEmbed**
- **InfoQ**
- **Instagram**
- **instagram:user**: Instagram user profile
- **InternetVideoArchive**
- **IPrima**
- **iqiyi**: 爱奇艺
+ - **Ir90Tv**
- **ivi**: ivi.ru
- **ivi:compilation**: ivi.ru compilations
- **Izlesene**
@@ -252,12 +259,16 @@
- **kuwo:song**: 酷我音乐
- **la7.tv**
- **Laola1Tv**
+ - **Lecture2Go**
- **Letv**: 乐视网
- **LetvPlaylist**
- **LetvTv**
- **Libsyn**
- **life:embed**
- **lifenews**: LIFE | NEWS
+ - **limelight**
+ - **limelight:channel**
+ - **limelight:channel_list**
- **LiveLeak**
- **livestream**
- **livestream:original**
@@ -271,14 +282,13 @@
- **Malemotion**
- **MDR**
- **media.ccc.de**
- - **MegaVideoz**
- **metacafe**
- **Metacritic**
- **Mgoon**
- **Minhateca**
- **MinistryGrid**
- **miomio.tv**
- - **mitele.es**
+ - **MiTele**: mitele.es
- **mixcloud**
- **MLB**
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
@@ -294,20 +304,21 @@
- **Moviezine**
- **movshare**: MovShare
- **MPORA**
+ - **MSNBC**
- **MTV**
+ - **mtv.de**
- **mtviggy.com**
- **mtvservices:embedded**
- **MuenchenTV**: münchen.tv
- **MusicPlayOn**
- - **MusicVault**
- **muzu.tv**
+ - **Mwave**
- **MySpace**
- **MySpace:album**
- **MySpass**
- **Myvi**
- **myvideo**
- **MyVidster**
- - **N-JOY**
- **n-tv.de**
- **NationalGeographic**
- **Naver**
@@ -316,7 +327,9 @@
- **NBCNews**
- **NBCSports**
- **NBCSportsVPlayer**
- - **ndr**: NDR.de - Mediathek
+ - **ndr**: NDR.de - Norddeutscher Rundfunk
+ - **ndr:embed**
+ - **ndr:embed:base**
- **NDTV**
- **NerdCubedFeed**
- **Nerdist**
@@ -339,16 +352,19 @@
- **nhl.com:videocenter**: NHL videocenter category
- **niconico**: ニコニコ動画
- **NiconicoPlaylist**
+ - **njoy**: N-JOY
+ - **njoy:embed**
- **Noco**
- **Normalboots**
- **NosVideo**
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
- **novamov**: NovaMov
- - **Nowness**
+ - **nowness**
+ - **nowness:playlist**
+ - **nowness:series**
- **NowTV**
- **nowvideo**: NowVideo
- **npo**: npo.nl and ntr.nl
- - **npo**: npo.nl and ntr.nl
- **npo.nl:live**
- **npo.nl:radio**
- **npo.nl:radio:fragment**
@@ -366,7 +382,6 @@
- **OnionStudios**
- **Ooyala**
- **OoyalaExternal**
- - **OpenFilm**
- **orf:fm4**: radio FM4
- **orf:iptv**: iptv.ORF.at
- **orf:oe1**: Radio Österreich 1
@@ -374,6 +389,7 @@
- **parliamentlive.tv**: UK parliament videos
- **Patreon**
- **PBS**
+ - **Periscope**: Periscope
- **PhilharmonieDeParis**: Philharmonie de Paris
- **Phoenix**
- **Photobucket**
@@ -382,8 +398,11 @@
- **PlanetaPlay**
- **play.fm**
- **played.to**
+ - **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
- **Playvid**
- **Playwire**
+ - **pluralsight**
+ - **pluralsight:course**
- **plus.google**: Google Plus
- **pluzz.francetv.fr**
- **podomatic**
@@ -403,6 +422,7 @@
- **qqmusic:playlist**: QQ音乐 - 歌单
- **qqmusic:singer**: QQ音乐 - 歌手
- **qqmusic:toplist**: QQ音乐 - 排行榜
+ - **Quickscope**: Quick Scope
- **QuickVid**
- **R7**
- **radio.de**
@@ -427,6 +447,7 @@
- **rtve.es:alacarta**: RTVE a la carta
- **rtve.es:infantil**: RTVE infantil
- **rtve.es:live**: RTVE.es live streams
+ - **RTVNH**
- **RUHD**
- **rutube**: Rutube videos
- **rutube:channel**: Rutube channels
@@ -450,7 +471,8 @@
- **ServingSys**
- **Sexu**
- **SexyKarma**: Sexy Karma and Watch Indian Porn
- - **Shared**
+ - **Shahid**
+ - **Shared**: shared.sx and vivo.sx
- **ShareSix**
- **Sina**
- **Slideshare**
@@ -514,7 +536,8 @@
- **techtv.mit.edu**
- **ted**
- **TeleBruxelles**
- - **telecinco.es**
+ - **Telecinco**: telecinco.es, cuatro.com and mediaset.es
+ - **Telegraaf**
- **TeleMB**
- **TeleTask**
- **TenPlay**
@@ -522,6 +545,7 @@
- **TF1**
- **TheOnion**
- **ThePlatform**
+ - **ThePlatformFeed**
- **TheSixtyOne**
- **ThisAmericanLife**
- **ThisAV**
@@ -587,7 +611,6 @@
- **Viddler**
- **video.google:search**: Google Video search
- **video.mit.edu**
- - **VideoBam**
- **VideoDetective**
- **videofy.me**
- **videolectures.net**
@@ -615,9 +638,11 @@
- **vine:user**
- **vk**: VK
- **vk:uservideos**: VK - User's Videos
+ - **vlive**
- **Vodlocker**
- **VoiceRepublic**
- **Vporn**
+ - **vpro**: npo.nl and ntr.nl
- **VRT**
- **vube**: Vube.com
- **VuClip**
diff --git a/test/helper.py b/test/helper.py
index e1129e58f..bdd7acca4 100644
--- a/test/helper.py
+++ b/test/helper.py
@@ -89,66 +89,81 @@ def gettestcases(include_onlymatching=False):
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
-def expect_info_dict(self, got_dict, expected_dict):
+def expect_value(self, got, expected, field):
+ if isinstance(expected, compat_str) and expected.startswith('re:'):
+ match_str = expected[len('re:'):]
+ match_rex = re.compile(match_str)
+
+ self.assertTrue(
+ isinstance(got, compat_str),
+ 'Expected a %s object, but got %s for field %s' % (
+ compat_str.__name__, type(got).__name__, field))
+ self.assertTrue(
+ match_rex.match(got),
+ 'field %s (value: %r) should match %r' % (field, got, match_str))
+ elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
+ start_str = expected[len('startswith:'):]
+ self.assertTrue(
+ isinstance(got, compat_str),
+ 'Expected a %s object, but got %s for field %s' % (
+ compat_str.__name__, type(got).__name__, field))
+ self.assertTrue(
+ got.startswith(start_str),
+ 'field %s (value: %r) should start with %r' % (field, got, start_str))
+ elif isinstance(expected, compat_str) and expected.startswith('contains:'):
+ contains_str = expected[len('contains:'):]
+ self.assertTrue(
+ isinstance(got, compat_str),
+ 'Expected a %s object, but got %s for field %s' % (
+ compat_str.__name__, type(got).__name__, field))
+ self.assertTrue(
+ contains_str in got,
+ 'field %s (value: %r) should contain %r' % (field, got, contains_str))
+ elif isinstance(expected, type):
+ self.assertTrue(
+ isinstance(got, expected),
+ 'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
+ elif isinstance(expected, dict) and isinstance(got, dict):
+ expect_dict(self, got, expected)
+ elif isinstance(expected, list) and isinstance(got, list):
+ self.assertEqual(
+ len(expected), len(got),
+            'Expected a list of length %d, but got a list of length %d for field %s' % (
+ len(expected), len(got), field))
+ for index, (item_got, item_expected) in enumerate(zip(got, expected)):
+ type_got = type(item_got)
+ type_expected = type(item_expected)
+ self.assertEqual(
+ type_expected, type_got,
+ 'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
+ index, field, type_expected, type_got))
+ expect_value(self, item_got, item_expected, field)
+ else:
+ if isinstance(expected, compat_str) and expected.startswith('md5:'):
+ got = 'md5:' + md5(got)
+ elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
+ self.assertTrue(
+ isinstance(got, (list, dict)),
+ 'Expected field %s to be a list or a dict, but it is of type %s' % (
+ field, type(got).__name__))
+ expected_num = int(expected.partition(':')[2])
+ assertGreaterEqual(
+ self, len(got), expected_num,
+ 'Expected %d items in field %s, but only got %d' % (expected_num, field, len(got)))
+ return
+ self.assertEqual(
+ expected, got,
+ 'Invalid value for field %s, expected %r, got %r' % (field, expected, got))
+
+
+def expect_dict(self, got_dict, expected_dict):
for info_field, expected in expected_dict.items():
- if isinstance(expected, compat_str) and expected.startswith('re:'):
- got = got_dict.get(info_field)
- match_str = expected[len('re:'):]
- match_rex = re.compile(match_str)
+ got = got_dict.get(info_field)
+ expect_value(self, got, expected, info_field)
- self.assertTrue(
- isinstance(got, compat_str),
- 'Expected a %s object, but got %s for field %s' % (
- compat_str.__name__, type(got).__name__, info_field))
- self.assertTrue(
- match_rex.match(got),
- 'field %s (value: %r) should match %r' % (info_field, got, match_str))
- elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
- got = got_dict.get(info_field)
- start_str = expected[len('startswith:'):]
- self.assertTrue(
- isinstance(got, compat_str),
- 'Expected a %s object, but got %s for field %s' % (
- compat_str.__name__, type(got).__name__, info_field))
- self.assertTrue(
- got.startswith(start_str),
- 'field %s (value: %r) should start with %r' % (info_field, got, start_str))
- elif isinstance(expected, compat_str) and expected.startswith('contains:'):
- got = got_dict.get(info_field)
- contains_str = expected[len('contains:'):]
- self.assertTrue(
- isinstance(got, compat_str),
- 'Expected a %s object, but got %s for field %s' % (
- compat_str.__name__, type(got).__name__, info_field))
- self.assertTrue(
- contains_str in got,
- 'field %s (value: %r) should contain %r' % (info_field, got, contains_str))
- elif isinstance(expected, type):
- got = got_dict.get(info_field)
- self.assertTrue(isinstance(got, expected),
- 'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
- else:
- if isinstance(expected, compat_str) and expected.startswith('md5:'):
- got = 'md5:' + md5(got_dict.get(info_field))
- elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
- got = got_dict.get(info_field)
- self.assertTrue(
- isinstance(got, list),
- 'Expected field %s to be a list, but it is of type %s' % (
- info_field, type(got).__name__))
- expected_num = int(expected.partition(':')[2])
- assertGreaterEqual(
- self, len(got), expected_num,
- 'Expected %d items in field %s, but only got %d' % (
- expected_num, info_field, len(got)
- )
- )
- continue
- else:
- got = got_dict.get(info_field)
- self.assertEqual(expected, got,
- 'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+def expect_info_dict(self, got_dict, expected_dict):
+ expect_dict(self, got_dict, expected_dict)
# Check for the presence of mandatory fields
if got_dict.get('_type') not in ('playlist', 'multi_video'):
for key in ('id', 'url', 'title', 'ext'):
@@ -160,7 +175,7 @@ def expect_info_dict(self, got_dict, expected_dict):
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
- if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+ if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
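
For extractor authors, a hedged sketch of how the expectation prefixes handled by `expect_value()` above look inside a test's `info_dict` (every value below is made up for illustration):

```python
# Hypothetical extractor _TEST exercising the special prefixes understood by
# expect_value()/expect_info_dict(): regex, MD5 of long text, prefix,
# substring, minimum count and plain type checks.
_TEST = {
    'url': 'http://example.com/video/123',
    'info_dict': {
        'id': '123',
        'ext': 'mp4',
        'title': r're:^Example video #\d+$',                    # must match this regex
        'description': 'md5:c522f2817c6b0bdefd31b5d1d9bbafb2',  # long text compared via MD5
        'thumbnail': 'startswith:http://',                      # prefix check
        'uploader': 'contains:Example',                         # substring check
        'tags': 'mincount:2',                                    # list/dict with at least 2 entries
        'duration': int,                                         # only the type is checked
    },
}
```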
diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py
index be8d12997..938466a80 100644
--- a/test/test_InfoExtractor.py
+++ b/test/test_InfoExtractor.py
@@ -35,10 +35,18 @@ class TestInfoExtractor(unittest.TestCase):
<meta name="og:title" content='Foo'/>
<meta content="Some video's description " name="og:description"/>
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&amp;key2=val2'/>
+ <meta content='application/x-shockwave-flash' property='og:video:type'>
+ <meta content='Foo' property=og:foobar>
+ <meta name="og:test1" content='foo > < bar'/>
+ <meta name="og:test2" content="foo >//< bar"/>
'''
self.assertEqual(ie._og_search_title(html), 'Foo')
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
+ self.assertEqual(ie._og_search_video_url(html, default=None), None)
+ self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
+ self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
+ self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
def test_html_search_meta(self):
ie = self.ie
diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py
index a13c09ef4..0388c0bf3 100644
--- a/test/test_YoutubeDL.py
+++ b/test/test_YoutubeDL.py
@@ -15,7 +15,7 @@ from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_str
from youtube_dl.extractor import YoutubeIE
from youtube_dl.postprocessor.common import PostProcessor
-from youtube_dl.utils import match_filter_func
+from youtube_dl.utils import ExtractorError, match_filter_func
TEST_URL = 'http://localhost/sample.mp4'
@@ -105,6 +105,7 @@ class TestFormatSelection(unittest.TestCase):
def test_format_selection(self):
formats = [
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
+ {'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL},
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
@@ -136,6 +137,11 @@ class TestFormatSelection(unittest.TestCase):
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
+ ydl = YDL({'format': 'example-with-dashes'})
+ ydl.process_ie_result(info_dict.copy())
+ downloaded = ydl.downloaded_info_dicts[0]
+ self.assertEqual(downloaded['format_id'], 'example-with-dashes')
+
def test_format_selection_audio(self):
formats = [
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
@@ -229,21 +235,70 @@ class TestFormatSelection(unittest.TestCase):
'141', '172', '140', '171', '139',
]
- for f1id, f2id in zip(order, order[1:]):
- f1 = YoutubeIE._formats[f1id].copy()
- f1['format_id'] = f1id
- f1['url'] = 'url:' + f1id
- f2 = YoutubeIE._formats[f2id].copy()
- f2['format_id'] = f2id
- f2['url'] = 'url:' + f2id
+ def format_info(f_id):
+ info = YoutubeIE._formats[f_id].copy()
+ info['format_id'] = f_id
+ info['url'] = 'url:' + f_id
+ return info
+ formats_order = [format_info(f_id) for f_id in order]
+
+ info_dict = _make_result(list(formats_order), extractor='youtube')
+ ydl = YDL({'format': 'bestvideo+bestaudio'})
+ yie = YoutubeIE(ydl)
+ yie._sort_formats(info_dict['formats'])
+ ydl.process_ie_result(info_dict)
+ downloaded = ydl.downloaded_info_dicts[0]
+ self.assertEqual(downloaded['format_id'], '137+141')
+ self.assertEqual(downloaded['ext'], 'mp4')
+
+ info_dict = _make_result(list(formats_order), extractor='youtube')
+ ydl = YDL({'format': 'bestvideo[height>=999999]+bestaudio/best'})
+ yie = YoutubeIE(ydl)
+ yie._sort_formats(info_dict['formats'])
+ ydl.process_ie_result(info_dict)
+ downloaded = ydl.downloaded_info_dicts[0]
+ self.assertEqual(downloaded['format_id'], '38')
+
+ info_dict = _make_result(list(formats_order), extractor='youtube')
+ ydl = YDL({'format': 'bestvideo/best,bestaudio'})
+ yie = YoutubeIE(ydl)
+ yie._sort_formats(info_dict['formats'])
+ ydl.process_ie_result(info_dict)
+ downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+ self.assertEqual(downloaded_ids, ['137', '141'])
+
+ info_dict = _make_result(list(formats_order), extractor='youtube')
+ ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])+bestaudio'})
+ yie = YoutubeIE(ydl)
+ yie._sort_formats(info_dict['formats'])
+ ydl.process_ie_result(info_dict)
+ downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+ self.assertEqual(downloaded_ids, ['137+141', '248+141'])
+
+ info_dict = _make_result(list(formats_order), extractor='youtube')
+ ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])[height<=720]+bestaudio'})
+ yie = YoutubeIE(ydl)
+ yie._sort_formats(info_dict['formats'])
+ ydl.process_ie_result(info_dict)
+ downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+ self.assertEqual(downloaded_ids, ['136+141', '247+141'])
+
+ info_dict = _make_result(list(formats_order), extractor='youtube')
+ ydl = YDL({'format': '(bestvideo[ext=none]/bestvideo[ext=webm])+bestaudio'})
+ yie = YoutubeIE(ydl)
+ yie._sort_formats(info_dict['formats'])
+ ydl.process_ie_result(info_dict)
+ downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+ self.assertEqual(downloaded_ids, ['248+141'])
+ for f1, f2 in zip(formats_order, formats_order[1:]):
info_dict = _make_result([f1, f2], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
- self.assertEqual(downloaded['format_id'], f1id)
+ self.assertEqual(downloaded['format_id'], f1['format_id'])
info_dict = _make_result([f2, f1], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
@@ -251,7 +306,18 @@ class TestFormatSelection(unittest.TestCase):
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
- self.assertEqual(downloaded['format_id'], f1id)
+ self.assertEqual(downloaded['format_id'], f1['format_id'])
+
+ def test_invalid_format_specs(self):
+ def assert_syntax_error(format_spec):
+ ydl = YDL({'format': format_spec})
+ info_dict = _make_result([{'format_id': 'foo', 'url': TEST_URL}])
+ self.assertRaises(SyntaxError, ydl.process_ie_result, info_dict)
+
+ assert_syntax_error('bestvideo,,best')
+ assert_syntax_error('+bestaudio')
+ assert_syntax_error('bestvideo+')
+ assert_syntax_error('/')
def test_format_filtering(self):
formats = [
@@ -308,6 +374,18 @@ class TestFormatSelection(unittest.TestCase):
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
+ ydl = YDL({'format': 'all[width>=400][width<=600]'})
+ ydl.process_ie_result(info_dict)
+ downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
+ self.assertEqual(downloaded_ids, ['B', 'C', 'D'])
+
+ ydl = YDL({'format': 'best[height<40]'})
+ try:
+ ydl.process_ie_result(info_dict)
+ except ExtractorError:
+ pass
+ self.assertEqual(ydl.downloaded_info_dicts, [])
+
class TestYoutubeDL(unittest.TestCase):
def test_subtitles(self):
diff --git a/test/test_compat.py b/test/test_compat.py
index c3ba8ad2e..4ee0dc99d 100644
--- a/test/test_compat.py
+++ b/test/test_compat.py
@@ -14,6 +14,7 @@ from youtube_dl.utils import get_filesystem_encoding
from youtube_dl.compat import (
compat_getenv,
compat_expanduser,
+ compat_shlex_split,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
@@ -67,5 +68,8 @@ class TestCompat(unittest.TestCase):
self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
+ def test_compat_shlex_split(self):
+ self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/test_download.py b/test/test_download.py
index 1110357a7..284418834 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -136,7 +136,9 @@ def generator(test_case):
# We're not using .download here since that is just a shim
# for outside error handling, and returns the exit code
# instead of the result dict.
- res_dict = ydl.extract_info(test_case['url'])
+ res_dict = ydl.extract_info(
+ test_case['url'],
+ force_generic_extractor=params.get('force_generic_extractor', False))
except (DownloadError, ExtractorError) as err:
# Check if the exception is not a network related one
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
diff --git a/test/test_subtitles.py b/test/test_subtitles.py
index c4e3adb67..0343967d9 100644
--- a/test/test_subtitles.py
+++ b/test/test_subtitles.py
@@ -25,6 +25,7 @@ from youtube_dl.extractor import (
RaiIE,
VikiIE,
ThePlatformIE,
+ ThePlatformFeedIE,
RTVEALaCartaIE,
FunnyOrDieIE,
)
@@ -307,6 +308,18 @@ class TestThePlatformSubtitles(BaseTestSubtitles):
self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
+class TestThePlatformFeedSubtitles(BaseTestSubtitles):
+ url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
+ IE = ThePlatformFeedIE
+
+ def test_allsubtitles(self):
+ self.DL.params['writesubtitles'] = True
+ self.DL.params['allsubtitles'] = True
+ subtitles = self.getSubtitles()
+ self.assertEqual(set(subtitles.keys()), set(['en']))
+ self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')
+
+
class TestRtveSubtitles(BaseTestSubtitles):
url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
IE = RTVEALaCartaIE
diff --git a/test/test_utils.py b/test/test_utils.py
index e13e11b59..a5f164c49 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -57,11 +57,16 @@ from youtube_dl.utils import (
urlencode_postdata,
version_tuple,
xpath_with_ns,
+ xpath_element,
xpath_text,
+ xpath_attr,
render_table,
match_str,
parse_dfxp_time_expr,
dfxp2srt,
+ cli_option,
+ cli_valueless_option,
+ cli_bool_option,
)
@@ -235,12 +240,21 @@ class TestUtil(unittest.TestCase):
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
+ <node x="" />
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
+ self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
+ self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
+ self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
+ self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
+ self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
+ self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
+ self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
+ self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
@@ -255,6 +269,16 @@ class TestUtil(unittest.TestCase):
self.assertEqual(find('media:song/media:author').text, 'The Author')
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
+ def test_xpath_element(self):
+ doc = xml.etree.ElementTree.Element('root')
+ div = xml.etree.ElementTree.SubElement(doc, 'div')
+ p = xml.etree.ElementTree.SubElement(div, 'p')
+ p.text = 'Foo'
+ self.assertEqual(xpath_element(doc, 'div/p'), p)
+ self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
+ self.assertTrue(xpath_element(doc, 'div/bar') is None)
+ self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
+
def test_xpath_text(self):
testxml = '''<root>
<div>
@@ -263,9 +287,25 @@ class TestUtil(unittest.TestCase):
</root>'''
doc = xml.etree.ElementTree.fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
+ self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
+ def test_xpath_attr(self):
+ testxml = '''<root>
+ <div>
+ <p x="a">Foo</p>
+ </div>
+ </root>'''
+ doc = xml.etree.ElementTree.fromstring(testxml)
+ self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
+ self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
+ self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
+ self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
+ self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
+ self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
+ self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
+
def test_smuggle_url(self):
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
@@ -324,6 +364,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(parse_duration('02:03:04'), 7384)
self.assertEqual(parse_duration('01:02:03:04'), 93784)
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
+ self.assertEqual(parse_duration('87 Min.'), 5220)
def test_fix_xml_ampersands(self):
self.assertEqual(
@@ -636,6 +677,51 @@ The first line
'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
+ def test_cli_option(self):
+ self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
+ self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
+ self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
+
+ def test_cli_valueless_option(self):
+ self.assertEqual(cli_valueless_option(
+ {'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
+ self.assertEqual(cli_valueless_option(
+ {'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
+ self.assertEqual(cli_valueless_option(
+ {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
+ self.assertEqual(cli_valueless_option(
+ {'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
+ self.assertEqual(cli_valueless_option(
+ {'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
+ self.assertEqual(cli_valueless_option(
+ {'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
+
+ def test_cli_bool_option(self):
+ self.assertEqual(
+ cli_bool_option(
+ {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
+ ['--no-check-certificate', 'true'])
+ self.assertEqual(
+ cli_bool_option(
+ {'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
+ ['--no-check-certificate=true'])
+ self.assertEqual(
+ cli_bool_option(
+ {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
+ ['--check-certificate', 'false'])
+ self.assertEqual(
+ cli_bool_option(
+ {'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
+ ['--check-certificate=false'])
+ self.assertEqual(
+ cli_bool_option(
+ {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
+ ['--check-certificate', 'true'])
+ self.assertEqual(
+ cli_bool_option(
+ {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
+ ['--check-certificate=true'])
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/test_write_annotations.py b/test/test_write_annotations.py
index 780636c77..84b8f39e0 100644
--- a/test/test_write_annotations.py
+++ b/test/test_write_annotations.py
@@ -33,7 +33,7 @@ params = get_params({
TEST_ID = 'gr51aVj-mLg'
-ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
+ANNOTATIONS_FILE = TEST_ID + '.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index c889b6f15..26aadb34f 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -57,5 +57,14 @@ class TestYoutubeLists(unittest.TestCase):
entries = result['entries']
self.assertEqual(len(entries), 100)
+ def test_youtube_flat_playlist_titles(self):
+ dl = FakeYDL()
+ dl.params['extract_flat'] = True
+ ie = YoutubePlaylistIE(dl)
+ result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
+ self.assertIsPlaylist(result)
+ for entry in result['entries']:
+ self.assertTrue(entry.get('title'))
+
if __name__ == '__main__':
unittest.main()
diff --git a/tox.ini b/tox.ini
index cd805fe8a..48504329f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26,py27,py33,py34
+envlist = py26,py27,py33,py34,py35
[testenv]
deps =
nose
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 00af78e06..adf70d658 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -21,24 +21,24 @@ import subprocess
import socket
import sys
import time
+import tokenize
import traceback
if os.name == 'nt':
import ctypes
from .compat import (
- compat_basestring,
compat_cookiejar,
compat_expanduser,
compat_get_terminal_size,
compat_http_client,
compat_kwargs,
compat_str,
+ compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
)
from .utils import (
- escape_url,
ContentTooShortError,
date_from_str,
DateRange,
@@ -49,7 +49,6 @@ from .utils import (
ExtractorError,
format_bytes,
formatSeconds,
- HEADRequest,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
@@ -70,6 +69,7 @@ from .utils import (
version_tuple,
write_json_file,
write_string,
+ YoutubeDLCookieProcessor,
YoutubeDLHandler,
prepend_extension,
replace_extension,
@@ -285,7 +285,11 @@ class YoutubeDL(object):
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
- self.params = params
+ self.params = {
+ # Default parameters
+ 'nocheckcertificate': False,
+ }
+ self.params.update(params)
self.cache = Cache(self)
if params.get('bidi_workaround', False):
@@ -853,8 +857,8 @@ class YoutubeDL(object):
else:
raise Exception('Invalid result type: %s' % result_type)
- def _apply_format_filter(self, format_spec, available_formats):
- " Returns a tuple of the remaining format_spec and filtered formats "
+ def _build_format_filter(self, filter_spec):
+ " Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
@@ -864,13 +868,13 @@ class YoutubeDL(object):
'=': operator.eq,
'!=': operator.ne,
}
- operator_rex = re.compile(r'''(?x)\s*\[
+ operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
- \]$
+ $
''' % '|'.join(map(re.escape, OPERATORS.keys())))
- m = operator_rex.search(format_spec)
+ m = operator_rex.search(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
@@ -881,7 +885,7 @@ class YoutubeDL(object):
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
- m.group('value'), format_spec))
+ m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
@@ -889,85 +893,283 @@ class YoutubeDL(object):
'=': operator.eq,
'!=': operator.ne,
}
- str_operator_rex = re.compile(r'''(?x)\s*\[
+ str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9_-]+)
- \s*\]$
+ \s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
- m = str_operator_rex.search(format_spec)
+ m = str_operator_rex.search(filter_spec)
if m:
comparison_value = m.group('value')
op = STR_OPERATORS[m.group('op')]
if not m:
- raise ValueError('Invalid format specification %r' % format_spec)
+ raise ValueError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
- new_formats = [f for f in available_formats if _filter(f)]
+ return _filter
+
+ def build_format_selector(self, format_spec):
+ def syntax_error(note, start):
+ message = (
+ 'Invalid format specification: '
+ '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
+ return SyntaxError(message)
+
+ PICKFIRST = 'PICKFIRST'
+ MERGE = 'MERGE'
+ SINGLE = 'SINGLE'
+ GROUP = 'GROUP'
+ FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
+
+ def _parse_filter(tokens):
+ filter_parts = []
+ for type, string, start, _, _ in tokens:
+ if type == tokenize.OP and string == ']':
+ return ''.join(filter_parts)
+ else:
+ filter_parts.append(string)
+
+ def _remove_unused_ops(tokens):
+            # Remove operators that we don't use and join them with the surrounding strings
+ # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
+ ALLOWED_OPS = ('/', '+', ',', '(', ')')
+ last_string, last_start, last_end, last_line = None, None, None, None
+ for type, string, start, end, line in tokens:
+ if type == tokenize.OP and string == '[':
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+ last_string = None
+ yield type, string, start, end, line
+ # everything inside brackets will be handled by _parse_filter
+ for type, string, start, end, line in tokens:
+ yield type, string, start, end, line
+ if type == tokenize.OP and string == ']':
+ break
+ elif type == tokenize.OP and string in ALLOWED_OPS:
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+ last_string = None
+ yield type, string, start, end, line
+ elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
+ if not last_string:
+ last_string = string
+ last_start = start
+ last_end = end
+ else:
+ last_string += string
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+
+ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
+ selectors = []
+ current_selector = None
+ for type, string, start, _, _ in tokens:
+ # ENCODING is only defined in python 3.x
+ if type == getattr(tokenize, 'ENCODING', None):
+ continue
+ elif type in [tokenize.NAME, tokenize.NUMBER]:
+ current_selector = FormatSelector(SINGLE, string, [])
+ elif type == tokenize.OP:
+ if string == ')':
+ if not inside_group:
+ # ')' will be handled by the parentheses group
+ tokens.restore_last_token()
+ break
+ elif inside_merge and string in ['/', ',']:
+ tokens.restore_last_token()
+ break
+ elif inside_choice and string == ',':
+ tokens.restore_last_token()
+ break
+ elif string == ',':
+ if not current_selector:
+ raise syntax_error('"," must follow a format selector', start)
+ selectors.append(current_selector)
+ current_selector = None
+ elif string == '/':
+ if not current_selector:
+ raise syntax_error('"/" must follow a format selector', start)
+ first_choice = current_selector
+ second_choice = _parse_format_selection(tokens, inside_choice=True)
+ current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
+ elif string == '[':
+ if not current_selector:
+ current_selector = FormatSelector(SINGLE, 'best', [])
+ format_filter = _parse_filter(tokens)
+ current_selector.filters.append(format_filter)
+ elif string == '(':
+ if current_selector:
+ raise syntax_error('Unexpected "("', start)
+ group = _parse_format_selection(tokens, inside_group=True)
+ current_selector = FormatSelector(GROUP, group, [])
+ elif string == '+':
+ video_selector = current_selector
+ audio_selector = _parse_format_selection(tokens, inside_merge=True)
+ if not video_selector or not audio_selector:
+ raise syntax_error('"+" must be between two format selectors', start)
+ current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
+ else:
+ raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
+ elif type == tokenize.ENDMARKER:
+ break
+ if current_selector:
+ selectors.append(current_selector)
+ return selectors
+
+ def _build_selector_function(selector):
+ if isinstance(selector, list):
+ fs = [_build_selector_function(s) for s in selector]
+
+ def selector_function(formats):
+ for f in fs:
+ for format in f(formats):
+ yield format
+ return selector_function
+ elif selector.type == GROUP:
+ selector_function = _build_selector_function(selector.selector)
+ elif selector.type == PICKFIRST:
+ fs = [_build_selector_function(s) for s in selector.selector]
+
+ def selector_function(formats):
+ for f in fs:
+ picked_formats = list(f(formats))
+ if picked_formats:
+ return picked_formats
+ return []
+ elif selector.type == SINGLE:
+ format_spec = selector.selector
+
+ def selector_function(formats):
+ formats = list(formats)
+ if not formats:
+ return
+ if format_spec == 'all':
+ for f in formats:
+ yield f
+ elif format_spec in ['best', 'worst', None]:
+ format_idx = 0 if format_spec == 'worst' else -1
+ audiovideo_formats = [
+ f for f in formats
+ if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
+ if audiovideo_formats:
+ yield audiovideo_formats[format_idx]
+                    # for audio only (soundcloud) or video only (imgur) URLs, select the best/worst of those formats
+ elif (all(f.get('acodec') != 'none' for f in formats) or
+ all(f.get('vcodec') != 'none' for f in formats)):
+ yield formats[format_idx]
+ elif format_spec == 'bestaudio':
+ audio_formats = [
+ f for f in formats
+ if f.get('vcodec') == 'none']
+ if audio_formats:
+ yield audio_formats[-1]
+ elif format_spec == 'worstaudio':
+ audio_formats = [
+ f for f in formats
+ if f.get('vcodec') == 'none']
+ if audio_formats:
+ yield audio_formats[0]
+ elif format_spec == 'bestvideo':
+ video_formats = [
+ f for f in formats
+ if f.get('acodec') == 'none']
+ if video_formats:
+ yield video_formats[-1]
+ elif format_spec == 'worstvideo':
+ video_formats = [
+ f for f in formats
+ if f.get('acodec') == 'none']
+ if video_formats:
+ yield video_formats[0]
+ else:
+ extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
+ if format_spec in extensions:
+ filter_f = lambda f: f['ext'] == format_spec
+ else:
+ filter_f = lambda f: f['format_id'] == format_spec
+ matches = list(filter(filter_f, formats))
+ if matches:
+ yield matches[-1]
+ elif selector.type == MERGE:
+ def _merge(formats_info):
+ format_1, format_2 = [f['format_id'] for f in formats_info]
+ # The first format must contain the video and the
+ # second the audio
+ if formats_info[0].get('vcodec') == 'none':
+ self.report_error('The first format must '
+ 'contain the video, try using '
+ '"-f %s+%s"' % (format_2, format_1))
+ return
+ output_ext = (
+ formats_info[0]['ext']
+ if self.params.get('merge_output_format') is None
+ else self.params['merge_output_format'])
+ return {
+ 'requested_formats': formats_info,
+ 'format': '%s+%s' % (formats_info[0].get('format'),
+ formats_info[1].get('format')),
+ 'format_id': '%s+%s' % (formats_info[0].get('format_id'),
+ formats_info[1].get('format_id')),
+ 'width': formats_info[0].get('width'),
+ 'height': formats_info[0].get('height'),
+ 'resolution': formats_info[0].get('resolution'),
+ 'fps': formats_info[0].get('fps'),
+ 'vcodec': formats_info[0].get('vcodec'),
+ 'vbr': formats_info[0].get('vbr'),
+ 'stretched_ratio': formats_info[0].get('stretched_ratio'),
+ 'acodec': formats_info[1].get('acodec'),
+ 'abr': formats_info[1].get('abr'),
+ 'ext': output_ext,
+ }
+ video_selector, audio_selector = map(_build_selector_function, selector.selector)
- new_format_spec = format_spec[:-len(m.group(0))]
- if not new_format_spec:
- new_format_spec = 'best'
+ def selector_function(formats):
+ formats = list(formats)
+ for pair in itertools.product(video_selector(formats), audio_selector(formats)):
+ yield _merge(pair)
- return (new_format_spec, new_formats)
+ filters = [self._build_format_filter(f) for f in selector.filters]
- def select_format(self, format_spec, available_formats):
- while format_spec.endswith(']'):
- format_spec, available_formats = self._apply_format_filter(
- format_spec, available_formats)
- if not available_formats:
- return None
+ def final_selector(formats):
+ for _filter in filters:
+ formats = list(filter(_filter, formats))
+ return selector_function(formats)
+ return final_selector
- if format_spec in ['best', 'worst', None]:
- format_idx = 0 if format_spec == 'worst' else -1
- audiovideo_formats = [
- f for f in available_formats
- if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
- if audiovideo_formats:
- return audiovideo_formats[format_idx]
- # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format
- elif (all(f.get('acodec') != 'none' for f in available_formats) or
- all(f.get('vcodec') != 'none' for f in available_formats)):
- return available_formats[format_idx]
- elif format_spec == 'bestaudio':
- audio_formats = [
- f for f in available_formats
- if f.get('vcodec') == 'none']
- if audio_formats:
- return audio_formats[-1]
- elif format_spec == 'worstaudio':
- audio_formats = [
- f for f in available_formats
- if f.get('vcodec') == 'none']
- if audio_formats:
- return audio_formats[0]
- elif format_spec == 'bestvideo':
- video_formats = [
- f for f in available_formats
- if f.get('acodec') == 'none']
- if video_formats:
- return video_formats[-1]
- elif format_spec == 'worstvideo':
- video_formats = [
- f for f in available_formats
- if f.get('acodec') == 'none']
- if video_formats:
- return video_formats[0]
- else:
- extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
- if format_spec in extensions:
- filter_f = lambda f: f['ext'] == format_spec
- else:
- filter_f = lambda f: f['format_id'] == format_spec
- matches = list(filter(filter_f, available_formats))
- if matches:
- return matches[-1]
- return None
+ stream = io.BytesIO(format_spec.encode('utf-8'))
+ try:
+ tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
+ except tokenize.TokenError:
+ raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
+
+ class TokenIterator(object):
+ def __init__(self, tokens):
+ self.tokens = tokens
+ self.counter = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.counter >= len(self.tokens):
+ raise StopIteration()
+ value = self.tokens[self.counter]
+ self.counter += 1
+ return value
+
+ next = __next__
+
+ def restore_last_token(self):
+ self.counter -= 1
+
+ parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
+ return _build_selector_function(parsed_selector)
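
To make the new selector grammar concrete, an editorial sketch (not part of the patch) of how one of the specs exercised in the tests above decomposes; the shapes loosely follow the `FormatSelector(type, selector, filters)` namedtuple defined in this hunk, with nesting simplified:

```python
# 'bestvideo[height<=720]+bestaudio/best' parses roughly into:
#
#   PICKFIRST(
#       MERGE(
#           SINGLE('bestvideo', filters=['height<=720']),
#           SINGLE('bestaudio'),
#       ),
#       SINGLE('best'),
#   )
#
# process_video_result() (further down in this diff) then turns the spec into
# a callable and applies it to the extracted formats:
#
#   format_selector = self.build_format_selector(req_format)
#   formats_to_download = list(format_selector(formats))
```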
def _calc_headers(self, info_dict):
res = std_headers.copy()
@@ -1030,13 +1232,20 @@ class YoutubeDL(object):
except (ValueError, OverflowError, OSError):
pass
+ subtitles = info_dict.get('subtitles')
+ if subtitles:
+ for _, subtitle in subtitles.items():
+ for subtitle_format in subtitle:
+ if 'ext' not in subtitle_format:
+ subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
+
if self.params.get('listsubtitles', False):
if 'automatic_captions' in info_dict:
self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
- self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles')
+ self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
return
info_dict['requested_subtitles'] = self.process_subtitles(
- info_dict['id'], info_dict.get('subtitles'),
+ info_dict['id'], subtitles,
info_dict.get('automatic_captions'))
# We now pick which formats have to be downloaded
@@ -1104,62 +1313,15 @@ class YoutubeDL(object):
if req_format is None:
req_format_list = []
if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
- info_dict['extractor'] in ['youtube', 'ted']):
+ info_dict['extractor'] in ['youtube', 'ted'] and
+ not info_dict.get('is_live')):
merger = FFmpegMergerPP(self)
if merger.available and merger.can_merge():
req_format_list.append('bestvideo+bestaudio')
req_format_list.append('best')
req_format = '/'.join(req_format_list)
- formats_to_download = []
- if req_format == 'all':
- formats_to_download = formats
- else:
- for rfstr in req_format.split(','):
- # We can accept formats requested in the format: 34/5/best, we pick
- # the first that is available, starting from left
- req_formats = rfstr.split('/')
- for rf in req_formats:
- if re.match(r'.+?\+.+?', rf) is not None:
- # Two formats have been requested like '137+139'
- format_1, format_2 = rf.split('+')
- formats_info = (self.select_format(format_1, formats),
- self.select_format(format_2, formats))
- if all(formats_info):
- # The first format must contain the video and the
- # second the audio
- if formats_info[0].get('vcodec') == 'none':
- self.report_error('The first format must '
- 'contain the video, try using '
- '"-f %s+%s"' % (format_2, format_1))
- return
- output_ext = (
- formats_info[0]['ext']
- if self.params.get('merge_output_format') is None
- else self.params['merge_output_format'])
- selected_format = {
- 'requested_formats': formats_info,
- 'format': '%s+%s' % (formats_info[0].get('format'),
- formats_info[1].get('format')),
- 'format_id': '%s+%s' % (formats_info[0].get('format_id'),
- formats_info[1].get('format_id')),
- 'width': formats_info[0].get('width'),
- 'height': formats_info[0].get('height'),
- 'resolution': formats_info[0].get('resolution'),
- 'fps': formats_info[0].get('fps'),
- 'vcodec': formats_info[0].get('vcodec'),
- 'vbr': formats_info[0].get('vbr'),
- 'stretched_ratio': formats_info[0].get('stretched_ratio'),
- 'acodec': formats_info[1].get('acodec'),
- 'abr': formats_info[1].get('abr'),
- 'ext': output_ext,
- }
- else:
- selected_format = None
- else:
- selected_format = self.select_format(rf, formats)
- if selected_format is not None:
- formats_to_download.append(selected_format)
- break
+ format_selector = self.build_format_selector(req_format)
+ formats_to_download = list(format_selector(formats))
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
@@ -1707,27 +1869,6 @@ class YoutubeDL(object):
def urlopen(self, req):
""" Start an HTTP download """
-
- # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
- # always respected by websites, some tend to give out URLs with non percent-encoded
- # non-ASCII characters (see telemb.py, ard.py [#3412])
- # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
- # To work around aforementioned issue we will replace request's original URL with
- # percent-encoded one
- req_is_string = isinstance(req, compat_basestring)
- url = req if req_is_string else req.get_full_url()
- url_escaped = escape_url(url)
-
- # Substitute URL if any change after escaping
- if url != url_escaped:
- if req_is_string:
- req = url_escaped
- else:
- req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
- req = req_type(
- url_escaped, data=req.data, headers=req.headers,
- origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
-
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
@@ -1810,8 +1951,7 @@ class YoutubeDL(object):
if os.access(opts_cookiefile, os.R_OK):
self.cookiejar.load()
- cookie_processor = compat_urllib_request.HTTPCookieProcessor(
- self.cookiejar)
+ cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
@@ -1880,7 +2020,7 @@ class YoutubeDL(object):
(info_dict['extractor'], info_dict['id'], thumb_display_id))
try:
uf = self.urlopen(t['url'])
- with open(thumb_filename, 'wb') as thumbf:
+ with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 55b22c889..5e2ed4d4b 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -9,7 +9,6 @@ import codecs
import io
import os
import random
-import shlex
import sys
@@ -20,6 +19,7 @@ from .compat import (
compat_expanduser,
compat_getpass,
compat_print,
+ compat_shlex_split,
workaround_optparse_bug9161,
)
from .utils import (
@@ -262,10 +262,10 @@ def _real_main(argv=None):
parser.error('setting filesize xattr requested but python-xattr is not available')
external_downloader_args = None
if opts.external_downloader_args:
- external_downloader_args = shlex.split(opts.external_downloader_args)
+ external_downloader_args = compat_shlex_split(opts.external_downloader_args)
postprocessor_args = None
if opts.postprocessor_args:
- postprocessor_args = shlex.split(opts.postprocessor_args)
+ postprocessor_args = compat_shlex_split(opts.postprocessor_args)
match_filter = (
None if opts.match_filter is None
else match_filter_func(opts.match_filter))
diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py
index 65a0f891c..42a0f8c6f 100755
--- a/youtube_dl/__main__.py
+++ b/youtube_dl/__main__.py
@@ -11,7 +11,7 @@ if __package__ is None and not hasattr(sys, "frozen"):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
- sys.path.append(os.path.dirname(os.path.dirname(path)))
+ sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
import youtube_dl
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index 0c57c7aeb..192e1c515 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -5,6 +5,7 @@ import getpass
import optparse
import os
import re
+import shlex
import shutil
import socket
import subprocess
@@ -43,6 +44,11 @@ except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
+ import http.cookies as compat_cookies
+except ImportError: # Python 2
+ import Cookie as compat_cookies
+
+try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
@@ -75,6 +81,11 @@ except ImportError:
import BaseHTTPServer as compat_http_server
try:
+ compat_str = unicode # Python 2
+except NameError:
+ compat_str = str
+
+try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
@@ -94,7 +105,7 @@ except ImportError: # Python 2
# Is it a string-like object?
string.split
return b''
- if isinstance(string, unicode):
+ if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
@@ -145,11 +156,6 @@ except ImportError: # Python 2
return compat_urllib_parse_unquote(string, encoding, errors)
try:
- compat_str = unicode # Python 2
-except NameError:
- compat_str = str
-
-try:
compat_basestring = basestring # Python 2
except NameError:
compat_basestring = str
@@ -222,6 +228,17 @@ except ImportError: # Python < 3.3
return "'" + s.replace("'", "'\"'\"'") + "'"
+if sys.version_info >= (2, 7, 3):
+ compat_shlex_split = shlex.split
+else:
+ # Working around a shlex issue with unicode strings on some Python 2
+ # versions (see http://bugs.python.org/issue1548891)
+ def compat_shlex_split(s, comments=False, posix=True):
+ if isinstance(s, compat_str):
+ s = s.encode('utf-8')
+ return shlex.split(s, comments, posix)
+
+
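
# A short illustration of the compat_shlex_split workaround above: on Python 2 versions hit by
# http://bugs.python.org/issue1548891 shlex.split() mishandles unicode input, so the string is
# encoded to UTF-8 first; on Python 3 plain shlex.split is used. Sketch, not the compat module.
import shlex
import sys

def compat_shlex_split(s, comments=False, posix=True):
    if sys.version_info < (2, 7, 3) and isinstance(s, type(u'')):
        s = s.encode('utf-8')
    return shlex.split(s, comments, posix)

print(compat_shlex_split(u'--proxy "socks5://127.0.0.1:1080" --retries 3'))
# ['--proxy', 'socks5://127.0.0.1:1080', '--retries', '3']
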
def compat_ord(c):
if type(c) is int:
return c
@@ -399,26 +416,32 @@ if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
- def compat_get_terminal_size():
- columns = compat_getenv('COLUMNS', None)
+ def compat_get_terminal_size(fallback=(80, 24)):
+ columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
- lines = compat_getenv('LINES', None)
+ lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
- try:
- sp = subprocess.Popen(
- ['stty', 'size'],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = sp.communicate()
- lines, columns = map(int, out.split())
- except Exception:
- pass
+ if columns is None or lines is None or columns <= 0 or lines <= 0:
+ try:
+ sp = subprocess.Popen(
+ ['stty', 'size'],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = sp.communicate()
+ _lines, _columns = map(int, out.split())
+ except Exception:
+ _columns, _lines = _terminal_size(*fallback)
+
+ if columns is None or columns <= 0:
+ columns = _columns
+ if lines is None or lines <= 0:
+ lines = _lines
return _terminal_size(columns, lines)
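
# A condensed sketch of the fallback order in compat_get_terminal_size above: prefer COLUMNS and
# LINES from the environment, query `stty size` only for whichever dimension is missing or
# non-positive, and fall back to (80, 24) if that also fails. Simplified, not the compat code.
import collections
import os
import subprocess

terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

def get_terminal_size(fallback=(80, 24)):
    columns = int(os.environ.get('COLUMNS') or 0) or None
    lines = int(os.environ.get('LINES') or 0) or None
    if columns is None or lines is None:
        try:
            sp = subprocess.Popen(
                ['stty', 'size'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = sp.communicate()
            stty_lines, stty_columns = map(int, out.split())
        except Exception:
            stty_columns, stty_lines = fallback
        columns = columns or stty_columns
        lines = lines or stty_lines
    return terminal_size(columns, lines)

print(get_terminal_size())
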
try:
@@ -431,11 +454,17 @@ except TypeError: # Python 2.6
yield n
n += step
+if sys.version_info >= (3, 0):
+ from tokenize import tokenize as compat_tokenize_tokenize
+else:
+ from tokenize import generate_tokens as compat_tokenize_tokenize
+
__all__ = [
'compat_HTTPError',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
+ 'compat_cookies',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
@@ -448,9 +477,11 @@ __all__ = [
'compat_ord',
'compat_parse_qs',
'compat_print',
+ 'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_subprocess_get_DEVNULL',
+ 'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py
index 97e755d4b..29a4500d3 100644
--- a/youtube_dl/downloader/common.py
+++ b/youtube_dl/downloader/common.py
@@ -325,7 +325,7 @@ class FileDownloader(object):
)
# Check file already present
- if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
+ if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
self.report_file_already_downloaded(filename)
self._hook_progress({
'filename': filename,
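
# A quick check of the operator-precedence fix above: `a and b or c` groups as `(a and b) or c`,
# so without the added parentheses a download to stdout (filename == '-') with an existing
# partial file was still reported as already downloaded. Values below are illustrative.
filename = '-'
nooverwrites_and_exists = False
continuedl_and_exists = True

old_check = filename != '-' and nooverwrites_and_exists or continuedl_and_exists
new_check = filename != '-' and (nooverwrites_and_exists or continuedl_and_exists)
print(old_check, new_check)  # True False
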
diff --git a/youtube_dl/downloader/dash.py b/youtube_dl/downloader/dash.py
index a4685d307..8b6fa2753 100644
--- a/youtube_dl/downloader/dash.py
+++ b/youtube_dl/downloader/dash.py
@@ -37,7 +37,7 @@ class DashSegmentsFD(FileDownloader):
def combine_url(base_url, target_url):
if re.match(r'^https?://', target_url):
return target_url
- return '%s/%s' % (base_url, target_url)
+ return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
with open(tmpfilename, 'wb') as outf:
append_url_to_file(
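
# The combine_url helper from the hunk above in isolation, showing why the extra slash handling
# matters: base URLs in DASH manifests may or may not end with '/'. Example URLs are made up.
import re

def combine_url(base_url, target_url):
    if re.match(r'^https?://', target_url):
        return target_url
    return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)

print(combine_url('http://cdn.example.com/dash/', 'segment-1.m4s'))  # no doubled slash
print(combine_url('http://cdn.example.com/dash', 'segment-1.m4s'))   # slash inserted
print(combine_url('http://cdn.example.com/dash/', 'http://other.example.com/segment-1.m4s'))
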
diff --git a/youtube_dl/downloader/external.py b/youtube_dl/downloader/external.py
index 1d5cc9904..2bc011266 100644
--- a/youtube_dl/downloader/external.py
+++ b/youtube_dl/downloader/external.py
@@ -5,6 +5,10 @@ import subprocess
from .common import FileDownloader
from ..utils import (
+ cli_option,
+ cli_valueless_option,
+ cli_bool_option,
+ cli_configuration_args,
encodeFilename,
encodeArgument,
)
@@ -45,18 +49,17 @@ class ExternalFD(FileDownloader):
def supports(cls, info_dict):
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')
- def _source_address(self, command_option):
- source_address = self.params.get('source_address')
- if source_address is None:
- return []
- return [command_option, source_address]
+ def _option(self, command_option, param):
+ return cli_option(self.params, command_option, param)
+
+ def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
+ return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)
+
+ def _valueless_option(self, command_option, param, expected_value=True):
+ return cli_valueless_option(self.params, command_option, param, expected_value)
def _configuration_args(self, default=[]):
- ex_args = self.params.get('external_downloader_args')
- if ex_args is None:
- return default
- assert isinstance(ex_args, list)
- return ex_args
+ return cli_configuration_args(self.params, 'external_downloader_args', default)
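
# A hedged sketch of what the new helpers are expected to produce for the curl command above;
# the simplified bodies below are assumptions modelled on the removed _source_address(), not
# the actual youtube_dl.utils implementations.
def cli_option(params, command_option, param):
    value = params.get(param)
    return [] if value is None else [command_option, value]

def cli_valueless_option(params, command_option, param, expected_value=True):
    return [command_option] if params.get(param) == expected_value else []

params = {'proxy': 'socks5://127.0.0.1:1080', 'nocheckcertificate': True}
cmd = ['curl', '--location', '-o', 'video.part']
cmd += cli_option(params, '--interface', 'source_address')          # [] because it is unset
cmd += cli_option(params, '--proxy', 'proxy')
cmd += cli_valueless_option(params, '--insecure', 'nocheckcertificate')
print(cmd)  # ['curl', '--location', '-o', 'video.part', '--proxy', 'socks5://...', '--insecure']
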
def _call_downloader(self, tmpfilename, info_dict):
""" Either overwrite this or implement _make_cmd """
@@ -77,7 +80,19 @@ class CurlFD(ExternalFD):
cmd = [self.exe, '--location', '-o', tmpfilename]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
- cmd += self._source_address('--interface')
+ cmd += self._option('--interface', 'source_address')
+ cmd += self._option('--proxy', 'proxy')
+ cmd += self._valueless_option('--insecure', 'nocheckcertificate')
+ cmd += self._configuration_args()
+ cmd += ['--', info_dict['url']]
+ return cmd
+
+
+class AxelFD(ExternalFD):
+ def _make_cmd(self, tmpfilename, info_dict):
+ cmd = [self.exe, '-o', tmpfilename]
+ for key, val in info_dict['http_headers'].items():
+ cmd += ['-H', '%s: %s' % (key, val)]
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
@@ -88,7 +103,9 @@ class WgetFD(ExternalFD):
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
- cmd += self._source_address('--bind-address')
+ cmd += self._option('--bind-address', 'source_address')
+ cmd += self._option('--proxy', 'proxy')
+ cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
@@ -105,7 +122,9 @@ class Aria2cFD(ExternalFD):
cmd += ['--out', os.path.basename(tmpfilename)]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
- cmd += self._source_address('--interface')
+ cmd += self._option('--interface', 'source_address')
+ cmd += self._option('--all-proxy', 'proxy')
+ cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
cmd += ['--', info_dict['url']]
return cmd
diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py
index b1a858c45..174180db5 100644
--- a/youtube_dl/downloader/f4m.py
+++ b/youtube_dl/downloader/f4m.py
@@ -7,17 +7,16 @@ import os
import time
import xml.etree.ElementTree as etree
-from .common import FileDownloader
-from .http import HttpFD
+from .fragment import FragmentFD
from ..compat import (
compat_urlparse,
compat_urllib_error,
)
from ..utils import (
- struct_pack,
- struct_unpack,
encodeFilename,
sanitize_open,
+ struct_pack,
+ struct_unpack,
xpath_text,
)
@@ -226,16 +225,13 @@ def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
-class HttpQuietDownloader(HttpFD):
- def to_screen(self, *args, **kargs):
- pass
-
-
-class F4mFD(FileDownloader):
+class F4mFD(FragmentFD):
"""
A downloader for f4m manifests or AdobeHDS.
"""
+ FD_NAME = 'f4m'
+
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
@@ -288,7 +284,7 @@ class F4mFD(FileDownloader):
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
- self.to_screen('[download] Downloading f4m manifest')
+ self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
manifest = self.ydl.urlopen(man_url).read()
doc = etree.fromstring(manifest)
@@ -320,67 +316,20 @@ class F4mFD(FileDownloader):
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
- self.report_destination(filename)
- http_dl = HttpQuietDownloader(
- self.ydl,
- {
- 'continuedl': True,
- 'quiet': True,
- 'noprogress': True,
- 'ratelimit': self.params.get('ratelimit', None),
- 'test': self.params.get('test', False),
- }
- )
- tmpfilename = self.temp_name(filename)
- (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
+ ctx = {
+ 'filename': filename,
+ 'total_frags': total_frags,
+ }
+
+ self._prepare_frag_download(ctx)
+
+ dest_stream = ctx['dest_stream']
write_flv_header(dest_stream)
if not live:
write_metadata_tag(dest_stream, metadata)
- # This dict stores the download progress, it's updated by the progress
- # hook
- state = {
- 'status': 'downloading',
- 'downloaded_bytes': 0,
- 'frag_index': 0,
- 'frag_count': total_frags,
- 'filename': filename,
- 'tmpfilename': tmpfilename,
- }
- start = time.time()
-
- def frag_progress_hook(s):
- if s['status'] not in ('downloading', 'finished'):
- return
-
- frag_total_bytes = s.get('total_bytes', 0)
- if s['status'] == 'finished':
- state['downloaded_bytes'] += frag_total_bytes
- state['frag_index'] += 1
-
- estimated_size = (
- (state['downloaded_bytes'] + frag_total_bytes) /
- (state['frag_index'] + 1) * total_frags)
- time_now = time.time()
- state['total_bytes_estimate'] = estimated_size
- state['elapsed'] = time_now - start
-
- if s['status'] == 'finished':
- progress = self.calc_percent(state['frag_index'], total_frags)
- else:
- frag_downloaded_bytes = s['downloaded_bytes']
- frag_progress = self.calc_percent(frag_downloaded_bytes,
- frag_total_bytes)
- progress = self.calc_percent(state['frag_index'], total_frags)
- progress += frag_progress / float(total_frags)
-
- state['eta'] = self.calc_eta(
- start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
- state['speed'] = s.get('speed')
- self._hook_progress(state)
-
- http_dl.add_progress_hook(frag_progress_hook)
+ self._start_frag_download(ctx)
frags_filenames = []
while fragments_list:
@@ -391,23 +340,24 @@ class F4mFD(FileDownloader):
url += '?' + akamai_pv.strip(';')
if info_dict.get('extra_param_to_segment_url'):
url += info_dict.get('extra_param_to_segment_url')
- frag_filename = '%s-%s' % (tmpfilename, name)
+ frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
try:
- success = http_dl.download(frag_filename, {'url': url})
+ success = ctx['dl'].download(frag_filename, {'url': url})
if not success:
return False
- with open(frag_filename, 'rb') as down:
- down_data = down.read()
- reader = FlvReader(down_data)
- while True:
- _, box_type, box_data = reader.read_box_info()
- if box_type == b'mdat':
- dest_stream.write(box_data)
- break
+ (down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
+ down_data = down.read()
+ down.close()
+ reader = FlvReader(down_data)
+ while True:
+ _, box_type, box_data = reader.read_box_info()
+ if box_type == b'mdat':
+ dest_stream.write(box_data)
+ break
if live:
- os.remove(frag_filename)
+ os.remove(encodeFilename(frag_sanitized))
else:
- frags_filenames.append(frag_filename)
+ frags_filenames.append(frag_sanitized)
except (compat_urllib_error.HTTPError, ) as err:
if live and (err.code == 404 or err.code == 410):
# We didn't keep up with the live window. Continue
@@ -425,20 +375,9 @@ class F4mFD(FileDownloader):
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
self.report_warning(msg)
- dest_stream.close()
+ self._finish_frag_download(ctx)
- elapsed = time.time() - start
- self.try_rename(tmpfilename, filename)
for frag_file in frags_filenames:
- os.remove(frag_file)
-
- fsize = os.path.getsize(encodeFilename(filename))
- self._hook_progress({
- 'downloaded_bytes': fsize,
- 'total_bytes': fsize,
- 'filename': filename,
- 'status': 'finished',
- 'elapsed': elapsed,
- })
+ os.remove(encodeFilename(frag_file))
return True
diff --git a/youtube_dl/downloader/fragment.py b/youtube_dl/downloader/fragment.py
new file mode 100644
index 000000000..5a64b29ee
--- /dev/null
+++ b/youtube_dl/downloader/fragment.py
@@ -0,0 +1,111 @@
+from __future__ import division, unicode_literals
+
+import os
+import time
+
+from .common import FileDownloader
+from .http import HttpFD
+from ..utils import (
+ encodeFilename,
+ sanitize_open,
+)
+
+
+class HttpQuietDownloader(HttpFD):
+ def to_screen(self, *args, **kargs):
+ pass
+
+
+class FragmentFD(FileDownloader):
+ """
+ A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).
+ """
+
+ def _prepare_and_start_frag_download(self, ctx):
+ self._prepare_frag_download(ctx)
+ self._start_frag_download(ctx)
+
+ def _prepare_frag_download(self, ctx):
+ self.to_screen('[%s] Total fragments: %d' % (self.FD_NAME, ctx['total_frags']))
+ self.report_destination(ctx['filename'])
+ dl = HttpQuietDownloader(
+ self.ydl,
+ {
+ 'continuedl': True,
+ 'quiet': True,
+ 'noprogress': True,
+ 'ratelimit': self.params.get('ratelimit', None),
+ 'retries': self.params.get('retries', 0),
+ 'test': self.params.get('test', False),
+ }
+ )
+ tmpfilename = self.temp_name(ctx['filename'])
+ dest_stream, tmpfilename = sanitize_open(tmpfilename, 'wb')
+ ctx.update({
+ 'dl': dl,
+ 'dest_stream': dest_stream,
+ 'tmpfilename': tmpfilename,
+ })
+
+ def _start_frag_download(self, ctx):
+ total_frags = ctx['total_frags']
+ # This dict stores the download progress; it's updated by the progress
+ # hook
+ state = {
+ 'status': 'downloading',
+ 'downloaded_bytes': 0,
+ 'frag_index': 0,
+ 'frag_count': total_frags,
+ 'filename': ctx['filename'],
+ 'tmpfilename': ctx['tmpfilename'],
+ }
+ start = time.time()
+ ctx['started'] = start
+
+ def frag_progress_hook(s):
+ if s['status'] not in ('downloading', 'finished'):
+ return
+
+ frag_total_bytes = s.get('total_bytes', 0)
+ if s['status'] == 'finished':
+ state['downloaded_bytes'] += frag_total_bytes
+ state['frag_index'] += 1
+
+ estimated_size = (
+ (state['downloaded_bytes'] + frag_total_bytes) /
+ (state['frag_index'] + 1) * total_frags)
+ time_now = time.time()
+ state['total_bytes_estimate'] = estimated_size
+ state['elapsed'] = time_now - start
+
+ if s['status'] == 'finished':
+ progress = self.calc_percent(state['frag_index'], total_frags)
+ else:
+ frag_downloaded_bytes = s['downloaded_bytes']
+ frag_progress = self.calc_percent(frag_downloaded_bytes,
+ frag_total_bytes)
+ progress = self.calc_percent(state['frag_index'], total_frags)
+ progress += frag_progress / float(total_frags)
+
+ state['eta'] = self.calc_eta(
+ start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
+ state['speed'] = s.get('speed')
+ self._hook_progress(state)
+
+ ctx['dl'].add_progress_hook(frag_progress_hook)
+
+ return start
+
+ def _finish_frag_download(self, ctx):
+ ctx['dest_stream'].close()
+ elapsed = time.time() - ctx['started']
+ self.try_rename(ctx['tmpfilename'], ctx['filename'])
+ fsize = os.path.getsize(encodeFilename(ctx['filename']))
+
+ self._hook_progress({
+ 'downloaded_bytes': fsize,
+ 'total_bytes': fsize,
+ 'filename': ctx['filename'],
+ 'status': 'finished',
+ 'elapsed': elapsed,
+ })
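
# A condensed sketch of how a FragmentFD subclass drives the context above (mirroring the F4mFD
# and NativeHlsFD hunks in this patch): prepare, download each fragment via ctx['dl'], append it
# to ctx['dest_stream'], then finish. Schematic only; `fd` is assumed to be a FragmentFD subclass.
def download_fragments(fd, filename, fragment_urls):
    ctx = {
        'filename': filename,
        'total_frags': len(fragment_urls),
    }
    fd._prepare_and_start_frag_download(ctx)
    for i, frag_url in enumerate(fragment_urls):
        frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
        if not ctx['dl'].download(frag_filename, {'url': frag_url}):
            return False
        with open(frag_filename, 'rb') as down:
            ctx['dest_stream'].write(down.read())
    fd._finish_frag_download(ctx)
    return True
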
diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
index 8be4f4249..a62d2047b 100644
--- a/youtube_dl/downloader/hls.py
+++ b/youtube_dl/downloader/hls.py
@@ -4,15 +4,15 @@ import os
import re
import subprocess
-from ..postprocessor.ffmpeg import FFmpegPostProcessor
from .common import FileDownloader
-from ..compat import (
- compat_urlparse,
- compat_urllib_request,
-)
+from .fragment import FragmentFD
+
+from ..compat import compat_urlparse
+from ..postprocessor.ffmpeg import FFmpegPostProcessor
from ..utils import (
encodeArgument,
encodeFilename,
+ sanitize_open,
)
@@ -28,10 +28,21 @@ class HlsFD(FileDownloader):
return False
ffpp.check_version()
- args = [
- encodeArgument(opt)
- for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
- args.append(encodeFilename(tmpfilename, True))
+ args = [ffpp.executable, '-y']
+
+ if info_dict['http_headers']:
+ # A trailing \r\n after each HTTP header is important to prevent a warning from ffmpeg/avconv:
+ # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
+ args += [
+ '-headers',
+ ''.join('%s: %s\r\n' % (key, val) for key, val in info_dict['http_headers'].items())]
+
+ args += ['-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc']
+
+ args = [encodeArgument(opt) for opt in args]
+ args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
+
+ self._debug_cmd(args)
retval = subprocess.call(args)
if retval == 0:
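
# A small demonstration of the '-headers' value built above: every HTTP header has to end with
# CRLF or ffmpeg/avconv warns about a missing trailing CRLF. Header values are illustrative.
http_headers = {
    'User-Agent': 'Mozilla/5.0',
    'Cookie': 'session=abc123',
}
headers_arg = ''.join('%s: %s\r\n' % (key, val) for key, val in http_headers.items())
args = ['ffmpeg', '-y', '-headers', headers_arg,
        '-i', 'https://example.com/master.m3u8',
        '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc', 'output.mp4']
print(args)
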
@@ -51,54 +62,51 @@ class HlsFD(FileDownloader):
return False
-class NativeHlsFD(FileDownloader):
+class NativeHlsFD(FragmentFD):
""" A more limited implementation that does not require ffmpeg """
+ FD_NAME = 'hlsnative'
+
def real_download(self, filename, info_dict):
- url = info_dict['url']
- self.report_destination(filename)
- tmpfilename = self.temp_name(filename)
+ man_url = info_dict['url']
+ self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
+ manifest = self.ydl.urlopen(man_url).read()
- self.to_screen(
- '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
- data = self.ydl.urlopen(url).read()
- s = data.decode('utf-8', 'ignore')
- segment_urls = []
+ s = manifest.decode('utf-8', 'ignore')
+ fragment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
- else compat_urlparse.urljoin(url, line))
- segment_urls.append(segment_url)
-
- is_test = self.params.get('test', False)
- remaining_bytes = self._TEST_FILE_SIZE if is_test else None
- byte_counter = 0
- with open(tmpfilename, 'wb') as outf:
- for i, segurl in enumerate(segment_urls):
- self.to_screen(
- '[hlsnative] %s: Downloading segment %d / %d' %
- (info_dict['id'], i + 1, len(segment_urls)))
- seg_req = compat_urllib_request.Request(segurl)
- if remaining_bytes is not None:
- seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
-
- segment = self.ydl.urlopen(seg_req).read()
- if remaining_bytes is not None:
- segment = segment[:remaining_bytes]
- remaining_bytes -= len(segment)
- outf.write(segment)
- byte_counter += len(segment)
- if remaining_bytes is not None and remaining_bytes <= 0:
+ else compat_urlparse.urljoin(man_url, line))
+ fragment_urls.append(segment_url)
+ # We only download the first fragment during the test
+ if self.params.get('test', False):
break
- self._hook_progress({
- 'downloaded_bytes': byte_counter,
- 'total_bytes': byte_counter,
+ ctx = {
'filename': filename,
- 'status': 'finished',
- })
- self.try_rename(tmpfilename, filename)
+ 'total_frags': len(fragment_urls),
+ }
+
+ self._prepare_and_start_frag_download(ctx)
+
+ frags_filenames = []
+ for i, frag_url in enumerate(fragment_urls):
+ frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
+ success = ctx['dl'].download(frag_filename, {'url': frag_url})
+ if not success:
+ return False
+ down, frag_sanitized = sanitize_open(frag_filename, 'rb')
+ ctx['dest_stream'].write(down.read())
+ down.close()
+ frags_filenames.append(frag_sanitized)
+
+ self._finish_frag_download(ctx)
+
+ for frag_file in frags_filenames:
+ os.remove(encodeFilename(frag_file))
+
return True
diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index b7f144af9..a29f5cf31 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -4,6 +4,7 @@ import errno
import os
import socket
import time
+import re
from .common import FileDownloader
from ..compat import (
@@ -57,6 +58,24 @@ class HttpFD(FileDownloader):
# Establish connection
try:
data = self.ydl.urlopen(request)
+ # When trying to resume, the Content-Range HTTP header of the response has to be
+ # checked against the value of the requested Range HTTP header. This is because some
+ # webservers don't support resuming and serve the whole file with no Content-Range
+ # set in the response despite the requested Range (see
+ # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
+ if resume_len > 0:
+ content_range = data.headers.get('Content-Range')
+ if content_range:
+ content_range_m = re.search(r'bytes (\d+)-', content_range)
+ # Content-Range is present and matches the requested Range; resume is possible
+ if content_range_m and resume_len == int(content_range_m.group(1)):
+ break
+ # Content-Range is either not present or invalid. Assuming the remote webserver is
+ # trying to send the whole file, resume is not possible, so wipe the local file
+ # and perform an entire redownload
+ self.report_unable_to_resume()
+ resume_len = 0
+ open_mode = 'wb'
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
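
# A quick check of the resume validation added above: the Content-Range of the response has to
# start at the byte offset we asked to resume from, otherwise the server ignored the Range
# header and the download must restart from scratch. Header strings below are examples.
import re

def can_resume(content_range, resume_len):
    if not content_range:
        return False
    m = re.search(r'bytes (\d+)-', content_range)
    return bool(m) and resume_len == int(m.group(1))

print(can_resume('bytes 1048576-2097151/2097152', 1048576))  # True  -> append to existing file
print(can_resume('bytes 0-2097151/2097152', 1048576))        # False -> wipe and redownload
print(can_resume(None, 1048576))                              # False -> wipe and redownload
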
diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
index 7d19bb808..f1d219ba9 100644
--- a/youtube_dl/downloader/rtmp.py
+++ b/youtube_dl/downloader/rtmp.py
@@ -105,7 +105,7 @@ class RtmpFD(FileDownloader):
protocol = info_dict.get('rtmp_protocol', None)
real_time = info_dict.get('rtmp_real_time', False)
no_resume = info_dict.get('no_resume', False)
- continue_dl = info_dict.get('continuedl', True)
+ continue_dl = self.params.get('continuedl', True)
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 5c03bf8e8..bd6eb6ae0 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -43,7 +43,10 @@ from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
-from .bbccouk import BBCCoUkIE
+from .bbc import (
+ BBCCoUkIE,
+ BBCIE,
+)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
@@ -73,6 +76,7 @@ from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
+from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
@@ -115,6 +119,7 @@ from .dailymotion import (
)
from .daum import DaumIE
from .dbtv import DBTVIE
+from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
@@ -134,7 +139,6 @@ from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
-from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
@@ -154,6 +158,8 @@ from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
+from .esri import EsriVideoIE
+from .europa import EuropaIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
@@ -161,14 +167,12 @@ from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
+from .fczenit import FczenitIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
-from .fktv import (
- FKTVIE,
- FKTVPosteckeIE,
-)
+from .fktv import FKTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
@@ -224,7 +228,6 @@ from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
-from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
@@ -236,13 +239,21 @@ from .imdb import (
ImdbIE,
ImdbListIE
)
-from .imgur import ImgurIE
+from .imgur import (
+ ImgurIE,
+ ImgurAlbumIE,
+)
from .ina import InaIE
+from .indavideo import (
+ IndavideoIE,
+ IndavideoEmbedIE,
+)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
+from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
@@ -275,6 +286,7 @@ from .kuwo import (
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
+from .lecture2go import Lecture2GoIE
from .letv import (
LetvIE,
LetvTvIE,
@@ -285,6 +297,11 @@ from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
+from .limelight import (
+ LimelightMediaIE,
+ LimelightChannelIE,
+ LimelightChannelListIE,
+)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
@@ -302,7 +319,6 @@ from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
-from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
@@ -329,11 +345,12 @@ from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
+ MTVDEIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
-from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
+from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
@@ -347,10 +364,14 @@ from .nbc import (
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
+ MSNBCIE,
)
from .ndr import (
NDRIE,
NJoyIE,
+ NDREmbedBaseIE,
+ NDREmbedIE,
+ NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
@@ -386,7 +407,11 @@ from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
-from .nowness import NownessIE
+from .nowness import (
+ NownessIE,
+ NownessPlaylistIE,
+ NownessSeriesIE,
+)
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
@@ -416,7 +441,6 @@ from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
-from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
@@ -426,6 +450,10 @@ from .orf import (
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
+from .periscope import (
+ PeriscopeIE,
+ QuickscopeIE,
+)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
@@ -434,8 +462,13 @@ from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
+from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
+from .pluralsight import (
+ PluralsightIE,
+ PluralsightCourseIE,
+)
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
@@ -481,6 +514,7 @@ from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
+from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
@@ -507,6 +541,7 @@ from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
+from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
@@ -586,6 +621,7 @@ from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
+from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
@@ -593,7 +629,10 @@ from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
-from .theplatform import ThePlatformIE
+from .theplatform import (
+ ThePlatformIE,
+ ThePlatformFeedIE,
+)
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
@@ -677,7 +716,6 @@ from .vgtv import (
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
-from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
@@ -712,6 +750,7 @@ from .vk import (
VKIE,
VKUserVideosIE,
)
+from .vlive import VLiveIE
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py
index dc0fb85d6..f9a389f67 100644
--- a/youtube_dl/extractor/abc.py
+++ b/youtube_dl/extractor/abc.py
@@ -1,16 +1,20 @@
from __future__ import unicode_literals
import re
-import json
from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ js_to_json,
+ int_or_none,
+)
class ABCIE(InfoExtractor):
IE_NAME = 'abc.net.au'
_VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
@@ -19,22 +23,47 @@ class ABCIE(InfoExtractor):
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
- }
+ }, {
+ 'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
+ 'md5': 'db2a5369238b51f9811ad815b69dc086',
+ 'info_dict': {
+ 'id': 'NvqvPeNZsHU',
+ 'ext': 'mp4',
+ 'upload_date': '20150816',
+ 'uploader': 'ABC News (Australia)',
+ 'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
+ 'uploader_id': 'NewsOnABC',
+ 'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
+ },
+ 'add_ie': ['Youtube'],
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- urls_info_json = self._search_regex(
- r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls',
- flags=re.DOTALL)
- urls_info = json.loads(urls_info_json.replace('\'', '"'))
+ mobj = re.search(
+ r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
+ webpage)
+ if mobj is None:
+ raise ExtractorError('Unable to extract video urls')
+
+ urls_info = self._parse_json(
+ mobj.group('json_data'), video_id, transform_source=js_to_json)
+
+ if not isinstance(urls_info, list):
+ urls_info = [urls_info]
+
+ if mobj.group('type') == 'YouTube':
+ return self.playlist_result([
+ self.url_result(url_info['url']) for url_info in urls_info])
+
formats = [{
'url': url_info['url'],
- 'width': int(url_info['width']),
- 'height': int(url_info['height']),
- 'tbr': int(url_info['bitrate']),
- 'filesize': int(url_info['filesize']),
+ 'width': int_or_none(url_info.get('width')),
+ 'height': int_or_none(url_info.get('height')),
+ 'tbr': int_or_none(url_info.get('bitrate')),
+ 'filesize': int_or_none(url_info.get('filesize')),
} for url_info in urls_info]
self._sort_formats(formats)
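
# A rough sketch of what the js_to_json transform above achieves for the inlineVideoData payload:
# the page embeds a JavaScript object literal with single quotes, which has to become valid JSON
# before parsing. The replace() below is a minimal stand-in for youtube_dl.utils.js_to_json and
# only handles this simple, made-up payload.
import json
import re

webpage_snippet = "inlineVideoData.push({'url': 'http://example.com/video.mp4', 'bitrate': 1024});"
mobj = re.search(
    r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
    webpage_snippet)
urls_info = json.loads(mobj.group('json_data').replace("'", '"'))
print(urls_info)  # {'url': 'http://example.com/video.mp4', 'bitrate': 1024}
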
diff --git a/youtube_dl/extractor/academicearth.py b/youtube_dl/extractor/academicearth.py
index 47313fba8..34095501c 100644
--- a/youtube_dl/extractor/academicearth.py
+++ b/youtube_dl/extractor/academicearth.py
@@ -15,7 +15,7 @@ class AcademicEarthCourseIE(InfoExtractor):
'title': 'Laws of Nature',
'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
},
- 'playlist_count': 4,
+ 'playlist_count': 3,
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py
index 39335b827..130afe791 100644
--- a/youtube_dl/extractor/adultswim.py
+++ b/youtube_dl/extractor/adultswim.py
@@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import (
+ determine_ext,
ExtractorError,
float_or_none,
xpath_text,
@@ -40,7 +41,8 @@ class AdultSwimIE(InfoExtractor):
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
'title': 'Rick and Morty - Pilot',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
- }
+ },
+ 'skip': 'This video is only available for registered users',
}, {
'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
'playlist': [
@@ -123,7 +125,6 @@ class AdultSwimIE(InfoExtractor):
else:
collections = bootstrapped_data['show']['collections']
collection, video_info = self.find_collection_containing_video(collections, episode_path)
-
# Video wasn't found in the collections, let's try `slugged_video`.
if video_info is None:
if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path:
@@ -133,7 +134,15 @@ class AdultSwimIE(InfoExtractor):
show = bootstrapped_data['show']
show_title = show['title']
- segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
+ stream = video_info.get('stream')
+ clips = [stream] if stream else video_info.get('clips')
+ if not clips:
+ raise ExtractorError(
+ 'This video is only available via cable service provider subscription that'
+ ' is not currently supported. You may want to use --cookies.'
+ if video_info.get('auth') is True else 'Unable to find stream or clips',
+ expected=True)
+ segment_ids = [clip['videoPlaybackID'] for clip in clips]
episode_id = video_info['id']
episode_title = video_info['title']
@@ -142,7 +151,7 @@ class AdultSwimIE(InfoExtractor):
entries = []
for part_num, segment_id in enumerate(segment_ids):
- segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id
+ segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id
segment_title = '%s - %s' % (show_title, episode_title)
if len(segment_ids) > 1:
@@ -156,19 +165,32 @@ class AdultSwimIE(InfoExtractor):
xpath_text(idoc, './/trt', 'segment duration').strip())
formats = []
- file_els = idoc.findall('.//files/file')
+ file_els = idoc.findall('.//files/file') or idoc.findall('./files/file')
+ unique_urls = []
+ unique_file_els = []
for file_el in file_els:
+ media_url = file_el.text
+ if not media_url or determine_ext(media_url) == 'f4m':
+ continue
+ if file_el.text not in unique_urls:
+ unique_urls.append(file_el.text)
+ unique_file_els.append(file_el)
+
+ for file_el in unique_file_els:
bitrate = file_el.attrib.get('bitrate')
ftype = file_el.attrib.get('type')
-
- formats.append({
- 'format_id': '%s_%s' % (bitrate, ftype),
- 'url': file_el.text.strip(),
- # The bitrate may not be a number (for example: 'iphone')
- 'tbr': int(bitrate) if bitrate.isdigit() else None,
- 'quality': 1 if ftype == 'hd' else -1
- })
+ media_url = file_el.text
+ if determine_ext(media_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, segment_title, 'mp4', 'm3u8_native', preference=0, m3u8_id='hls'))
+ else:
+ formats.append({
+ 'format_id': '%s_%s' % (bitrate, ftype),
+ 'url': file_el.text.strip(),
+ # The bitrate may not be a number (for example: 'iphone')
+ 'tbr': int(bitrate) if bitrate.isdigit() else None,
+ })
self._sort_formats(formats)
diff --git a/youtube_dl/extractor/airmozilla.py b/youtube_dl/extractor/airmozilla.py
index 611ad1e9d..f8e70f4e5 100644
--- a/youtube_dl/extractor/airmozilla.py
+++ b/youtube_dl/extractor/airmozilla.py
@@ -20,14 +20,14 @@ class AirMozillaIE(InfoExtractor):
'id': '6x4q2w',
'ext': 'mp4',
'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
- 'thumbnail': 're:https://\w+\.cloudfront\.net/6x4q2w/poster\.jpg\?t=\d+',
+ 'thumbnail': 're:https?://vid\.ly/(?P<id>[0-9a-z-]+)/poster',
'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
'timestamp': 1422487800,
'upload_date': '20150128',
'location': 'SFO Commons',
'duration': 3780,
'view_count': int,
- 'categories': ['Main'],
+ 'categories': ['Main', 'Privacy'],
}
}
diff --git a/youtube_dl/extractor/aljazeera.py b/youtube_dl/extractor/aljazeera.py
index 612708e25..184a14a4f 100644
--- a/youtube_dl/extractor/aljazeera.py
+++ b/youtube_dl/extractor/aljazeera.py
@@ -16,6 +16,7 @@ class AlJazeeraIE(InfoExtractor):
'uploader': 'Al Jazeera English',
},
'add_ie': ['Brightcove'],
+ 'skip': 'Not accessible from Travis CI server',
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py
index 576f03b5b..f68dc3236 100644
--- a/youtube_dl/extractor/appletrailers.py
+++ b/youtube_dl/extractor/appletrailers.py
@@ -13,53 +13,53 @@ from ..utils import (
class AppleTrailersIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
_TESTS = [{
- "url": "http://trailers.apple.com/trailers/wb/manofsteel/",
+ 'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
'info_dict': {
'id': 'manofsteel',
},
- "playlist": [
+ 'playlist': [
{
- "md5": "d97a8e575432dbcb81b7c3acb741f8a8",
- "info_dict": {
- "id": "manofsteel-trailer4",
- "ext": "mov",
- "duration": 111,
- "title": "Trailer 4",
- "upload_date": "20130523",
- "uploader_id": "wb",
+ 'md5': 'd97a8e575432dbcb81b7c3acb741f8a8',
+ 'info_dict': {
+ 'id': 'manofsteel-trailer4',
+ 'ext': 'mov',
+ 'duration': 111,
+ 'title': 'Trailer 4',
+ 'upload_date': '20130523',
+ 'uploader_id': 'wb',
},
},
{
- "md5": "b8017b7131b721fb4e8d6f49e1df908c",
- "info_dict": {
- "id": "manofsteel-trailer3",
- "ext": "mov",
- "duration": 182,
- "title": "Trailer 3",
- "upload_date": "20130417",
- "uploader_id": "wb",
+ 'md5': 'b8017b7131b721fb4e8d6f49e1df908c',
+ 'info_dict': {
+ 'id': 'manofsteel-trailer3',
+ 'ext': 'mov',
+ 'duration': 182,
+ 'title': 'Trailer 3',
+ 'upload_date': '20130417',
+ 'uploader_id': 'wb',
},
},
{
- "md5": "d0f1e1150989b9924679b441f3404d48",
- "info_dict": {
- "id": "manofsteel-trailer",
- "ext": "mov",
- "duration": 148,
- "title": "Trailer",
- "upload_date": "20121212",
- "uploader_id": "wb",
+ 'md5': 'd0f1e1150989b9924679b441f3404d48',
+ 'info_dict': {
+ 'id': 'manofsteel-trailer',
+ 'ext': 'mov',
+ 'duration': 148,
+ 'title': 'Trailer',
+ 'upload_date': '20121212',
+ 'uploader_id': 'wb',
},
},
{
- "md5": "5fe08795b943eb2e757fa95cb6def1cb",
- "info_dict": {
- "id": "manofsteel-teaser",
- "ext": "mov",
- "duration": 93,
- "title": "Teaser",
- "upload_date": "20120721",
- "uploader_id": "wb",
+ 'md5': '5fe08795b943eb2e757fa95cb6def1cb',
+ 'info_dict': {
+ 'id': 'manofsteel-teaser',
+ 'ext': 'mov',
+ 'duration': 93,
+ 'title': 'Teaser',
+ 'upload_date': '20120721',
+ 'uploader_id': 'wb',
},
},
]
diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py
index 76de24477..2a00da3ee 100644
--- a/youtube_dl/extractor/arte.py
+++ b/youtube_dl/extractor/arte.py
@@ -4,6 +4,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
+)
from ..utils import (
find_xpath_attr,
unified_strdate,
@@ -77,7 +81,13 @@ class ArteTVPlus7IE(InfoExtractor):
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(
[r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
- webpage, 'json vp url')
+ webpage, 'json vp url', default=None)
+ if not json_url:
+ iframe_url = self._html_search_regex(
+ r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
+ webpage, 'iframe url', group='url')
+ json_url = compat_parse_qs(
+ compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
return self._extract_from_json_url(json_url, video_id, lang)
def _extract_from_json_url(self, json_url, video_id, lang):
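
# A short illustration of the iframe fallback above: when no direct arte_vp_url is present, the
# mediaselector JSON URL is recovered from the iframe's json_url query parameter. Uses the stdlib
# equivalents of compat_parse_qs/compat_urllib_parse_urlparse; the iframe URL below is made up.
try:
    from urllib.parse import parse_qs, urlparse  # Python 3
except ImportError:
    from urlparse import parse_qs, urlparse  # Python 2

iframe_url = ('http://www.arte.tv/player/v2/index.php?json_url='
              'http%3A%2F%2Fexample.org%2Fplayer%2F012345.json&lang=fr')
json_url = parse_qs(urlparse(iframe_url).query)['json_url'][0]
print(json_url)  # http://example.org/player/012345.json
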
diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
index 505877b77..c1ef8051d 100644
--- a/youtube_dl/extractor/bandcamp.py
+++ b/youtube_dl/extractor/bandcamp.py
@@ -10,6 +10,8 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ float_or_none,
+ int_or_none,
)
@@ -52,11 +54,11 @@ class BandcampIE(InfoExtractor):
ext, abr_str = format_id.split('-', 1)
formats.append({
'format_id': format_id,
- 'url': format_url,
+ 'url': self._proto_relative_url(format_url, 'http:'),
'ext': ext,
'vcodec': 'none',
'acodec': ext,
- 'abr': int(abr_str),
+ 'abr': int_or_none(abr_str),
})
self._sort_formats(formats)
@@ -65,7 +67,7 @@ class BandcampIE(InfoExtractor):
'id': compat_str(data['id']),
'title': data['title'],
'formats': formats,
- 'duration': float(data['duration']),
+ 'duration': float_or_none(data.get('duration')),
}
else:
raise ExtractorError('No free songs found')
@@ -93,8 +95,8 @@ class BandcampIE(InfoExtractor):
final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
# If we could correctly generate the .rand field the url would be
# in the "download_url" key
- final_url = self._search_regex(
- r'"retry_url":"(.*?)"', final_url_webpage, 'final video URL')
+ final_url = self._proto_relative_url(self._search_regex(
+ r'"retry_url":"(.+?)"', final_url_webpage, 'final video URL'), 'http:')
return {
'id': video_id,
diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
new file mode 100644
index 000000000..1b3a33e4e
--- /dev/null
+++ b/youtube_dl/extractor/bbc.py
@@ -0,0 +1,904 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ float_or_none,
+ int_or_none,
+ parse_duration,
+ parse_iso8601,
+ remove_end,
+ unescapeHTML,
+)
+from ..compat import compat_HTTPError
+
+
+class BBCCoUkIE(InfoExtractor):
+ IE_NAME = 'bbc.co.uk'
+ IE_DESC = 'BBC iPlayer'
+ _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
+
+ _MEDIASELECTOR_URLS = [
+ # Provides HQ HLS streams with even better quality that pc mediaset but fails
+ # with geolocation in some cases when it's even not geo restricted at all (e.g.
+ # http://www.bbc.co.uk/programmes/b06bp7lf)
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
+ ]
+
+ _MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
+ _EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
+
+ _NAMESPACES = (
+ _MEDIASELECTION_NS,
+ _EMP_PLAYLIST_NS,
+ )
+
+ _TESTS = [
+ {
+ 'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
+ 'info_dict': {
+ 'id': 'b039d07m',
+ 'ext': 'flv',
+ 'title': 'Kaleidoscope, Leonard Cohen',
+ 'description': 'The Canadian poet and songwriter reflects on his musical career.',
+ 'duration': 1740,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ },
+ {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
+ 'info_dict': {
+ 'id': 'b00yng1d',
+ 'ext': 'flv',
+ 'title': 'The Man in Black: Series 3: The Printed Name',
+ 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
+ 'duration': 1800,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'Episode is no longer available on BBC iPlayer Radio',
+ },
+ {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
+ 'info_dict': {
+ 'id': 'b00yng1d',
+ 'ext': 'flv',
+ 'title': 'The Voice UK: Series 3: Blind Auditions 5',
+ 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+ 'duration': 5100,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
+ },
+ {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
+ 'info_dict': {
+ 'id': 'b03k3pb7',
+ 'ext': 'flv',
+ 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
+ 'description': '2. Invasion',
+ 'duration': 3600,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
+ }, {
+ 'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
+ 'info_dict': {
+ 'id': 'b04v209v',
+ 'ext': 'flv',
+ 'title': 'Pete Tong, The Essential New Tune Special',
+ 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
+ 'duration': 10800,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
+ 'note': 'Audio',
+ 'info_dict': {
+ 'id': 'p02frcch',
+ 'ext': 'flv',
+ 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
+ 'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
+ 'duration': 3507,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
+ 'note': 'Video',
+ 'info_dict': {
+ 'id': 'p025c103',
+ 'ext': 'flv',
+ 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
+ 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
+ 'duration': 226,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
+ 'info_dict': {
+ 'id': 'p02n76xf',
+ 'ext': 'flv',
+ 'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
+ 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
+ 'duration': 3540,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'geolocation',
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
+ 'info_dict': {
+ 'id': 'b05zmgw1',
+ 'ext': 'flv',
+ 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
+ 'title': 'Royal Academy Summer Exhibition',
+ 'duration': 3540,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'geolocation',
+ }, {
+ # iptv-all mediaset fails with geolocation however there is no geo restriction
+ # for this programme at all
+ 'url': 'http://www.bbc.co.uk/programmes/b06bp7lf',
+ 'info_dict': {
+ 'id': 'b06bp7kf',
+ 'ext': 'flv',
+ 'title': "Annie Mac's Friday Night, B.Traits sits in for Annie",
+ 'description': 'B.Traits sits in for Annie Mac with a Mini-Mix from Disclosure.',
+ 'duration': 10800,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
+ 'only_matching': True,
+ }
+ ]
+
+ class MediaSelectionError(Exception):
+ def __init__(self, id):
+ self.id = id
+
+ def _extract_asx_playlist(self, connection, programme_id):
+ asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
+ return [ref.get('href') for ref in asx.findall('./Entry/ref')]
+
+ def _extract_connection(self, connection, programme_id):
+ formats = []
+ kind = connection.get('kind')
+ protocol = connection.get('protocol')
+ supplier = connection.get('supplier')
+ if protocol == 'http':
+ href = connection.get('href')
+ transfer_format = connection.get('transferFormat')
+ # ASX playlist
+ if supplier == 'asx':
+ for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
+ formats.append({
+ 'url': ref,
+ 'format_id': 'ref%s_%s' % (i, supplier),
+ })
+ # Skip DASH until supported
+ elif transfer_format == 'dash':
+ pass
+ elif transfer_format == 'hls':
+ m3u8_formats = self._extract_m3u8_formats(
+ href, programme_id, ext='mp4', entry_protocol='m3u8_native',
+ m3u8_id=supplier, fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ # Direct link
+ else:
+ formats.append({
+ 'url': href,
+ 'format_id': supplier or kind or protocol,
+ })
+ elif protocol == 'rtmp':
+ application = connection.get('application', 'ondemand')
+ auth_string = connection.get('authString')
+ identifier = connection.get('identifier')
+ server = connection.get('server')
+ formats.append({
+ 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
+ 'play_path': identifier,
+ 'app': '%s?%s' % (application, auth_string),
+ 'page_url': 'http://www.bbc.co.uk',
+ 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
+ 'rtmp_live': False,
+ 'ext': 'flv',
+ 'format_id': supplier,
+ })
+ return formats
+
+ def _extract_items(self, playlist):
+ return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
+
+ def _findall_ns(self, element, xpath):
+ elements = []
+ for ns in self._NAMESPACES:
+ elements.extend(element.findall(xpath % ns))
+ return elements
+
+ def _extract_medias(self, media_selection):
+ error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
+ if error is None:
+ error = media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
+ if error is not None:
+ raise BBCCoUkIE.MediaSelectionError(error.get('id'))
+ return self._findall_ns(media_selection, './{%s}media')
+
+ def _extract_connections(self, media):
+ return self._findall_ns(media, './{%s}connection')
+
+ def _extract_video(self, media, programme_id):
+ formats = []
+ vbr = int_or_none(media.get('bitrate'))
+ vcodec = media.get('encoding')
+ service = media.get('service')
+ width = int_or_none(media.get('width'))
+ height = int_or_none(media.get('height'))
+ file_size = int_or_none(media.get('media_file_size'))
+ for connection in self._extract_connections(media):
+ conn_formats = self._extract_connection(connection, programme_id)
+ for format in conn_formats:
+ format.update({
+ 'width': width,
+ 'height': height,
+ 'vbr': vbr,
+ 'vcodec': vcodec,
+ 'filesize': file_size,
+ })
+ if service:
+ format['format_id'] = '%s_%s' % (service, format['format_id'])
+ formats.extend(conn_formats)
+ return formats
+
+ def _extract_audio(self, media, programme_id):
+ formats = []
+ abr = int_or_none(media.get('bitrate'))
+ acodec = media.get('encoding')
+ service = media.get('service')
+ for connection in self._extract_connections(media):
+ conn_formats = self._extract_connection(connection, programme_id)
+ for format in conn_formats:
+ format.update({
+ 'format_id': '%s_%s' % (service, format['format_id']),
+ 'abr': abr,
+ 'acodec': acodec,
+ })
+ formats.extend(conn_formats)
+ return formats
+
+ def _get_subtitles(self, media, programme_id):
+ subtitles = {}
+ for connection in self._extract_connections(media):
+ captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
+ lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
+ subtitles[lang] = [
+ {
+ 'url': connection.get('href'),
+ 'ext': 'ttml',
+ },
+ ]
+ return subtitles
+
+ def _raise_extractor_error(self, media_selection_error):
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
+ expected=True)
+
+ def _download_media_selector(self, programme_id):
+ last_exception = None
+ for mediaselector_url in self._MEDIASELECTOR_URLS:
+ try:
+ return self._download_media_selector_url(
+ mediaselector_url % programme_id, programme_id)
+ except BBCCoUkIE.MediaSelectionError as e:
+ if e.id in ('notukerror', 'geolocation'):
+ last_exception = e
+ continue
+ self._raise_extractor_error(e)
+ self._raise_extractor_error(last_exception)
+
+ def _download_media_selector_url(self, url, programme_id=None):
+ try:
+ media_selection = self._download_xml(
+ url, programme_id, 'Downloading media selection XML')
+ except ExtractorError as ee:
+ if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
+ media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
+ else:
+ raise
+ return self._process_media_selector(media_selection, programme_id)
+
+ def _process_media_selector(self, media_selection, programme_id):
+ formats = []
+ subtitles = None
+
+ for media in self._extract_medias(media_selection):
+ kind = media.get('kind')
+ if kind == 'audio':
+ formats.extend(self._extract_audio(media, programme_id))
+ elif kind == 'video':
+ formats.extend(self._extract_video(media, programme_id))
+ elif kind == 'captions':
+ subtitles = self.extract_subtitles(media, programme_id)
+ return formats, subtitles
+
+ def _download_playlist(self, playlist_id):
+ try:
+ playlist = self._download_json(
+ 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
+ playlist_id, 'Downloading playlist JSON')
+
+ version = playlist.get('defaultAvailableVersion')
+ if version:
+ smp_config = version['smpConfig']
+ title = smp_config['title']
+ description = smp_config['summary']
+ for item in smp_config['items']:
+ kind = item['kind']
+ if kind != 'programme' and kind != 'radioProgramme':
+ continue
+ programme_id = item.get('vpid')
+ duration = int_or_none(item.get('duration'))
+ formats, subtitles = self._download_media_selector(programme_id)
+ return programme_id, title, description, duration, formats, subtitles
+ except ExtractorError as ee:
+ if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
+ raise
+
+ # fallback to legacy playlist
+ return self._process_legacy_playlist(playlist_id)
+
+ def _process_legacy_playlist_url(self, url, display_id):
+ playlist = self._download_legacy_playlist_url(url, display_id)
+ return self._extract_from_legacy_playlist(playlist, display_id)
+
+ def _process_legacy_playlist(self, playlist_id):
+ return self._process_legacy_playlist_url(
+ 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
+
+ def _download_legacy_playlist_url(self, url, playlist_id=None):
+ return self._download_xml(
+ url, playlist_id, 'Downloading legacy playlist XML')
+
+ def _extract_from_legacy_playlist(self, playlist, playlist_id):
+ no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
+ if no_items is not None:
+ reason = no_items.get('reason')
+ if reason == 'preAvailability':
+ msg = 'Episode %s is not yet available' % playlist_id
+ elif reason == 'postAvailability':
+ msg = 'Episode %s is no longer available' % playlist_id
+ elif reason == 'noMedia':
+ msg = 'Episode %s is not currently available' % playlist_id
+ else:
+ msg = 'Episode %s is not available: %s' % (playlist_id, reason)
+ raise ExtractorError(msg, expected=True)
+
+ for item in self._extract_items(playlist):
+ kind = item.get('kind')
+ if kind != 'programme' and kind != 'radioProgramme':
+ continue
+ title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
+ description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
+ description = description_el.text if description_el is not None else None
+
+ def get_programme_id(item):
+ def get_from_attributes(item):
+ for p in ('identifier', 'group'):
+ value = item.get(p)
+ if value and re.match(r'^[pb][\da-z]{7}$', value):
+ return value
+ value = get_from_attributes(item)
+ if value:
+ return value
+ mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
+ if mediator is not None:
+ return get_from_attributes(mediator)
+
+ programme_id = get_programme_id(item)
+ duration = int_or_none(item.get('duration'))
+
+ if programme_id:
+ formats, subtitles = self._download_media_selector(programme_id)
+ else:
+ formats, subtitles = self._process_media_selector(item, playlist_id)
+ programme_id = playlist_id
+
+ return programme_id, title, description, duration, formats, subtitles
+
+ def _real_extract(self, url):
+ group_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, group_id, 'Downloading video page')
+
+ programme_id = None
+ duration = None
+
+ tviplayer = self._search_regex(
+ r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
+ webpage, 'player', default=None)
+
+ if tviplayer:
+ player = self._parse_json(tviplayer, group_id).get('player', {})
+ duration = int_or_none(player.get('duration'))
+ programme_id = player.get('vpid')
+
+ if not programme_id:
+ programme_id = self._search_regex(
+ r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
+
+ if programme_id:
+ formats, subtitles = self._download_media_selector(programme_id)
+ title = self._og_search_title(webpage)
+ description = self._search_regex(
+ r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
+ webpage, 'description', fatal=False)
+ else:
+ programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': self._og_search_thumbnail(webpage, default=None),
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+
+class BBCIE(BBCCoUkIE):
+ IE_NAME = 'bbc'
+ IE_DESC = 'BBC'
+ _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
+
+ _MEDIASELECTOR_URLS = [
+ # Provides HQ HLS streams but fails with geolocation in some cases even
+ # when the content is not geo-restricted at all
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
+ # Provides more formats, namely direct mp4 links, but fails on some videos
+ # with a notukerror for non-UK (?) users (e.g.
+ # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
+ 'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
+ # Provides fewer formats, but works everywhere for everybody (hopefully)
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
+ ]
+
+ _TESTS = [{
+ # article with multiple videos embedded with data-playable containing vpids
+ 'url': 'http://www.bbc.com/news/world-europe-32668511',
+ 'info_dict': {
+ 'id': 'world-europe-32668511',
+ 'title': 'Russia stages massive WW2 parade despite Western boycott',
+ 'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
+ },
+ 'playlist_count': 2,
+ }, {
+ # article with multiple videos embedded with data-playable (more videos)
+ 'url': 'http://www.bbc.com/news/business-28299555',
+ 'info_dict': {
+ 'id': 'business-28299555',
+ 'title': 'Farnborough Airshow: Video highlights',
+ 'description': 'BBC reports and video highlights at the Farnborough Airshow.',
+ },
+ 'playlist_count': 9,
+ 'skip': 'Save time',
+ }, {
+ # article with multiple videos embedded with `new SMP()`
+ # broken
+ 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
+ 'info_dict': {
+ 'id': '3662a707-0af9-3149-963f-47bea720b460',
+ 'title': 'BBC Blogs - Adam Curtis - BUGGER',
+ },
+ 'playlist_count': 18,
+ }, {
+ # single video embedded with data-playable containing vpid
+ 'url': 'http://www.bbc.com/news/world-europe-32041533',
+ 'info_dict': {
+ 'id': 'p02mprgb',
+ 'ext': 'mp4',
+ 'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
+ 'description': 'md5:2868290467291b37feda7863f7a83f54',
+ 'duration': 47,
+ 'timestamp': 1427219242,
+ 'upload_date': '20150324',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # article with single video embedded with data-playable containing XML playlist
+ # with direct video links as progressiveDownloadUrl (for now these are extracted)
+ # and playlist with f4m and m3u8 as streamingUrl
+ 'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
+ 'info_dict': {
+ 'id': '150615_telabyad_kentin_cogu',
+ 'ext': 'mp4',
+ 'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
+ 'timestamp': 1434397334,
+ 'upload_date': '20150615',
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # single video embedded with data-playable containing XML playlists (regional section)
+ 'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
+ 'info_dict': {
+ 'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
+ 'ext': 'mp4',
+ 'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
+ 'timestamp': 1434713142,
+ 'upload_date': '20150619',
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # single video from video playlist embedded with vxp-playlist-data JSON
+ 'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
+ 'info_dict': {
+ 'id': 'p02w6qjc',
+ 'ext': 'mp4',
+ 'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
+ 'duration': 56,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # single video story with digitalData
+ 'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
+ 'info_dict': {
+ 'id': 'p02q6gc4',
+ 'ext': 'flv',
+ 'title': 'Sri Lanka’s spicy secret',
+ 'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
+ 'timestamp': 1437674293,
+ 'upload_date': '20150723',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # single video story without digitalData
+ 'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
+ 'info_dict': {
+ 'id': 'p018zqqg',
+ 'ext': 'mp4',
+ 'title': 'Hyundai Santa Fe Sport: Rock star',
+ 'description': 'md5:b042a26142c4154a6e472933cf20793d',
+ 'timestamp': 1415867444,
+ 'upload_date': '20141113',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # single video with playlist.sxml URL in playlist param
+ 'url': 'http://www.bbc.com/sport/0/football/33653409',
+ 'info_dict': {
+ 'id': 'p02xycnp',
+ 'ext': 'mp4',
+ 'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
+ 'duration': 140,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # article with multiple videos embedded with playlist.sxml in playlist param
+ 'url': 'http://www.bbc.com/sport/0/football/34475836',
+ 'info_dict': {
+ 'id': '34475836',
+ 'title': 'What Liverpool can expect from Klopp',
+ },
+ 'playlist_count': 3,
+ }, {
+ # single video with playlist URL from weather section
+ 'url': 'http://www.bbc.com/weather/features/33601775',
+ 'only_matching': True,
+ }, {
+ # custom redirection to www.bbc.com
+ 'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
+ 'only_matching': True,
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url)
+
+ def _extract_from_media_meta(self, media_meta, video_id):
+ # Direct links to media in media metadata (e.g.
+ # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
+ # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
+ source_files = media_meta.get('sourceFiles')
+ if source_files:
+ return [{
+ 'url': f['url'],
+ 'format_id': format_id,
+ 'ext': f.get('encoding'),
+ 'tbr': float_or_none(f.get('bitrate'), 1000),
+ 'filesize': int_or_none(f.get('filesize')),
+ } for format_id, f in source_files.items() if f.get('url')], []
+
+ programme_id = media_meta.get('externalId')
+ if programme_id:
+ return self._download_media_selector(programme_id)
+
+ # Process playlist.sxml as legacy playlist
+ href = media_meta.get('href')
+ if href:
+ playlist = self._download_legacy_playlist_url(href)
+ _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
+ return formats, subtitles
+
+ return [], []
+
+ def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
+ programme_id, title, description, duration, formats, subtitles = \
+ self._process_legacy_playlist_url(url, playlist_id)
+ self._sort_formats(formats)
+ return {
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, playlist_id)
+
+ timestamp = None
+ playlist_title = None
+ playlist_description = None
+
+ ld = self._parse_json(
+ self._search_regex(
+ r'(?s)<script type="application/ld\+json">(.+?)</script>',
+ webpage, 'ld json', default='{}'),
+ playlist_id, fatal=False)
+ if ld:
+ timestamp = parse_iso8601(ld.get('datePublished'))
+ playlist_title = ld.get('headline')
+ playlist_description = ld.get('articleBody')
+
+ if not timestamp:
+ timestamp = parse_iso8601(self._search_regex(
+ [r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
+ r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
+ r'"datePublished":\s*"([^"]+)'],
+ webpage, 'date', default=None))
+
+ entries = []
+
+ # article with multiple videos embedded with playlist.sxml (e.g.
+ # http://www.bbc.com/sport/0/football/34475836)
+ playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
+ if playlists:
+ entries = [
+ self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
+ for playlist_url in playlists]
+
+ # news article with multiple videos embedded with data-playable
+ data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
+ if data_playables:
+ for _, data_playable_json in data_playables:
+ data_playable = self._parse_json(
+ unescapeHTML(data_playable_json), playlist_id, fatal=False)
+ if not data_playable:
+ continue
+ settings = data_playable.get('settings', {})
+ if settings:
+ # data-playable with video vpid in settings.playlistObject.items (e.g.
+ # http://www.bbc.com/news/world-us-canada-34473351)
+ playlist_object = settings.get('playlistObject', {})
+ if playlist_object:
+ items = playlist_object.get('items')
+ if items and isinstance(items, list):
+ title = playlist_object['title']
+ description = playlist_object.get('summary')
+ duration = int_or_none(items[0].get('duration'))
+ programme_id = items[0].get('vpid')
+ formats, subtitles = self._download_media_selector(programme_id)
+ self._sort_formats(formats)
+ entries.append({
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ })
+ else:
+ # data-playable without vpid but with a playlist.sxml URL
+ # in otherSettings.playlist (e.g.
+ # http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
+ playlist = data_playable.get('otherSettings', {}).get('playlist', {})
+ if playlist:
+ entries.append(self._extract_from_playlist_sxml(
+ playlist.get('progressiveDownloadUrl'), playlist_id, timestamp))
+
+ if entries:
+ playlist_title = playlist_title or remove_end(self._og_search_title(webpage), ' - BBC News')
+ playlist_description = playlist_description or self._og_search_description(webpage, default=None)
+ return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
+
+ # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
+ programme_id = self._search_regex(
+ [r'data-video-player-vpid="([\da-z]{8})"',
+ r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'],
+ webpage, 'vpid', default=None)
+
+ if programme_id:
+ formats, subtitles = self._download_media_selector(programme_id)
+ self._sort_formats(formats)
+ # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
+ digital_data = self._parse_json(
+ self._search_regex(
+ r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
+ programme_id, fatal=False)
+ page_info = digital_data.get('page', {}).get('pageInfo', {})
+ title = page_info.get('pageName') or self._og_search_title(webpage)
+ description = page_info.get('description') or self._og_search_description(webpage)
+ timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
+ return {
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ playlist_title = self._html_search_regex(
+ r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title')
+ playlist_description = self._og_search_description(webpage, default=None)
+
+ def extract_all(pattern):
+ return list(filter(None, map(
+ lambda s: self._parse_json(s, playlist_id, fatal=False),
+ re.findall(pattern, webpage))))
+
+ # Multiple video article (e.g.
+ # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
+ EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?'
+ entries = []
+ for match in extract_all(r'new\s+SMP\(({.+?})\)'):
+ embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
+ if embed_url and re.match(EMBED_URL, embed_url):
+ entries.append(embed_url)
+ entries.extend(re.findall(
+ r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
+ if entries:
+ return self.playlist_result(
+ [self.url_result(entry, 'BBCCoUk') for entry in entries],
+ playlist_id, playlist_title, playlist_description)
+
+ # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
+ medias = extract_all(r"data-media-meta='({[^']+})'")
+
+ if not medias:
+ # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
+ media_asset = self._search_regex(
+ r'mediaAssetPage\.init\(\s*({.+?}), "/',
+ webpage, 'media asset', default=None)
+ if media_asset:
+ media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False)
+ medias = []
+ for video in media_asset_page.get('videos', {}).values():
+ medias.extend(video.values())
+
+ if not medias:
+ # Multiple video playlist with single `now playing` entry (e.g.
+ # http://www.bbc.com/news/video_and_audio/must_see/33767813)
+ vxp_playlist = self._parse_json(
+ self._search_regex(
+ r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
+ webpage, 'playlist data'),
+ playlist_id)
+ playlist_medias = []
+ for item in vxp_playlist:
+ media = item.get('media')
+ if not media:
+ continue
+ playlist_medias.append(media)
+ # Download a single video if we find media with an asset id matching the video id from the URL
+ if item.get('advert', {}).get('assetId') == playlist_id:
+ medias = [media]
+ break
+ # Fallback to the whole playlist
+ if not medias:
+ medias = playlist_medias
+
+ entries = []
+ for num, media_meta in enumerate(medias, start=1):
+ formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
+ if not formats:
+ continue
+ self._sort_formats(formats)
+
+ video_id = media_meta.get('externalId')
+ if not video_id:
+ video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
+
+ title = media_meta.get('caption')
+ if not title:
+ title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
+
+ duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
+
+ images = []
+ for image in media_meta.get('images', {}).values():
+ images.extend(image.values())
+ if 'image' in media_meta:
+ images.append(media_meta['image'])
+
+ thumbnails = [{
+ 'url': image.get('href'),
+ 'width': int_or_none(image.get('width')),
+ 'height': int_or_none(image.get('height')),
+ } for image in images]
+
+ entries.append({
+ 'id': video_id,
+ 'title': title,
+ 'thumbnails': thumbnails,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ })
+
+ return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
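The comments on _MEDIASELECTOR_URLS above explain the trade-off between the endpoints: the richer ones sometimes answer with geolocation or notukerror even for unrestricted content, so _download_media_selector walks the list and only surfaces a geo error once every endpoint has failed. A minimal standalone sketch of that fallback pattern, with a placeholder fetch callable and made-up endpoint templates rather than the real BBC ones:

class MediaSelectionError(Exception):
    def __init__(self, error_id):
        self.id = error_id


def download_media_selector(programme_id, fetch, mediaselector_urls):
    # fetch(url) is assumed to return (formats, subtitles) or raise MediaSelectionError
    last_exception = None
    for template in mediaselector_urls:
        try:
            return fetch(template % programme_id)
        except MediaSelectionError as e:
            if e.id in ('notukerror', 'geolocation'):
                # geo failures may be endpoint-specific, so remember and try the next one
                last_exception = e
                continue
            raise  # any other error is fatal immediately
    # every endpoint failed with a geo error; report the last one
    raise last_exception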
diff --git a/youtube_dl/extractor/bbccouk.py b/youtube_dl/extractor/bbccouk.py
deleted file mode 100644
index 5825d2867..000000000
--- a/youtube_dl/extractor/bbccouk.py
+++ /dev/null
@@ -1,379 +0,0 @@
-from __future__ import unicode_literals
-
-import xml.etree.ElementTree
-
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- int_or_none,
-)
-from ..compat import compat_HTTPError
-
-
-class BBCCoUkIE(InfoExtractor):
- IE_NAME = 'bbc.co.uk'
- IE_DESC = 'BBC iPlayer'
- _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
-
- _TESTS = [
- {
- 'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
- 'info_dict': {
- 'id': 'b039d07m',
- 'ext': 'flv',
- 'title': 'Kaleidoscope, Leonard Cohen',
- 'description': 'The Canadian poet and songwriter reflects on his musical career.',
- 'duration': 1740,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- },
- {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
- 'info_dict': {
- 'id': 'b00yng1d',
- 'ext': 'flv',
- 'title': 'The Man in Black: Series 3: The Printed Name',
- 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
- 'duration': 1800,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'Episode is no longer available on BBC iPlayer Radio',
- },
- {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
- 'info_dict': {
- 'id': 'b00yng1d',
- 'ext': 'flv',
- 'title': 'The Voice UK: Series 3: Blind Auditions 5',
- 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
- 'duration': 5100,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
- },
- {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
- 'info_dict': {
- 'id': 'b03k3pb7',
- 'ext': 'flv',
- 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
- 'description': '2. Invasion',
- 'duration': 3600,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
- }, {
- 'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
- 'info_dict': {
- 'id': 'b04v209v',
- 'ext': 'flv',
- 'title': 'Pete Tong, The Essential New Tune Special',
- 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
- 'duration': 10800,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- }, {
- 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
- 'note': 'Audio',
- 'info_dict': {
- 'id': 'p02frcch',
- 'ext': 'flv',
- 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
- 'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
- 'duration': 3507,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- }, {
- 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
- 'note': 'Video',
- 'info_dict': {
- 'id': 'p025c103',
- 'ext': 'flv',
- 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
- 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
- 'duration': 226,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
- 'info_dict': {
- 'id': 'p02n76xf',
- 'ext': 'flv',
- 'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
- 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
- 'duration': 3540,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'geolocation',
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
- 'info_dict': {
- 'id': 'b05zmgw1',
- 'ext': 'flv',
- 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
- 'title': 'Royal Academy Summer Exhibition',
- 'duration': 3540,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'geolocation',
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
- 'only_matching': True,
- }, {
- 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
- 'only_matching': True,
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
- 'only_matching': True,
- }
- ]
-
- def _extract_asx_playlist(self, connection, programme_id):
- asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
- return [ref.get('href') for ref in asx.findall('./Entry/ref')]
-
- def _extract_connection(self, connection, programme_id):
- formats = []
- protocol = connection.get('protocol')
- supplier = connection.get('supplier')
- if protocol == 'http':
- href = connection.get('href')
- # ASX playlist
- if supplier == 'asx':
- for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
- formats.append({
- 'url': ref,
- 'format_id': 'ref%s_%s' % (i, supplier),
- })
- # Direct link
- else:
- formats.append({
- 'url': href,
- 'format_id': supplier,
- })
- elif protocol == 'rtmp':
- application = connection.get('application', 'ondemand')
- auth_string = connection.get('authString')
- identifier = connection.get('identifier')
- server = connection.get('server')
- formats.append({
- 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
- 'play_path': identifier,
- 'app': '%s?%s' % (application, auth_string),
- 'page_url': 'http://www.bbc.co.uk',
- 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
- 'rtmp_live': False,
- 'ext': 'flv',
- 'format_id': supplier,
- })
- return formats
-
- def _extract_items(self, playlist):
- return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
-
- def _extract_medias(self, media_selection):
- error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
- if error is not None:
- raise ExtractorError(
- '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
- return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
-
- def _extract_connections(self, media):
- return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
-
- def _extract_video(self, media, programme_id):
- formats = []
- vbr = int(media.get('bitrate'))
- vcodec = media.get('encoding')
- service = media.get('service')
- width = int(media.get('width'))
- height = int(media.get('height'))
- file_size = int(media.get('media_file_size'))
- for connection in self._extract_connections(media):
- conn_formats = self._extract_connection(connection, programme_id)
- for format in conn_formats:
- format.update({
- 'format_id': '%s_%s' % (service, format['format_id']),
- 'width': width,
- 'height': height,
- 'vbr': vbr,
- 'vcodec': vcodec,
- 'filesize': file_size,
- })
- formats.extend(conn_formats)
- return formats
-
- def _extract_audio(self, media, programme_id):
- formats = []
- abr = int(media.get('bitrate'))
- acodec = media.get('encoding')
- service = media.get('service')
- for connection in self._extract_connections(media):
- conn_formats = self._extract_connection(connection, programme_id)
- for format in conn_formats:
- format.update({
- 'format_id': '%s_%s' % (service, format['format_id']),
- 'abr': abr,
- 'acodec': acodec,
- })
- formats.extend(conn_formats)
- return formats
-
- def _get_subtitles(self, media, programme_id):
- subtitles = {}
- for connection in self._extract_connections(media):
- captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
- lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
- subtitles[lang] = [
- {
- 'url': connection.get('href'),
- 'ext': 'ttml',
- },
- ]
- return subtitles
-
- def _download_media_selector(self, programme_id):
- try:
- media_selection = self._download_xml(
- 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
- programme_id, 'Downloading media selection XML')
- except ExtractorError as ee:
- if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
- media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
- else:
- raise
-
- formats = []
- subtitles = None
-
- for media in self._extract_medias(media_selection):
- kind = media.get('kind')
- if kind == 'audio':
- formats.extend(self._extract_audio(media, programme_id))
- elif kind == 'video':
- formats.extend(self._extract_video(media, programme_id))
- elif kind == 'captions':
- subtitles = self.extract_subtitles(media, programme_id)
-
- return formats, subtitles
-
- def _download_playlist(self, playlist_id):
- try:
- playlist = self._download_json(
- 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
- playlist_id, 'Downloading playlist JSON')
-
- version = playlist.get('defaultAvailableVersion')
- if version:
- smp_config = version['smpConfig']
- title = smp_config['title']
- description = smp_config['summary']
- for item in smp_config['items']:
- kind = item['kind']
- if kind != 'programme' and kind != 'radioProgramme':
- continue
- programme_id = item.get('vpid')
- duration = int(item.get('duration'))
- formats, subtitles = self._download_media_selector(programme_id)
- return programme_id, title, description, duration, formats, subtitles
- except ExtractorError as ee:
- if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
- raise
-
- # fallback to legacy playlist
- playlist = self._download_xml(
- 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id,
- playlist_id, 'Downloading legacy playlist XML')
-
- no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
- if no_items is not None:
- reason = no_items.get('reason')
- if reason == 'preAvailability':
- msg = 'Episode %s is not yet available' % playlist_id
- elif reason == 'postAvailability':
- msg = 'Episode %s is no longer available' % playlist_id
- elif reason == 'noMedia':
- msg = 'Episode %s is not currently available' % playlist_id
- else:
- msg = 'Episode %s is not available: %s' % (playlist_id, reason)
- raise ExtractorError(msg, expected=True)
-
- for item in self._extract_items(playlist):
- kind = item.get('kind')
- if kind != 'programme' and kind != 'radioProgramme':
- continue
- title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
- description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
- programme_id = item.get('identifier')
- duration = int(item.get('duration'))
- formats, subtitles = self._download_media_selector(programme_id)
-
- return programme_id, title, description, duration, formats, subtitles
-
- def _real_extract(self, url):
- group_id = self._match_id(url)
-
- webpage = self._download_webpage(url, group_id, 'Downloading video page')
-
- programme_id = None
-
- tviplayer = self._search_regex(
- r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
- webpage, 'player', default=None)
-
- if tviplayer:
- player = self._parse_json(tviplayer, group_id).get('player', {})
- duration = int_or_none(player.get('duration'))
- programme_id = player.get('vpid')
-
- if not programme_id:
- programme_id = self._search_regex(
- r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
-
- if programme_id:
- formats, subtitles = self._download_media_selector(programme_id)
- title = self._og_search_title(webpage)
- description = self._search_regex(
- r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
- webpage, 'description', fatal=False)
- else:
- programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
-
- self._sort_formats(formats)
-
- return {
- 'id': programme_id,
- 'title': title,
- 'description': description,
- 'thumbnail': self._og_search_thumbnail(webpage, default=None),
- 'duration': duration,
- 'formats': formats,
- 'subtitles': subtitles,
- }
diff --git a/youtube_dl/extractor/beeg.py b/youtube_dl/extractor/beeg.py
index b38057f2f..e6c928699 100644
--- a/youtube_dl/extractor/beeg.py
+++ b/youtube_dl/extractor/beeg.py
@@ -1,65 +1,67 @@
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
class BeegIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
_TEST = {
'url': 'http://beeg.com/5416503',
- 'md5': '1bff67111adb785c51d1b42959ec10e5',
+ 'md5': '46c384def73b33dbc581262e5ee67cef',
'info_dict': {
'id': '5416503',
'ext': 'mp4',
'title': 'Sultry Striptease',
- 'description': 'md5:6db3c6177972822aaba18652ff59c773',
- 'categories': list, # NSFW
- 'thumbnail': 're:https?://.*\.jpg$',
+ 'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
+ 'timestamp': 1391813355,
+ 'upload_date': '20140207',
+ 'duration': 383,
+ 'tags': list,
'age_limit': 18,
}
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- webpage = self._download_webpage(url, video_id)
-
- quality_arr = self._search_regex(
- r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats')
+ video_id = self._match_id(url)
- formats = [{
- 'url': fmt[1],
- 'format_id': fmt[0],
- 'height': int(fmt[0][:-1]),
- } for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)]
+ video = self._download_json(
+ 'http://beeg.com/api/v1/video/%s' % video_id, video_id)
+ formats = []
+ for format_id, video_url in video.items():
+ height = self._search_regex(
+ r'^(\d+)[pP]$', format_id, 'height', default=None)
+ if not height:
+ continue
+ formats.append({
+ 'url': self._proto_relative_url(video_url.replace('{DATA_MARKERS}', ''), 'http:'),
+ 'format_id': format_id,
+ 'height': int(height),
+ })
self._sort_formats(formats)
- title = self._html_search_regex(
- r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
+ title = video['title']
+ video_id = video.get('id') or video_id
+ display_id = video.get('code')
+ description = video.get('desc')
- description = self._html_search_regex(
- r'<meta name="description" content="([^"]*)"',
- webpage, 'description', fatal=False)
- thumbnail = self._html_search_regex(
- r'\'previewer.url\'\s*:\s*"([^"]*)"',
- webpage, 'thumbnail', fatal=False)
+ timestamp = parse_iso8601(video.get('date'), ' ')
+ duration = int_or_none(video.get('duration'))
- categories_str = self._html_search_regex(
- r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
- categories = (
- None if categories_str is None
- else categories_str.split(','))
+ tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None
return {
'id': video_id,
+ 'display_id': display_id,
'title': title,
'description': description,
- 'thumbnail': thumbnail,
- 'categories': categories,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'tags': tags,
'formats': formats,
'age_limit': 18,
}
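The rewritten Beeg extractor now builds its formats straight from the JSON API response: any key shaped like '<height>p' is treated as a format, the '{DATA_MARKERS}' placeholder is stripped, and protocol-relative URLs are forced to http. A rough sketch of that mapping over an invented payload (the field names mirror the code above, the values are made up):

import re

video = {
    'title': 'Some clip',
    '480p': '//media.example.invalid/480p/clip.mp4{DATA_MARKERS}',
    '720p': '//media.example.invalid/720p/clip.mp4{DATA_MARKERS}',
    'duration': '383',
}

formats = []
for format_id, value in video.items():
    m = re.match(r'^(\d+)[pP]$', format_id)
    if not m:
        continue  # non-format fields such as title/duration
    url = value.replace('{DATA_MARKERS}', '')
    if url.startswith('//'):
        url = 'http:' + url  # same effect as _proto_relative_url(..., 'http:')
    formats.append({
        'url': url,
        'format_id': format_id,
        'height': int(m.group(1)),
    })

formats.sort(key=lambda f: f['height'])  # rough stand-in for _sort_formats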
diff --git a/youtube_dl/extractor/bild.py b/youtube_dl/extractor/bild.py
index 4d8cce1ef..1a0184861 100644
--- a/youtube_dl/extractor/bild.py
+++ b/youtube_dl/extractor/bild.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
- fix_xml_ampersands,
+ unescapeHTML,
)
@@ -17,26 +17,24 @@ class BildIE(InfoExtractor):
'info_dict': {
'id': '38184146',
'ext': 'mp4',
- 'title': 'BILD hat sie getestet',
+ 'title': 'Das können die neuen iPads',
+ 'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 196,
- 'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
- xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
- doc = self._download_xml(xml_url, video_id, transform_source=fix_xml_ampersands)
-
- duration = int_or_none(doc.attrib.get('duration'), scale=1000)
+ video_data = self._download_json(
+ url.split('.bild.html')[0] + ',view=json.bild.html', video_id)
return {
'id': video_id,
- 'title': doc.attrib['ueberschrift'],
- 'description': doc.attrib.get('text'),
- 'url': doc.attrib['src'],
- 'thumbnail': doc.attrib.get('img'),
- 'duration': duration,
+ 'title': unescapeHTML(video_data['title']).strip(),
+ 'description': unescapeHTML(video_data.get('description')),
+ 'url': video_data['clipList'][0]['srces'][0]['src'],
+ 'thumbnail': video_data.get('poster'),
+ 'duration': int_or_none(video_data.get('durationSec')),
}
diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py
index 809287d14..aa08051b1 100644
--- a/youtube_dl/extractor/breakcom.py
+++ b/youtube_dl/extractor/breakcom.py
@@ -18,6 +18,7 @@ class BreakIE(InfoExtractor):
'id': '2468056',
'ext': 'mp4',
'title': 'When Girls Act Like D-Bags',
+ 'age_limit': 13,
}
}, {
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py
index 699b4f7d0..004372f8d 100644
--- a/youtube_dl/extractor/canalplus.py
+++ b/youtube_dl/extractor/canalplus.py
@@ -78,7 +78,8 @@ class CanalplusIE(InfoExtractor):
if video_id is None:
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
- r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
+ [r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)', r'id=["\']canal_video_player(?P<id>\d+)'],
+ webpage, 'video id', group='id')
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
doc = self._download_xml(info_url, video_id, 'Downloading video XML')
@@ -106,15 +107,11 @@ class CanalplusIE(InfoExtractor):
continue
format_id = fmt.tag
if format_id == 'HLS':
- hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
- for fmt in hls_formats:
- fmt['preference'] = preference(format_id)
- formats.extend(hls_formats)
+ formats.extend(self._extract_m3u8_formats(
+ format_url, video_id, 'mp4', preference=preference(format_id)))
elif format_id == 'HDS':
- hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
- for fmt in hds_formats:
- fmt['preference'] = preference(format_id)
- formats.extend(hds_formats)
+ formats.extend(self._extract_f4m_formats(
+ format_url + '?hdcore=2.11.3', video_id, preference=preference(format_id)))
else:
formats.append({
'url': format_url,
diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py
index dda583680..e857e66f4 100644
--- a/youtube_dl/extractor/ceskatelevize.py
+++ b/youtube_dl/extractor/ceskatelevize.py
@@ -17,55 +17,81 @@ from ..utils import (
class CeskaTelevizeIE(InfoExtractor):
- _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'
-
- _TESTS = [
- {
- 'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
+ _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(?:[^/]+/)*(?P<id>[^/#?]+)/*(?:[#?].*)?$'
+ _TESTS = [{
+ 'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
+ 'info_dict': {
+ 'id': '61924494876951776',
+ 'ext': 'mp4',
+ 'title': 'Hyde Park Civilizace',
+ 'description': 'md5:fe93f6eda372d150759d11644ebbfb4a',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 3350,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
+ 'info_dict': {
+ 'id': '61924494876844374',
+ 'ext': 'mp4',
+ 'title': 'První republika: Zpěvačka z Dupárny Bobina',
+ 'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 88.4,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }, {
+ # video with 18+ caution trailer
+ 'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
+ 'info_dict': {
+ 'id': '215562210900007-bogotart',
+ 'title': 'Queer: Bogotart',
+ 'description': 'Alternativní průvodce současným queer světem',
+ },
+ 'playlist': [{
'info_dict': {
- 'id': '214411058091220',
+ 'id': '61924494876844842',
'ext': 'mp4',
- 'title': 'Hyde Park Civilizace',
- 'description': 'Věda a současná civilizace. Interaktivní pořad - prostor pro vaše otázky a komentáře',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'duration': 3350,
- },
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ 'title': 'Queer: Bogotart (Varování 18+)',
+ 'duration': 10.2,
},
- },
- {
- 'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
+ }, {
'info_dict': {
- 'id': '14716',
+ 'id': '61924494877068022',
'ext': 'mp4',
- 'title': 'První republika: Zpěvačka z Dupárny Bobina',
- 'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
+ 'title': 'Queer: Bogotart (Queer)',
'thumbnail': 're:^https?://.*\.jpg',
- 'duration': 88.4,
- },
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ 'duration': 1558.3,
},
+ }],
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
},
- ]
+ }]
def _real_extract(self, url):
url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ playlist_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
+ webpage = self._download_webpage(url, playlist_id)
NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
- typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
- episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
+ typ = self._html_search_regex(
+ r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
+ episode_id = self._html_search_regex(
+ r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
data = {
'playlist[0][type]': typ,
@@ -83,7 +109,7 @@ class CeskaTelevizeIE(InfoExtractor):
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)
- playlistpage = self._download_json(req, video_id)
+ playlistpage = self._download_json(req, playlist_id)
playlist_url = playlistpage['url']
if playlist_url == 'error_region':
@@ -92,33 +118,43 @@ class CeskaTelevizeIE(InfoExtractor):
req = compat_urllib_request.Request(compat_urllib_parse_unquote(playlist_url))
req.add_header('Referer', url)
- playlist = self._download_json(req, video_id)
-
- item = playlist['playlist'][0]
- formats = []
- for format_id, stream_url in item['streamUrls'].items():
- formats.extend(self._extract_m3u8_formats(stream_url, video_id, 'mp4'))
- self._sort_formats(formats)
-
- title = self._og_search_title(webpage)
- description = self._og_search_description(webpage)
- duration = float_or_none(item.get('duration'))
- thumbnail = item.get('previewImageUrl')
-
- subtitles = {}
- subs = item.get('subtitles')
- if subs:
- subtitles = self.extract_subtitles(episode_id, subs)
-
- return {
- 'id': episode_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'formats': formats,
- 'subtitles': subtitles,
- }
+ playlist_title = self._og_search_title(webpage)
+ playlist_description = self._og_search_description(webpage)
+
+ playlist = self._download_json(req, playlist_id)['playlist']
+ playlist_len = len(playlist)
+
+ entries = []
+ for item in playlist:
+ formats = []
+ for format_id, stream_url in item['streamUrls'].items():
+ formats.extend(self._extract_m3u8_formats(
+ stream_url, playlist_id, 'mp4', entry_protocol='m3u8_native'))
+ self._sort_formats(formats)
+
+ item_id = item.get('id') or item['assetId']
+ title = item['title']
+
+ duration = float_or_none(item.get('duration'))
+ thumbnail = item.get('previewImageUrl')
+
+ subtitles = {}
+ if item.get('type') == 'VOD':
+ subs = item.get('subtitles')
+ if subs:
+ subtitles = self.extract_subtitles(episode_id, subs)
+
+ entries.append({
+ 'id': item_id,
+ 'title': playlist_title if playlist_len == 1 else '%s (%s)' % (playlist_title, title),
+ 'description': playlist_description if playlist_len == 1 else None,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ })
+
+ return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
def _get_subtitles(self, episode_id, subs):
original_subtitles = self._download_webpage(
diff --git a/youtube_dl/extractor/channel9.py b/youtube_dl/extractor/channel9.py
index 3dfc24f5b..c74553dcf 100644
--- a/youtube_dl/extractor/channel9.py
+++ b/youtube_dl/extractor/channel9.py
@@ -3,7 +3,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ parse_filesize,
+ qualities,
+)
class Channel9IE(InfoExtractor):
@@ -28,7 +32,7 @@ class Channel9IE(InfoExtractor):
'title': 'Developer Kick-Off Session: Stuff We Love',
'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
'duration': 4576,
- 'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
+ 'thumbnail': 're:http://.*\.jpg',
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
@@ -44,31 +48,29 @@ class Channel9IE(InfoExtractor):
'title': 'Self-service BI with Power BI - nuclear testing',
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
- 'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
+ 'thumbnail': 're:http://.*\.jpg',
'authors': ['Mike Wilmot'],
},
+ },
+ {
+ # low quality mp4 is best
+ 'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
+ 'info_dict': {
+ 'id': 'Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
+ 'ext': 'mp4',
+ 'title': 'Ranges for the Standard Library',
+ 'description': 'md5:2e6b4917677af3728c5f6d63784c4c5d',
+ 'duration': 5646,
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}
]
_RSS_URL = 'http://channel9.msdn.com/%s/RSS'
- # Sorted by quality
- _known_formats = ['MP3', 'MP4', 'Mid Quality WMV', 'Mid Quality MP4', 'High Quality WMV', 'High Quality MP4']
-
- def _restore_bytes(self, formatted_size):
- if not formatted_size:
- return 0
- m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size)
- if not m:
- return 0
- units = m.group('units')
- try:
- exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper())
- except ValueError:
- return 0
- size = float(m.group('size'))
- return int(size * (1024 ** exponent))
-
def _formats_from_html(self, html):
FORMAT_REGEX = r'''
(?x)
@@ -78,16 +80,20 @@ class Channel9IE(InfoExtractor):
<h3>File\s+size</h3>\s*(?P<filesize>.*?)\s*
</div>)? # File size part may be missing
'''
- # Extract known formats
+ quality = qualities((
+ 'MP3', 'MP4',
+ 'Low Quality WMV', 'Low Quality MP4',
+ 'Mid Quality WMV', 'Mid Quality MP4',
+ 'High Quality WMV', 'High Quality MP4'))
formats = [{
'url': x.group('url'),
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
- 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
- 'preference': self._known_formats.index(x.group('quality')),
+ 'filesize_approx': parse_filesize(x.group('filesize')),
+ 'quality': quality(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
- } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
+ } for x in list(re.finditer(FORMAT_REGEX, html))]
self._sort_formats(formats)
@@ -158,7 +164,7 @@ class Channel9IE(InfoExtractor):
def _extract_session_day(self, html):
m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html)
- return m.group('day') if m is not None else None
+ return m.group('day').strip() if m is not None else None
def _extract_session_room(self, html):
m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html)
@@ -224,12 +230,12 @@ class Channel9IE(InfoExtractor):
if contents is None:
return contents
- authors = self._extract_authors(html)
+ if len(contents) > 1:
+ raise ExtractorError('Got more than one entry')
+ result = contents[0]
+ result['authors'] = self._extract_authors(html)
- for content in contents:
- content['authors'] = authors
-
- return contents
+ return result
def _extract_session(self, html, content_path):
contents = self._extract_content(html, content_path)
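The Channel 9 changes drop the hand-rolled _restore_bytes helper and the fixed _known_formats preference index in favour of the shared parse_filesize and qualities helpers from youtube_dl.utils. A small illustration of how qualities turns the ordered tuple into a preference value, so labels that are not in the list simply sort last instead of being discarded as before:

from youtube_dl.utils import parse_filesize, qualities

quality = qualities((
    'MP3', 'MP4',
    'Low Quality WMV', 'Low Quality MP4',
    'Mid Quality WMV', 'Mid Quality MP4',
    'High Quality WMV', 'High Quality MP4'))

print(quality('High Quality MP4'))  # 7 -- later entries rank higher
print(quality('MP3'))               # 0
print(quality('Some new label'))    # -1, unknown labels sort below everything else

print(parse_filesize('11.6 MB'))    # approximate byte count parsed from the page text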
diff --git a/youtube_dl/extractor/chaturbate.py b/youtube_dl/extractor/chaturbate.py
new file mode 100644
index 000000000..0b67ba67d
--- /dev/null
+++ b/youtube_dl/extractor/chaturbate.py
@@ -0,0 +1,50 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class ChaturbateIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?P<id>[^/?#]+)'
+ _TESTS = [{
+ 'url': 'https://www.chaturbate.com/siswet19/',
+ 'info_dict': {
+ 'id': 'siswet19',
+ 'ext': 'mp4',
+ 'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'age_limit': 18,
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'https://en.chaturbate.com/siswet19/',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ m3u8_url = self._search_regex(
+ r'src=(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage,
+ 'playlist', default=None, group='url')
+
+ if not m3u8_url:
+ error = self._search_regex(
+ r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>',
+ webpage, 'error', group='error')
+ raise ExtractorError(error, expected=True)
+
+ formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
+
+ return {
+ 'id': video_id,
+ 'title': self._live_title(video_id),
+ 'thumbnail': 'https://cdn-s.highwebmedia.com/uHK3McUtGCG3SMFcd4ZJsRv8/roomimage/%s.jpg' % video_id,
+ 'age_limit': self._rta_search(webpage),
+ 'is_live': True,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py
index c949a4814..fd1770dac 100644
--- a/youtube_dl/extractor/cinemassacre.py
+++ b/youtube_dl/extractor/cinemassacre.py
@@ -6,6 +6,7 @@ import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .bliptv import BlipTVIE
+from .screenwavemedia import ScreenwaveMediaIE
class CinemassacreIE(InfoExtractor):
@@ -83,10 +84,10 @@ class CinemassacreIE(InfoExtractor):
playerdata_url = self._search_regex(
[
- r'src="(http://(?:player2\.screenwavemedia\.com|player\.screenwavemedia\.com/play)/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
- r'<iframe[^>]+src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
+ ScreenwaveMediaIE.EMBED_PATTERN,
+ r'<iframe[^>]+src="(?P<url>(?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
],
- webpage, 'player data URL', default=None)
+ webpage, 'player data URL', default=None, group='url')
if not playerdata_url:
playerdata_url = BlipTVIE._extract_url(webpage)
if not playerdata_url:
diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py
index a5c3cb7c6..7af903571 100644
--- a/youtube_dl/extractor/clipfish.py
+++ b/youtube_dl/extractor/clipfish.py
@@ -1,53 +1,68 @@
from __future__ import unicode_literals
import re
-import time
-import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
- parse_duration,
+ determine_ext,
+ int_or_none,
+ js_to_json,
+ parse_iso8601,
+ remove_end,
)
class ClipfishIE(InfoExtractor):
- IE_NAME = 'clipfish'
-
- _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'
+ _VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
- 'md5': '2521cd644e862936cf2e698206e47385',
+ 'md5': '79bc922f3e8a9097b3d68a93780fd475',
'info_dict': {
'id': '3966754',
'ext': 'mp4',
'title': 'FIFA 14 - E3 2013 Trailer',
+ 'timestamp': 1370938118,
+ 'upload_date': '20130611',
'duration': 82,
- },
- 'skip': 'Blocked in the US'
+ }
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
-
- info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
- (video_id, int(time.time())))
- doc = self._download_xml(
- info_url, video_id, note='Downloading info page')
- title = doc.find('title').text
- video_url = doc.find('filename').text
- if video_url is None:
- xml_bytes = xml.etree.ElementTree.tostring(doc)
- raise ExtractorError('Cannot find video URL in document %r' %
- xml_bytes)
- thumbnail = doc.find('imageurl').text
- duration = parse_duration(doc.find('duration').text)
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_info = self._parse_json(
+ js_to_json(self._html_search_regex(
+ r'(?s)videoObject\s*=\s*({.+?});', webpage, 'video object')),
+ video_id)
+
+ formats = []
+ for video_url in re.findall(r'var\s+videourl\s*=\s*"([^"]+)"', webpage):
+ ext = determine_ext(video_url)
+ if ext == 'm3u8':
+ formats.append({
+ 'url': video_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
+ 'ext': 'mp4',
+ 'format_id': 'hls',
+ })
+ else:
+ formats.append({
+ 'url': video_url,
+ 'format_id': ext,
+ })
+ self._sort_formats(formats)
+
+ title = remove_end(self._og_search_title(webpage), ' - Video')
+ thumbnail = self._og_search_thumbnail(webpage)
+ duration = int_or_none(video_info.get('length'))
+ timestamp = parse_iso8601(self._html_search_meta('uploadDate', webpage, 'upload date'))
return {
'id': video_id,
'title': title,
- 'url': video_url,
+ 'formats': formats,
'thumbnail': thumbnail,
'duration': duration,
+ 'timestamp': timestamp,
}
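
The rewrite above leans on js_to_json to turn the page's JavaScript videoObject literal into something _parse_json can read. A rough illustration of the idea; toy_js_to_json below only handles bare keys and single-quoted strings and is not the real helper:

    import json
    import re

    def toy_js_to_json(code):
        # Quote single-quoted strings and bare object keys so that the
        # result is valid JSON. The real js_to_json covers far more cases.
        code = re.sub(r"'([^']*)'", r'"\1"', code)
        code = re.sub(r'([{,]\s*)([A-Za-z_][A-Za-z_0-9]*)\s*:', r'\1"\2":', code)
        return code

    video_object = "{title: 'FIFA 14 - E3 2013 Trailer', length: 82}"
    print(json.loads(toy_js_to_json(video_object)))
    # {'title': 'FIFA 14 - E3 2013 Trailer', 'length': 82}
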
diff --git a/youtube_dl/extractor/clubic.py b/youtube_dl/extractor/clubic.py
index 14f215c5c..1dfa7c12e 100644
--- a/youtube_dl/extractor/clubic.py
+++ b/youtube_dl/extractor/clubic.py
@@ -12,9 +12,9 @@ from ..utils import (
class ClubicIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?clubic\.com/video/[^/]+/video.*-(?P<id>[0-9]+)\.html'
+ _VALID_URL = r'http://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html',
'md5': '1592b694ba586036efac1776b0b43cd3',
'info_dict': {
@@ -24,7 +24,10 @@ class ClubicIE(InfoExtractor):
'description': 're:Gueule de bois chez Nokia. Le constructeur a indiqué cette.*',
'thumbnail': 're:^http://img\.clubic\.com/.*\.jpg$',
}
- }
+ }, {
+ 'url': 'http://www.clubic.com/video/video-clubic-week-2-0-apple-iphone-6s-et-plus-mais-surtout-le-pencil-469792.html',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
diff --git a/youtube_dl/extractor/comcarcoff.py b/youtube_dl/extractor/comcarcoff.py
index 9c25b2223..81f3d7697 100644
--- a/youtube_dl/extractor/comcarcoff.py
+++ b/youtube_dl/extractor/comcarcoff.py
@@ -36,7 +36,7 @@ class ComCarCoffIE(InfoExtractor):
webpage, 'full data json'))
video_id = full_data['activeVideo']['video']
- video_data = full_data['videos'][video_id]
+ video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id]
thumbnails = [{
'url': video_data['images']['thumb'],
}, {
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
index 91ebb0ce5..3e4bd10b6 100644
--- a/youtube_dl/extractor/comedycentral.py
+++ b/youtube_dl/extractor/comedycentral.py
@@ -151,12 +151,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
- if mobj.group('shortname') in ('tds', 'thedailyshow'):
- url = 'http://thedailyshow.cc.com/full-episodes/'
- else:
- url = 'http://thecolbertreport.cc.com/full-episodes/'
- mobj = re.match(self._VALID_URL, url, re.VERBOSE)
- assert mobj is not None
+ return self.url_result('http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes')
if mobj.group('clip'):
if mobj.group('videotitle'):
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index b9014fc23..6169fbbeb 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -14,10 +14,14 @@ import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
+ compat_cookies,
+ compat_getpass,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
+ compat_urllib_parse,
compat_urllib_parse_urlparse,
+ compat_urllib_request,
compat_urlparse,
compat_str,
)
@@ -35,6 +39,10 @@ from ..utils import (
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
+ unified_strdate,
+ url_basename,
+ xpath_text,
+ xpath_with_ns,
)
@@ -65,7 +73,7 @@ class InfoExtractor(object):
Potential fields:
* url Mandatory. The URL of the video file
- * ext Will be calculated from url if missing
+ * ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
@@ -145,6 +153,7 @@ class InfoExtractor(object):
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
+ release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
@@ -155,13 +164,15 @@ class InfoExtractor(object):
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
- * "url": A url pointing to the subtitles file
+ * "url": A URL pointing to the subtitles file
+ "ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
+ repost_count: Number of reposts of the video
average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
@@ -176,13 +187,18 @@ class InfoExtractor(object):
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
- webpage_url: The url to the video webpage, if given to youtube-dl it
+ webpage_url: The URL to the video webpage; if given to youtube-dl it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
+ tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
+ start_time: Time in seconds where playback should start, as
+ specified in the URL.
+ end_time: Time in seconds where playback should end, as
+ specified in the URL.
Unless mentioned otherwise, the fields should be Unicode strings.
@@ -193,8 +209,8 @@ class InfoExtractor(object):
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
- Additionally, playlists can have "title" and "id" attributes with the same
- semantics as videos (see above).
+ Additionally, playlists can have "title", "description" and "id" attributes
+ with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
@@ -498,10 +514,22 @@ class InfoExtractor(object):
"""Report attempt to log in."""
self.to_screen('Logging in')
+ @staticmethod
+ def raise_login_required(msg='This video is only available for registered users'):
+ raise ExtractorError(
+ '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
+ expected=True)
+
+ @staticmethod
+ def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
+ raise ExtractorError(
+ '%s. You might want to use --proxy to work around this.' % msg,
+ expected=True)
+
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
- """Returns a url that points to a page that should be processed"""
+ """Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
@@ -599,7 +627,7 @@ class InfoExtractor(object):
return (username, password)
- def _get_tfa_info(self):
+ def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
@@ -613,19 +641,26 @@ class InfoExtractor(object):
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
- return None
+ return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
- content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
- property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
+ content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
+ property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
+ % {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
+ @staticmethod
+ def _meta_regex(prop):
+ return r'''(?isx)<meta
+ (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
+ [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
+
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
@@ -635,7 +670,7 @@ class InfoExtractor(object):
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
- return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
+ return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
@@ -656,9 +691,7 @@ class InfoExtractor(object):
if display_name is None:
display_name = name
return self._html_search_regex(
- r'''(?isx)<meta
- (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
- [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
+ self._meta_regex(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
@@ -709,20 +742,23 @@ class InfoExtractor(object):
@staticmethod
def _hidden_inputs(html):
- return dict([
- (input.group('name'), input.group('value')) for input in re.finditer(
- r'''(?x)
- <input\s+
- type=(?P<q_hidden>["\'])hidden(?P=q_hidden)\s+
- name=(?P<q_name>["\'])(?P<name>.+?)(?P=q_name)\s+
- (?:id=(?P<q_id>["\']).+?(?P=q_id)\s+)?
- value=(?P<q_value>["\'])(?P<value>.*?)(?P=q_value)
- ''', html)
- ])
+ html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
+ hidden_inputs = {}
+ for input in re.findall(r'(?i)<input([^>]+)>', html):
+ if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
+ continue
+ name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
+ if not name:
+ continue
+ value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
+ if not value:
+ continue
+ hidden_inputs[name.group('value')] = value.group('value')
+ return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
- r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
+ r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
@@ -837,13 +873,18 @@ class InfoExtractor(object):
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
- transform_source=lambda s: fix_xml_ampersands(s).strip()):
+ transform_source=lambda s: fix_xml_ampersands(s).strip(),
+ fatal=True):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
- transform_source=transform_source)
+ transform_source=transform_source,
+ fatal=fatal)
+
+ if manifest is False:
+ return manifest
formats = []
manifest_version = '1.0'
@@ -864,7 +905,10 @@ class InfoExtractor(object):
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
- formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id))
+ f4m_formats = self._extract_f4m_formats(
+ manifest_url, video_id, preference, f4m_id, fatal=fatal)
+ if f4m_formats:
+ formats.extend(f4m_formats)
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
@@ -967,69 +1011,237 @@ class InfoExtractor(object):
self._sort_formats(formats)
return formats
- # TODO: improve extraction
- def _extract_smil_formats(self, smil_url, video_id, fatal=True):
- smil = self._download_xml(
- smil_url, video_id, 'Downloading SMIL file',
- 'Unable to download SMIL file', fatal=fatal)
+ @staticmethod
+ def _xpath_ns(path, namespace=None):
+ if not namespace:
+ return path
+ out = []
+ for c in path.split('/'):
+ if not c or c == '.':
+ out.append(c)
+ else:
+ out.append('{%s}%s' % (namespace, c))
+ return '/'.join(out)
+
+ def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
+ smil = self._download_smil(smil_url, video_id, fatal=fatal)
+
if smil is False:
assert not fatal
return []
- base = smil.find('./head/meta').get('base')
+ namespace = self._parse_smil_namespace(smil)
+
+ return self._parse_smil_formats(
+ smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
+
+ def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
+ smil = self._download_smil(smil_url, video_id, fatal=fatal)
+ if smil is False:
+ return {}
+ return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
+
+ def _download_smil(self, smil_url, video_id, fatal=True):
+ return self._download_xml(
+ smil_url, video_id, 'Downloading SMIL file',
+ 'Unable to download SMIL file', fatal=fatal)
+
+ def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
+ namespace = self._parse_smil_namespace(smil)
+
+ formats = self._parse_smil_formats(
+ smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
+ subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
+
+ video_id = os.path.splitext(url_basename(smil_url))[0]
+ title = None
+ description = None
+ upload_date = None
+ for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
+ name = meta.attrib.get('name')
+ content = meta.attrib.get('content')
+ if not name or not content:
+ continue
+ if not title and name == 'title':
+ title = content
+ elif not description and name in ('description', 'abstract'):
+ description = content
+ elif not upload_date and name == 'date':
+ upload_date = unified_strdate(content)
+
+ thumbnails = [{
+ 'id': image.get('type'),
+ 'url': image.get('src'),
+ 'width': int_or_none(image.get('width')),
+ 'height': int_or_none(image.get('height')),
+ } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
+
+ return {
+ 'id': video_id,
+ 'title': title or video_id,
+ 'description': description,
+ 'upload_date': upload_date,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ def _parse_smil_namespace(self, smil):
+ return self._search_regex(
+ r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
+
+ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
+ base = smil_url
+ for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
+ b = meta.get('base') or meta.get('httpBase')
+ if b:
+ base = b
+ break
formats = []
rtmp_count = 0
- if smil.findall('./body/seq/video'):
- video = smil.findall('./body/seq/video')[0]
- fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
- formats.extend(fmts)
- else:
- for video in smil.findall('./body/switch/video'):
- fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
- formats.extend(fmts)
+ http_count = 0
+
+ videos = smil.findall(self._xpath_ns('.//video', namespace))
+ for video in videos:
+ src = video.get('src')
+ if not src:
+ continue
+
+ bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
+ filesize = int_or_none(video.get('size') or video.get('fileSize'))
+ width = int_or_none(video.get('width'))
+ height = int_or_none(video.get('height'))
+ proto = video.get('proto')
+ ext = video.get('ext')
+ src_ext = determine_ext(src)
+ streamer = video.get('streamer') or base
+
+ if proto == 'rtmp' or streamer.startswith('rtmp'):
+ rtmp_count += 1
+ formats.append({
+ 'url': streamer,
+ 'play_path': src,
+ 'ext': 'flv',
+ 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
+ 'tbr': bitrate,
+ 'filesize': filesize,
+ 'width': width,
+ 'height': height,
+ })
+ if transform_rtmp_url:
+ streamer, src = transform_rtmp_url(streamer, src)
+ formats[-1].update({
+ 'url': streamer,
+ 'play_path': src,
+ })
+ continue
+
+ src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
+
+ if proto == 'm3u8' or src_ext == 'm3u8':
+ m3u8_formats = self._extract_m3u8_formats(
+ src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ continue
+
+ if src_ext == 'f4m':
+ f4m_url = src_url
+ if not f4m_params:
+ f4m_params = {
+ 'hdcore': '3.2.0',
+ 'plugin': 'flowplayer-3.2.0.1',
+ }
+ f4m_url += '&' if '?' in f4m_url else '?'
+ f4m_url += compat_urllib_parse.urlencode(f4m_params)
+ f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
+ continue
+
+ if src_url.startswith('http') and self._is_valid_url(src, video_id):
+ http_count += 1
+ formats.append({
+ 'url': src_url,
+ 'ext': ext or src_ext or 'flv',
+ 'format_id': 'http-%d' % (bitrate or http_count),
+ 'tbr': bitrate,
+ 'filesize': filesize,
+ 'width': width,
+ 'height': height,
+ })
+ continue
self._sort_formats(formats)
return formats
- def _parse_smil_video(self, video, video_id, base, rtmp_count):
- src = video.get('src')
- if not src:
- return [], rtmp_count
- bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
- width = int_or_none(video.get('width'))
- height = int_or_none(video.get('height'))
- proto = video.get('proto')
- if not proto:
- if base:
- if base.startswith('rtmp'):
- proto = 'rtmp'
- elif base.startswith('http'):
- proto = 'http'
- ext = video.get('ext')
- if proto == 'm3u8':
- return self._extract_m3u8_formats(src, video_id, ext), rtmp_count
- elif proto == 'rtmp':
- rtmp_count += 1
- streamer = video.get('streamer') or base
- return ([{
- 'url': streamer,
- 'play_path': src,
- 'ext': 'flv',
- 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
- 'tbr': bitrate,
- 'width': width,
- 'height': height,
- }], rtmp_count)
- elif proto.startswith('http'):
- return ([{
- 'url': base + src,
- 'ext': ext or 'flv',
- 'tbr': bitrate,
- 'width': width,
- 'height': height,
- }], rtmp_count)
+ def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
+ subtitles = {}
+ for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
+ src = textstream.get('src')
+ if not src:
+ continue
+ ext = textstream.get('ext') or determine_ext(src)
+ if not ext:
+ type_ = textstream.get('type')
+ SUBTITLES_TYPES = {
+ 'text/vtt': 'vtt',
+ 'text/srt': 'srt',
+ 'application/smptett+xml': 'tt',
+ }
+ if type_ in SUBTITLES_TYPES:
+ ext = SUBTITLES_TYPES[type_]
+ lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
+ subtitles.setdefault(lang, []).append({
+ 'url': src,
+ 'ext': ext,
+ })
+ return subtitles
+
+ def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
+ xspf = self._download_xml(
+ playlist_url, playlist_id, 'Downloading xspf playlist',
+ 'Unable to download xspf manifest', fatal=fatal)
+ if xspf is False:
+ return []
+ return self._parse_xspf(xspf, playlist_id)
+
+ def _parse_xspf(self, playlist, playlist_id):
+ NS_MAP = {
+ 'xspf': 'http://xspf.org/ns/0/',
+ 's1': 'http://static.streamone.nl/player/ns/0',
+ }
+
+ entries = []
+ for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
+ title = xpath_text(
+ track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
+ description = xpath_text(
+ track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
+ thumbnail = xpath_text(
+ track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
+ duration = float_or_none(
+ xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
+
+ formats = [{
+ 'url': location.text,
+ 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
+ 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
+ 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
+ } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
+ self._sort_formats(formats)
+
+ entries.append({
+ 'id': playlist_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
+ })
+ return entries
def _live_title(self, name):
""" Generate the title for a live video """
@@ -1065,6 +1277,12 @@ class InfoExtractor(object):
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
+ def _get_cookies(self, url):
+ """ Return a compat_cookies.SimpleCookie with the cookies for the url """
+ req = compat_urllib_request.Request(url)
+ self._downloader.cookiejar.add_cookie_header(req)
+ return compat_cookies.SimpleCookie(req.get_header('Cookie'))
+
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
@@ -1103,6 +1321,23 @@ class InfoExtractor(object):
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
+ @staticmethod
+ def _merge_subtitle_items(subtitle_list1, subtitle_list2):
+ """ Merge subtitle items for one language. Items with duplicated URLs
+ will be dropped. """
+ list1_urls = set([item['url'] for item in subtitle_list1])
+ ret = list(subtitle_list1)
+ ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
+ return ret
+
+ @classmethod
+ def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
+ """ Merge two subtitle dictionaries, language by language. """
+ ret = dict(subtitle_dict1)
+ for lang in subtitle_dict2:
+ ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
+ return ret
+
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
@@ -1116,7 +1351,7 @@ class InfoExtractor(object):
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
- They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
+ They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py
index 3db4db4e4..6f92ae2ed 100644
--- a/youtube_dl/extractor/condenast.py
+++ b/youtube_dl/extractor/condenast.py
@@ -2,7 +2,6 @@
from __future__ import unicode_literals
import re
-import json
from .common import InfoExtractor
from ..compat import (
@@ -12,6 +11,7 @@ from ..compat import (
)
from ..utils import (
orderedSet,
+ remove_end,
)
@@ -24,21 +24,33 @@ class CondeNastIE(InfoExtractor):
# The keys are the supported sites and the values are the name to be shown
# to the user and in the extractor description.
_SITES = {
- 'wired': 'WIRED',
+ 'allure': 'Allure',
+ 'architecturaldigest': 'Architectural Digest',
+ 'arstechnica': 'Ars Technica',
+ 'bonappetit': 'Bon Appétit',
+ 'brides': 'Brides',
+ 'cnevids': 'Condé Nast',
+ 'cntraveler': 'Condé Nast Traveler',
+ 'details': 'Details',
+ 'epicurious': 'Epicurious',
+ 'glamour': 'Glamour',
+ 'golfdigest': 'Golf Digest',
'gq': 'GQ',
+ 'newyorker': 'The New Yorker',
+ 'self': 'SELF',
+ 'teenvogue': 'Teen Vogue',
+ 'vanityfair': 'Vanity Fair',
'vogue': 'Vogue',
- 'glamour': 'Glamour',
+ 'wired': 'WIRED',
'wmagazine': 'W Magazine',
- 'vanityfair': 'Vanity Fair',
- 'cnevids': 'Condé Nast',
}
- _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
+ _VALID_URL = r'http://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed(?:js)?)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
- EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys())
+ EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed(?:js)?)/.+?' % '|'.join(_SITES.keys())
- _TEST = {
+ _TESTS = [{
'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
'md5': '1921f713ed48aabd715691f774c451f7',
'info_dict': {
@@ -47,7 +59,16 @@ class CondeNastIE(InfoExtractor):
'title': '3D Printed Speakers Lit With LED',
'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
}
- }
+ }, {
+ # JS embed
+ 'url': 'http://player.cnevids.com/embedjs/55f9cf8b61646d1acf00000c/5511d76261646d5566020000.js',
+ 'md5': 'f1a6f9cafb7083bab74a710f65d08999',
+ 'info_dict': {
+ 'id': '55f9cf8b61646d1acf00000c',
+ 'ext': 'mp4',
+ 'title': '3D printed TSA Travel Sentry keys really do open TSA locks',
+ }
+ }]
def _extract_series(self, url, webpage):
title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
@@ -86,8 +107,8 @@ class CondeNastIE(InfoExtractor):
info_url = base_info_url + data
info_page = self._download_webpage(info_url, video_id,
'Downloading video info')
- video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info')
- video_info = json.loads(video_info)
+ video_info = self._search_regex(r'var\s+video\s*=\s*({.+?});', info_page, 'video info')
+ video_info = self._parse_json(video_info, video_id)
formats = [{
'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),
@@ -111,6 +132,13 @@ class CondeNastIE(InfoExtractor):
url_type = mobj.group('type')
item_id = mobj.group('id')
+ # Convert JS embed to regular embed
+ if url_type == 'embedjs':
+ parsed_url = compat_urlparse.urlparse(url)
+ url = compat_urlparse.urlunparse(parsed_url._replace(
+ path=remove_end(parsed_url.path, '.js').replace('/embedjs/', '/embed/')))
+ url_type = 'embed'
+
self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site])
webpage = self._download_webpage(url, item_id)
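
The embedjs handling above rewrites a JS embed URL into the regular embed URL before extraction. A minimal sketch of that rewrite using the standard library directly instead of the compat wrappers and remove_end (the sample URL is the one from the test case):

    try:
        from urllib.parse import urlparse, urlunparse  # Python 3
    except ImportError:
        from urlparse import urlparse, urlunparse  # Python 2

    url = 'http://player.cnevids.com/embedjs/55f9cf8b61646d1acf00000c/5511d76261646d5566020000.js'
    parsed = urlparse(url)
    path = parsed.path
    if path.endswith('.js'):
        path = path[:-len('.js')]
    print(urlunparse(parsed._replace(path=path.replace('/embedjs/', '/embed/'))))
    # http://player.cnevids.com/embed/55f9cf8b61646d1acf00000c/5511d76261646d5566020000
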
diff --git a/youtube_dl/extractor/criterion.py b/youtube_dl/extractor/criterion.py
index 4fb178165..dedb810a0 100644
--- a/youtube_dl/extractor/criterion.py
+++ b/youtube_dl/extractor/criterion.py
@@ -27,9 +27,7 @@ class CriterionIE(InfoExtractor):
final_url = self._search_regex(
r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
title = self._og_search_title(webpage)
- description = self._html_search_regex(
- r'<meta name="description" content="(.+?)" />',
- webpage, 'video description')
+ description = self._html_search_meta('description', webpage)
thumbnail = self._search_regex(
r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index d1b6d7366..cecd0c784 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -14,22 +14,74 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_request,
+ compat_urlparse,
)
from ..utils import (
ExtractorError,
bytes_to_intlist,
intlist_to_bytes,
+ int_or_none,
+ remove_end,
unified_strdate,
urlencode_postdata,
+ xpath_text,
)
from ..aes import (
aes_cbc_decrypt,
)
-class CrunchyrollIE(InfoExtractor):
- _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
+class CrunchyrollBaseIE(InfoExtractor):
_NETRC_MACHINE = 'crunchyroll'
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+ self.report_login()
+ login_url = 'https://www.crunchyroll.com/?a=formhandler'
+ data = urlencode_postdata({
+ 'formname': 'RpcApiUser_Login',
+ 'name': username,
+ 'password': password,
+ })
+ login_request = compat_urllib_request.Request(login_url, data)
+ login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ self._download_webpage(login_request, None, False, 'Wrong login info')
+
+ def _real_initialize(self):
+ self._login()
+
+ def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
+ request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
+ else compat_urllib_request.Request(url_or_request))
+ # Accept-Language must be set explicitly to accept any language to avoid issues
+ # similar to https://github.com/rg3/youtube-dl/issues/6797.
+ # Along with the IP address, Crunchyroll uses Accept-Language to guess whether
+ # georestriction should be imposed (from what I can see it just takes the first
+ # language, ignoring the priority, and requires it to correspond to the IP).
+ # This causes Crunchyroll to not work with georestriction in some browsers that
+ # don't place the locale language first in the header. However, allowing any
+ # language seems to work around the issue.
+ request.add_header('Accept-Language', '*')
+ return super(CrunchyrollBaseIE, self)._download_webpage(
+ request, video_id, note, errnote, fatal, tries, timeout, encoding)
+
+ @staticmethod
+ def _add_skip_wall(url):
+ parsed_url = compat_urlparse.urlparse(url)
+ qs = compat_urlparse.parse_qs(parsed_url.query)
+ # Always force skip_wall to bypass the maturity wall, namely the 18+ confirmation message:
+ # > This content may be inappropriate for some people.
+ # > Are you sure you want to continue?
+ # since it is not disabled by default in the Crunchyroll account settings.
+ # See https://github.com/rg3/youtube-dl/issues/7202.
+ qs['skip_wall'] = ['1']
+ return compat_urlparse.urlunparse(
+ parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+
+
+class CrunchyrollIE(CrunchyrollBaseIE):
+ _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TESTS = [{
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
'info_dict': {
@@ -61,10 +113,13 @@ class CrunchyrollIE(InfoExtractor):
# rtmp
'skip_download': True,
},
-
}, {
'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
'only_matching': True,
+ }, {
+ # geo-restricted (US), 18+ maturity wall, non-premium available
+ 'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
+ 'only_matching': True,
}]
_FORMAT_IDS = {
@@ -74,24 +129,6 @@ class CrunchyrollIE(InfoExtractor):
'1080': ('80', '108'),
}
- def _login(self):
- (username, password) = self._get_login_info()
- if username is None:
- return
- self.report_login()
- login_url = 'https://www.crunchyroll.com/?a=formhandler'
- data = urlencode_postdata({
- 'formname': 'RpcApiUser_Login',
- 'name': username,
- 'password': password,
- })
- login_request = compat_urllib_request.Request(login_url, data)
- login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- self._download_webpage(login_request, None, False, 'Wrong login info')
-
- def _real_initialize(self):
- self._login()
-
def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
@@ -234,8 +271,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
else:
webpage_url = 'http://www.' + mobj.group('url')
- webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
- note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
+ webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage')
+ note_m = self._html_search_regex(
+ r'<div class="showmedia-trailer-notice">(.+?)</div>',
+ webpage, 'trailer-notice', default='')
if note_m:
raise ExtractorError(note_m)
@@ -245,15 +284,22 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
if msg.get('type') == 'error':
raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)
+ if 'To view this, please log in to verify you are 18 or older.' in webpage:
+ self.raise_login_required()
+
video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
video_title = re.sub(r' {2,}', ' ', video_title)
video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
if not video_description:
video_description = None
- video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
+ video_upload_date = self._html_search_regex(
+ [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
+ webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
if video_upload_date:
video_upload_date = unified_strdate(video_upload_date)
- video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)
+ video_uploader = self._html_search_regex(
+ r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage,
+ 'video_uploader', fatal=False)
playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
playerdata_req = compat_urllib_request.Request(playerdata_url)
@@ -279,13 +325,33 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
stream_info = streamdata.find('./{default}preload/stream_info')
video_url = stream_info.find('./host').text
video_play_path = stream_info.find('./file').text
- formats.append({
+ metadata = stream_info.find('./metadata')
+ format_info = {
+ 'format': video_format,
+ 'format_id': video_format,
+ 'height': int_or_none(xpath_text(metadata, './height')),
+ 'width': int_or_none(xpath_text(metadata, './width')),
+ }
+
+ if '.fplive.net/' in video_url:
+ video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
+ parsed_video_url = compat_urlparse.urlparse(video_url)
+ direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
+ netloc='v.lvlt.crcdn.net',
+ path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_play_path.split(':')[-1])))
+ if self._is_valid_url(direct_video_url, video_id, video_format):
+ format_info.update({
+ 'url': direct_video_url,
+ })
+ formats.append(format_info)
+ continue
+
+ format_info.update({
'url': video_url,
'play_path': video_play_path,
'ext': 'flv',
- 'format': video_format,
- 'format_id': video_format,
})
+ formats.append(format_info)
subtitles = self.extract_subtitles(video_id, webpage)
@@ -301,9 +367,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
}
-class CrunchyrollShowPlaylistIE(InfoExtractor):
+class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
IE_NAME = "crunchyroll:playlist"
- _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
+ _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
_TESTS = [{
'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
@@ -312,12 +378,25 @@ class CrunchyrollShowPlaylistIE(InfoExtractor):
'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
},
'playlist_count': 13,
+ }, {
+ # geo-restricted (US), 18+ maturity wall, non-premium available
+ 'url': 'http://www.crunchyroll.com/cosplay-complex-ova',
+ 'info_dict': {
+ 'id': 'cosplay-complex-ova',
+ 'title': 'Cosplay Complex OVA'
+ },
+ 'playlist_count': 3,
+ 'skip': 'Georestricted',
+ }, {
+ # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14
+ 'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
+ 'only_matching': True,
}]
def _real_extract(self, url):
show_id = self._match_id(url)
- webpage = self._download_webpage(url, show_id)
+ webpage = self._download_webpage(self._add_skip_wall(url), show_id)
title = self._html_search_regex(
r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
webpage, 'title')
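
_add_skip_wall forces skip_wall=1 onto whatever query string the URL already carries. A standalone sketch of that query-string manipulation with the standard library instead of the compat wrappers:

    try:
        from urllib.parse import urlparse, urlunparse, parse_qs, urlencode  # Python 3
    except ImportError:
        from urlparse import urlparse, urlunparse, parse_qs  # Python 2
        from urllib import urlencode

    def add_skip_wall(url):
        # Force skip_wall=1 while keeping any query parameters already present.
        parsed = urlparse(url)
        qs = parse_qs(parsed.query)
        qs['skip_wall'] = ['1']
        return urlunparse(parsed._replace(query=urlencode(qs, True)))

    print(add_skip_wall('http://www.crunchyroll.com/cosplay-complex-ova'))
    # http://www.crunchyroll.com/cosplay-complex-ova?skip_wall=1
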
diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 1a41c0db1..9cd9ff17d 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -13,8 +13,9 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ determine_ext,
int_or_none,
- orderedSet,
+ parse_iso8601,
str_to_int,
unescapeHTML,
)
@@ -28,10 +29,16 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
request.add_header('Cookie', 'family_filter=off; ff=off')
return request
+ def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
+ request = self._build_request(url)
+ return self._download_webpage_handle(request, *args, **kwargs)
-class DailymotionIE(DailymotionBaseInfoExtractor):
- """Information Extractor for Dailymotion"""
+ def _download_webpage_no_ff(self, url, *args, **kwargs):
+ request = self._build_request(url)
+ return self._download_webpage(request, *args, **kwargs)
+
+class DailymotionIE(DailymotionBaseInfoExtractor):
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
@@ -50,10 +57,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'info_dict': {
'id': 'x2iuewm',
'ext': 'mp4',
- 'uploader': 'IGN',
'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
- 'upload_date': '20150306',
+ 'description': 'Several come bundled with the Steam Controller.',
+ 'thumbnail': 're:^https?:.*\.(?:jpg|png)$',
'duration': 74,
+ 'timestamp': 1425657362,
+ 'upload_date': '20150306',
+ 'uploader': 'IGN',
+ 'uploader_id': 'xijv66',
+ 'age_limit': 0,
+ 'view_count': int,
+ 'comment_count': int,
}
},
# Vevo video
@@ -82,46 +96,120 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'uploader': 'HotWaves1012',
'age_limit': 18,
}
+ },
+ # geo-restricted, player v5
+ {
+ 'url': 'http://www.dailymotion.com/video/xhza0o',
+ 'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
- url = 'https://www.dailymotion.com/video/%s' % video_id
- # Retrieve video webpage to extract further information
- request = self._build_request(url)
- webpage = self._download_webpage(request, video_id)
+ webpage = self._download_webpage_no_ff(
+ 'https://www.dailymotion.com/video/%s' % video_id, video_id)
+
+ age_limit = self._rta_search(webpage)
+
+ description = self._og_search_description(webpage) or self._html_search_meta(
+ 'description', webpage, 'description')
- # Extract URL, uploader and title from webpage
- self.report_extraction(video_id)
+ view_count = str_to_int(self._search_regex(
+ [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
+ r'video_views_count[^>]+>\s+([\d\.,]+)'],
+ webpage, 'view count', fatal=False))
+ comment_count = int_or_none(self._search_regex(
+ r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
+ webpage, 'comment count', fatal=False))
+
+ player_v5 = self._search_regex(
+ [r'buildPlayer\(({.+?})\);', r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);'],
+ webpage, 'player v5', default=None)
+ if player_v5:
+ player = self._parse_json(player_v5, video_id)
+ metadata = player['metadata']
+
+ self._check_error(metadata)
+
+ formats = []
+ for quality, media_list in metadata['qualities'].items():
+ for media in media_list:
+ media_url = media.get('url')
+ if not media_url:
+ continue
+ type_ = media.get('type')
+ if type_ == 'application/vnd.lumberjack.manifest':
+ continue
+ if type_ == 'application/x-mpegURL' or determine_ext(media_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', m3u8_id='hls'))
+ else:
+ f = {
+ 'url': media_url,
+ 'format_id': quality,
+ }
+ m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
+ if m:
+ f.update({
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
+ formats.append(f)
+ self._sort_formats(formats)
+
+ title = metadata['title']
+ duration = int_or_none(metadata.get('duration'))
+ timestamp = int_or_none(metadata.get('created_time'))
+ thumbnail = metadata.get('poster_url')
+ uploader = metadata.get('owner', {}).get('screenname')
+ uploader_id = metadata.get('owner', {}).get('id')
+
+ subtitles = {}
+ for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items():
+ subtitles[subtitle_lang] = [{
+ 'ext': determine_ext(subtitle_url),
+ 'url': subtitle_url,
+ } for subtitle_url in subtitle.get('urls', [])]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'age_limit': age_limit,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
- # It may just embed a vevo video:
- m_vevo = re.search(
+ # vevo embed
+ vevo_id = self._search_regex(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
- webpage)
- if m_vevo is not None:
- vevo_id = m_vevo.group('id')
- self.to_screen('Vevo video detected: %s' % vevo_id)
- return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
+ webpage, 'vevo embed', default=None)
+ if vevo_id:
+ return self.url_result('vevo:%s' % vevo_id, 'Vevo')
- age_limit = self._rta_search(webpage)
+ # fallback old player
+ embed_page = self._download_webpage_no_ff(
+ 'https://www.dailymotion.com/embed/video/%s' % video_id,
+ video_id, 'Downloading embed page')
- video_upload_date = None
- mobj = re.search(r'<meta property="video:release_date" content="([0-9]{4})-([0-9]{2})-([0-9]{2}).+?"/>', webpage)
- if mobj is not None:
- video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
-
- embed_url = 'https://www.dailymotion.com/embed/video/%s' % video_id
- embed_request = self._build_request(embed_url)
- embed_page = self._download_webpage(
- embed_request, video_id, 'Downloading embed page')
- info = self._search_regex(r'var info = ({.*?}),$', embed_page,
- 'video info', flags=re.MULTILINE)
- info = json.loads(info)
- if info.get('error') is not None:
- msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
- raise ExtractorError(msg, expected=True)
+ timestamp = parse_iso8601(self._html_search_meta(
+ 'video:release_date', webpage, 'upload date'))
+
+ info = self._parse_json(
+ self._search_regex(
+ r'var info = ({.*?}),$', embed_page,
+ 'video info', flags=re.MULTILINE),
+ video_id)
+
+ self._check_error(info)
formats = []
for (key, format_id) in self._FORMATS:
@@ -139,16 +227,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'width': width,
'height': height,
})
- if not formats:
- raise ExtractorError('Unable to extract video URL')
+ self._sort_formats(formats)
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
- view_count = str_to_int(self._search_regex(
- r'video_views_count[^>]+>\s+([\d\.,]+)',
- webpage, 'view count', fatal=False))
-
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
@@ -159,8 +242,9 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
- 'upload_date': video_upload_date,
+ 'timestamp': timestamp,
'title': title,
+ 'description': description,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
@@ -168,6 +252,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'duration': info['duration']
}
+ def _check_error(self, info):
+ if info.get('error') is not None:
+ raise ExtractorError(
+ '%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True)
+
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
@@ -199,18 +288,26 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
}]
def _extract_entries(self, id):
- video_ids = []
+ video_ids = set()
+ processed_urls = set()
for pagenum in itertools.count(1):
- request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
- webpage = self._download_webpage(request,
- id, 'Downloading page %s' % pagenum)
+ page_url = self._PAGE_TEMPLATE % (id, pagenum)
+ webpage, urlh = self._download_webpage_handle_no_ff(
+ page_url, id, 'Downloading page %s' % pagenum)
+ if urlh.geturl() in processed_urls:
+ self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
+ page_url, urlh.geturl()), id)
+ break
+
+ processed_urls.add(urlh.geturl())
- video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
+ for video_id in re.findall(r'data-xid="(.+?)"', webpage):
+ if video_id not in video_ids:
+ yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
+ video_ids.add(video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
- return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
- for video_id in orderedSet(video_ids)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -227,7 +324,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
- _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?:(?:old/)?user/)?(?P<user>[^/]+)$'
+ _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
@@ -236,6 +333,17 @@ class DailymotionUserIE(DailymotionPlaylistIE):
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
+ }, {
+ 'url': 'http://www.dailymotion.com/user/UnderProject',
+ 'info_dict': {
+ 'id': 'UnderProject',
+ 'title': 'UnderProject',
+ },
+ 'playlist_mincount': 1800,
+ 'expected_warnings': [
+ 'Stopped at duplicated page',
+ ],
+ 'skip': 'Takes too long',
}]
def _real_extract(self, url):
@@ -286,8 +394,7 @@ class DailymotionCloudIE(DailymotionBaseInfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- request = self._build_request(url)
- webpage = self._download_webpage(request, video_id)
+ webpage = self._download_webpage_no_ff(url, video_id)
title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
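
The playlist pagination above now stops as soon as a page request ends up at a URL that was already processed (out-of-range pages redirect back to an earlier one). A simplified sketch of that stop condition; fetch_page and the page URL template are hypothetical stand-ins, and the real code additionally stops when the "more pages" marker disappears:

    import itertools
    import re

    def extract_entries(playlist_id, fetch_page):
        # fetch_page(url) is assumed to return (html, final_url) after redirects;
        # the page URL template is purely illustrative.
        video_ids = set()
        processed_urls = set()
        for pagenum in itertools.count(1):
            page_url = 'https://www.dailymotion.com/playlist/%s/%d' % (playlist_id, pagenum)
            html, final_url = fetch_page(page_url)
            if final_url in processed_urls:
                break  # redirected back to a page we have already seen
            processed_urls.add(final_url)
            for video_id in re.findall(r'data-xid="(.+?)"', html):
                if video_id not in video_ids:
                    video_ids.add(video_id)
                    yield video_id
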
diff --git a/youtube_dl/extractor/dcn.py b/youtube_dl/extractor/dcn.py
new file mode 100644
index 000000000..6f2fea5ff
--- /dev/null
+++ b/youtube_dl/extractor/dcn.py
@@ -0,0 +1,84 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class DCNIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887',
+ 'info_dict':
+ {
+ 'id': '17375',
+ 'ext': 'mp4',
+ 'title': 'رحلة العمر : الحلقة 1',
+ 'description': 'md5:0156e935d870acb8ef0a66d24070c6d6',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 2041,
+ 'timestamp': 1227504126,
+ 'upload_date': '20081124',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ request = compat_urllib_request.Request(
+ 'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id,
+ headers={'Origin': 'http://www.dcndigital.ae'})
+
+ video = self._download_json(request, video_id)
+ title = video.get('title_en') or video['title_ar']
+
+ webpage = self._download_webpage(
+ 'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
+ compat_urllib_parse.urlencode({
+ 'id': video['id'],
+ 'user_id': video['user_id'],
+ 'signature': video['signature'],
+ 'countries': 'Q0M=',
+ 'filter': 'DENY',
+ }), video_id)
+
+ m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url')
+ formats = self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
+
+ rtsp_url = self._search_regex(
+ r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False)
+ if rtsp_url:
+ formats.append({
+ 'url': rtsp_url,
+ 'format_id': 'rtsp',
+ })
+
+ self._sort_formats(formats)
+
+ img = video.get('img')
+ thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None
+ duration = int_or_none(video.get('duration'))
+ description = video.get('description_en') or video.get('description_ar')
+ timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ }
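
The timestamp handling above passes ' ' as the delimiter to parse_iso8601 because the API appears to return 'YYYY-MM-DD HH:MM:SS' values rather than 'T'-separated ISO strings. A rough equivalent with the standard library, treating the value as UTC; the sample string is not taken from the API but maps to the timestamp expected by the test:

    import calendar
    from datetime import datetime

    def parse_space_separated_timestamp(value):
        # 'YYYY-MM-DD HH:MM:SS' (assumed UTC) -> UNIX timestamp.
        dt = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
        return calendar.timegm(dt.timetuple())

    print(parse_space_separated_timestamp('2008-11-24 05:22:06'))
    # 1227504126, i.e. upload_date 20081124 as in the test above
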
diff --git a/youtube_dl/extractor/dhm.py b/youtube_dl/extractor/dhm.py
index 3ed1f1663..44e0c5d4d 100644
--- a/youtube_dl/extractor/dhm.py
+++ b/youtube_dl/extractor/dhm.py
@@ -1,10 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import (
- xpath_text,
- parse_duration,
-)
+from ..utils import parse_duration
class DHMIE(InfoExtractor):
@@ -34,24 +31,14 @@ class DHMIE(InfoExtractor):
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
+ playlist_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
+ webpage = self._download_webpage(url, playlist_id)
playlist_url = self._search_regex(
r"file\s*:\s*'([^']+)'", webpage, 'playlist url')
- playlist = self._download_xml(playlist_url, video_id)
-
- track = playlist.find(
- './{http://xspf.org/ns/0/}trackList/{http://xspf.org/ns/0/}track')
-
- video_url = xpath_text(
- track, './{http://xspf.org/ns/0/}location',
- 'video url', fatal=True)
- thumbnail = xpath_text(
- track, './{http://xspf.org/ns/0/}image',
- 'thumbnail')
+ entries = self._extract_xspf_playlist(playlist_url, playlist_id)
title = self._search_regex(
[r'dc:title="([^"]+)"', r'<title> &raquo;([^<]+)</title>'],
@@ -63,11 +50,10 @@ class DHMIE(InfoExtractor):
r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
webpage, 'duration', default=None))
- return {
- 'id': video_id,
- 'url': video_url,
+ entries[0].update({
'title': title,
'description': description,
'duration': duration,
- 'thumbnail': thumbnail,
- }
+ })
+
+ return self.playlist_result(entries, playlist_id)
diff --git a/youtube_dl/extractor/divxstage.py b/youtube_dl/extractor/divxstage.py
deleted file mode 100644
index b88379e06..000000000
--- a/youtube_dl/extractor/divxstage.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class DivxStageIE(NovaMovIE):
- IE_NAME = 'divxstage'
- IE_DESC = 'DivxStage'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'}
-
- _HOST = 'www.divxstage.eu'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _TITLE_REGEX = r'<div class="video_det">\s*<strong>([^<]+)</strong>'
- _DESCRIPTION_REGEX = r'<div class="video_det">\s*<strong>[^<]+</strong>\s*<p>([^<]+)</p>'
-
- _TEST = {
- 'url': 'http://www.divxstage.eu/video/57f238e2e5e01',
- 'md5': '63969f6eb26533a1968c4d325be63e72',
- 'info_dict': {
- 'id': '57f238e2e5e01',
- 'ext': 'flv',
- 'title': 'youtubedl test video',
- 'description': 'This is a test video for youtubedl.',
- }
- }
diff --git a/youtube_dl/extractor/dumpert.py b/youtube_dl/extractor/dumpert.py
index 999fb5620..1f00386fe 100644
--- a/youtube_dl/extractor/dumpert.py
+++ b/youtube_dl/extractor/dumpert.py
@@ -9,8 +9,8 @@ from ..utils import qualities
class DumpertIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/mediabase/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
+ _TESTS = [{
'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/',
'md5': '1b9318d7d5054e7dcb9dc7654f21d643',
'info_dict': {
@@ -20,11 +20,15 @@ class DumpertIE(InfoExtractor):
'description': 'Niet schrikken hoor',
'thumbnail': 're:^https?://.*\.jpg$',
}
- }
+ }, {
+ 'url': 'http://www.dumpert.nl/embed/6675421/dc440fe7/',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
+ url = 'https://www.dumpert.nl/mediabase/' + video_id
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'nsfw=1; cpc=10')
webpage = self._download_webpage(req, video_id)
diff --git a/youtube_dl/extractor/eagleplatform.py b/youtube_dl/extractor/eagleplatform.py
index 688dfc2f7..e529b9b96 100644
--- a/youtube_dl/extractor/eagleplatform.py
+++ b/youtube_dl/extractor/eagleplatform.py
@@ -21,7 +21,7 @@ class EaglePlatformIE(InfoExtractor):
_TESTS = [{
# http://lenta.ru/news/2015/03/06/navalny/
'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
- 'md5': '0b7994faa2bd5c0f69a3db6db28d078d',
+ 'md5': '70f5187fb620f2c1d503b3b22fd4efe3',
'info_dict': {
'id': '227304',
'ext': 'mp4',
@@ -36,7 +36,7 @@ class EaglePlatformIE(InfoExtractor):
# http://muz-tv.ru/play/7129/
# http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true
'url': 'eagleplatform:media.clipyou.ru:12820',
- 'md5': '6c2ebeab03b739597ce8d86339d5a905',
+ 'md5': '90b26344ba442c8e44aa4cf8f301164a',
'info_dict': {
'id': '12820',
'ext': 'mp4',
@@ -48,7 +48,8 @@ class EaglePlatformIE(InfoExtractor):
'skip': 'Georestricted',
}]
- def _handle_error(self, response):
+ @staticmethod
+ def _handle_error(response):
status = int_or_none(response.get('status', 200))
if status != 200:
raise ExtractorError(' '.join(response['errors']), expected=True)
@@ -58,6 +59,9 @@ class EaglePlatformIE(InfoExtractor):
self._handle_error(response)
return response
+ def _get_video_url(self, url_or_request, video_id, note='Downloading JSON metadata'):
+ return self._download_json(url_or_request, video_id, note)['data'][0]
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id')
@@ -69,7 +73,7 @@ class EaglePlatformIE(InfoExtractor):
title = media['title']
description = media.get('description')
- thumbnail = media.get('snapshot')
+ thumbnail = self._proto_relative_url(media.get('snapshot'), 'http:')
duration = int_or_none(media.get('duration'))
view_count = int_or_none(media.get('views'))
@@ -78,13 +82,20 @@ class EaglePlatformIE(InfoExtractor):
if age_restriction:
age_limit = 0 if age_restriction == 'allow_all' else 18
- m3u8_data = self._download_json(
- media['sources']['secure_m3u8']['auto'],
- video_id, 'Downloading m3u8 JSON')
+ secure_m3u8 = self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:')
+ m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON')
formats = self._extract_m3u8_formats(
- m3u8_data['data'][0], video_id,
+ m3u8_url, video_id,
'mp4', entry_protocol='m3u8_native')
+
+ mp4_url = self._get_video_url(
+ # Secure mp4 URL is constructed according to Player.prototype.mp4 from
+ # http://lentaru.media.eagleplatform.com/player/player.js
+ re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4', secure_m3u8),
+ video_id, 'Downloading mp4 JSON')
+ formats.append({'url': mp4_url, 'format_id': 'mp4'})
+
self._sort_formats(formats)
return {
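
The added mp4 branch derives a second metadata URL by swapping the streaming-protocol token inside the secure m3u8 URL, mirroring Player.prototype.mp4 from the referenced player.js. A tiny illustration; the URL below is invented:

    import re

    secure_m3u8 = 'http://example.eagleplatform.com/api/rest/player/hlsvod/227304.m3u8?token=abc'
    print(re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4', secure_m3u8))
    # http://example.eagleplatform.com/api/rest/player/mp4/227304.mp4?token=abc
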
diff --git a/youtube_dl/extractor/engadget.py b/youtube_dl/extractor/engadget.py
index 4ea37ebd9..e4180701d 100644
--- a/youtube_dl/extractor/engadget.py
+++ b/youtube_dl/extractor/engadget.py
@@ -10,7 +10,7 @@ from ..utils import (
class EngadgetIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://www.engadget.com/
- (?:video/5min/(?P<id>\d+)|
+ (?:video(?:/5min)?/(?P<id>\d+)|
[\d/]+/.*?)
'''
diff --git a/youtube_dl/extractor/eroprofile.py b/youtube_dl/extractor/eroprofile.py
index 316033cf1..7fcd0151d 100644
--- a/youtube_dl/extractor/eroprofile.py
+++ b/youtube_dl/extractor/eroprofile.py
@@ -71,8 +71,7 @@ class EroProfileIE(InfoExtractor):
m = re.search(r'You must be logged in to view this video\.', webpage)
if m:
- raise ExtractorError(
- 'This video requires login. Please specify a username and password and try again.', expected=True)
+ self.raise_login_required('This video requires login')
video_id = self._search_regex(
[r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
diff --git a/youtube_dl/extractor/esri.py b/youtube_dl/extractor/esri.py
new file mode 100644
index 000000000..bf5d2019f
--- /dev/null
+++ b/youtube_dl/extractor/esri.py
@@ -0,0 +1,74 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ int_or_none,
+ parse_filesize,
+ unified_strdate,
+)
+
+
+class EsriVideoIE(InfoExtractor):
+ _VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications',
+ 'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc',
+ 'info_dict': {
+ 'id': '1124',
+ 'ext': 'mp4',
+ 'title': 'ArcGIS Online - Developing Applications',
+ 'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 185,
+ 'upload_date': '20120419',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ formats = []
+ for width, height, content in re.findall(
+ r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage):
+ for video_url, ext, filesize in re.findall(
+ r'<a[^>]+href="([^"]+)">([^<]+)&nbsp;\(([^<]+)\)</a>', content):
+ formats.append({
+ 'url': compat_urlparse.urljoin(url, video_url),
+ 'ext': ext.lower(),
+ 'format_id': '%s-%s' % (ext.lower(), height),
+ 'width': int(width),
+ 'height': int(height),
+ 'filesize_approx': parse_filesize(filesize),
+ })
+ self._sort_formats(formats)
+
+ title = self._html_search_meta('title', webpage, 'title')
+ description = self._html_search_meta(
+ 'description', webpage, 'description', fatal=False)
+
+ thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail', fatal=False)
+ if thumbnail:
+ thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail)
+
+ duration = int_or_none(self._search_regex(
+ [r'var\s+videoSeconds\s*=\s*(\d+)', r"'duration'\s*:\s*(\d+)"],
+ webpage, 'duration', fatal=False))
+
+ upload_date = unified_strdate(self._html_search_meta(
+ 'last-modified', webpage, 'upload date', fatal=None))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'upload_date': upload_date,
+ 'formats': formats
+ }
diff --git a/youtube_dl/extractor/europa.py b/youtube_dl/extractor/europa.py
new file mode 100644
index 000000000..adc43919e
--- /dev/null
+++ b/youtube_dl/extractor/europa.py
@@ -0,0 +1,93 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ int_or_none,
+ orderedSet,
+ parse_duration,
+ qualities,
+ unified_strdate,
+ xpath_text
+)
+
+
+class EuropaIE(InfoExtractor):
+ _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
+ _TESTS = [{
+ 'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
+ 'md5': '574f080699ddd1e19a675b0ddf010371',
+ 'info_dict': {
+ 'id': 'I107758',
+ 'ext': 'mp4',
+ 'title': 'TRADE - Wikileaks on TTIP',
+ 'description': 'NEW LIVE EC Midday press briefing of 11/08/2015',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'upload_date': '20150811',
+ 'duration': 34,
+ 'view_count': int,
+ 'formats': 'mincount:3',
+ }
+ }, {
+ 'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ playlist = self._download_xml(
+ 'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id)
+
+ def get_item(type_, preference):
+ items = {}
+ for item in playlist.findall('./info/%s/item' % type_):
+ lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None)
+ if lang and label:
+ items[lang] = label.strip()
+ for p in preference:
+ if items.get(p):
+ return items[p]
+
+ query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+ preferred_lang = query.get('sitelang', ('en', ))[0]
+
+ preferred_langs = orderedSet((preferred_lang, 'en', 'int'))
+
+ title = get_item('title', preferred_langs) or video_id
+ description = get_item('description', preferred_langs)
+ thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
+ upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
+ duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
+ view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))
+
+ language_preference = qualities(preferred_langs[::-1])
+
+ formats = []
+ for file_ in playlist.findall('./files/file'):
+ video_url = xpath_text(file_, './url')
+ if not video_url:
+ continue
+ lang = xpath_text(file_, './lg')
+ formats.append({
+ 'url': video_url,
+ 'format_id': lang,
+ 'format_note': xpath_text(file_, './lglabel'),
+ 'language_preference': language_preference(lang)
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'upload_date': upload_date,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'formats': formats
+ }
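(The language_preference ordering above relies on youtube-dl's qualities() helper ranking items by list position; a minimal re-implementation, illustrative only, shows why reversing preferred_langs puts the user's language first:)

    def qualities(quality_ids):
        # Sketch of youtube_dl.utils.qualities: rank by position, unknown -> -1
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    preferred_langs = ['de', 'en', 'int']  # hypothetical ?sitelang=de request
    language_preference = qualities(preferred_langs[::-1])
    assert language_preference('de') > language_preference('en') > language_preference('int') > language_preference('fr')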
diff --git a/youtube_dl/extractor/expotv.py b/youtube_dl/extractor/expotv.py
index a38b773e8..1585a03bb 100644
--- a/youtube_dl/extractor/expotv.py
+++ b/youtube_dl/extractor/expotv.py
@@ -33,20 +33,27 @@ class ExpoTVIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
player_key = self._search_regex(
r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
- config_url = 'http://client.expotv.com/video/config/%s/%s' % (
- video_id, player_key)
config = self._download_json(
- config_url, video_id,
- note='Downloading video configuration')
+ 'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key),
+ video_id, 'Downloading video configuration')
- formats = [{
- 'url': fcfg['file'],
- 'height': int_or_none(fcfg.get('height')),
- 'format_note': fcfg.get('label'),
- 'ext': self._search_regex(
- r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
- 'file extension', default=None),
- } for fcfg in config['sources']]
+ formats = []
+ for fcfg in config['sources']:
+ media_url = fcfg.get('file')
+ if not media_url:
+ continue
+ if fcfg.get('type') == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
+ else:
+ formats.append({
+ 'url': media_url,
+ 'height': int_or_none(fcfg.get('height')),
+ 'format_id': fcfg.get('label'),
+ 'ext': self._search_regex(
+ r'filename=.*\.([a-z0-9_A-Z]+)&', media_url,
+ 'file extension', default=None) or fcfg.get('type'),
+ })
self._sort_formats(formats)
title = self._og_search_title(webpage)
diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index e17bb9aea..178a7ca4c 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -17,6 +17,8 @@ from ..utils import (
int_or_none,
limit_length,
urlencode_postdata,
+ get_element_by_id,
+ clean_html,
)
@@ -42,6 +44,7 @@ class FacebookIE(InfoExtractor):
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
+ 'uploader': 'Tennis on Facebook',
}
}, {
'note': 'Video without discernible title',
@@ -50,6 +53,7 @@ class FacebookIE(InfoExtractor):
'id': '274175099429670',
'ext': 'mp4',
'title': 'Facebook video #274175099429670',
+ 'uploader': 'Asif Nawab Butt',
},
'expected_warnings': [
'title'
@@ -161,6 +165,7 @@ class FacebookIE(InfoExtractor):
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
+ uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))
return {
'id': video_id,
@@ -168,4 +173,5 @@ class FacebookIE(InfoExtractor):
'formats': formats,
'duration': int_or_none(video_data.get('video_duration')),
'thumbnail': video_data.get('thumbnail_src'),
+ 'uploader': uploader,
}
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index 1ccc1a964..a406945e8 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -10,12 +10,13 @@ from ..compat import (
compat_urlparse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
)
class FC2IE(InfoExtractor):
- _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)?content/(?P<id>[^/]+)'
+ _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)*content/(?P<id>[^/]+)'
IE_NAME = 'fc2'
_NETRC_MACHINE = 'fc2'
_TESTS = [{
@@ -37,6 +38,9 @@ class FC2IE(InfoExtractor):
'password': '(snip)',
'skip': 'requires actual password'
}
+ }, {
+ 'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
+ 'only_matching': True,
}]
def _login(self):
@@ -52,10 +56,7 @@ class FC2IE(InfoExtractor):
'Submit': ' Login ',
}
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
+ login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
request = compat_urllib_request.Request(
'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
@@ -80,13 +81,13 @@ class FC2IE(InfoExtractor):
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
- refer = url.replace('/content/', '/a/content/')
+ refer = url.replace('/content/', '/a/content/') if '/a/content/' not in url else url
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
info_url = (
"http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
- format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
+ format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
info_webpage = self._download_webpage(
info_url, video_id, note='Downloading info page')
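(The encode_dict() call that replaces the inline dict comprehension encodes keys and values to UTF-8 bytes before urlencoding, since Python 2's urlencode chokes on unicode; roughly, as a sketch rather than the actual utils implementation:)

    def encode_dict(d, encoding='utf-8'):
        # Encode keys and values to bytes so Python 2's urlencode accepts them
        return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2

    login_form = {'email': 'user@example.invalid', 'password': 'hunter2'}  # hypothetical credentials
    login_data = urlencode(encode_dict(login_form)).encode('utf-8')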
diff --git a/youtube_dl/extractor/fczenit.py b/youtube_dl/extractor/fczenit.py
new file mode 100644
index 000000000..f1f150ef2
--- /dev/null
+++ b/youtube_dl/extractor/fczenit.py
@@ -0,0 +1,41 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class FczenitIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/gl(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://fc-zenit.ru/video/gl6785/',
+ 'md5': '458bacc24549173fe5a5aa29174a5606',
+ 'info_dict': {
+ 'id': '6785',
+ 'ext': 'mp4',
+ 'title': '«Зенит-ТВ»: как Олег Шатов играл против «Урала»',
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ video_title = self._html_search_regex(r'<div class=\"photoalbum__title\">([^<]+)', webpage, 'title')
+
+ bitrates_raw = self._html_search_regex(r'bitrates:.*\n(.*)\]', webpage, 'video URL')
+ bitrates = re.findall(r'url:.?\'(.+?)\'.*?bitrate:.?([0-9]{3}?)', bitrates_raw)
+
+ formats = [{
+ 'url': furl,
+ 'tbr': tbr,
+ } for furl, tbr in bitrates]
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': video_title,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/fivemin.py b/youtube_dl/extractor/fivemin.py
index 157094e8c..2955965d9 100644
--- a/youtube_dl/extractor/fivemin.py
+++ b/youtube_dl/extractor/fivemin.py
@@ -2,11 +2,15 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
- compat_str,
compat_urllib_parse,
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
+ compat_urlparse,
)
from ..utils import (
ExtractorError,
+ parse_duration,
+ replace_extension,
)
@@ -28,6 +32,7 @@ class FiveMinIE(InfoExtractor):
'id': '518013791',
'ext': 'mp4',
'title': 'iPad Mini with Retina Display Review',
+ 'duration': 177,
},
},
{
@@ -38,9 +43,52 @@ class FiveMinIE(InfoExtractor):
'id': '518086247',
'ext': 'mp4',
'title': 'How to Make a Next-Level Fruit Salad',
+ 'duration': 184,
},
},
]
+ _ERRORS = {
+ 'ErrorVideoNotExist': 'We\'re sorry, but the video you are trying to watch does not exist.',
+ 'ErrorVideoNoLongerAvailable': 'We\'re sorry, but the video you are trying to watch is no longer available.',
+ 'ErrorVideoRejected': 'We\'re sorry, but the video you are trying to watch has been removed.',
+ 'ErrorVideoUserNotGeo': 'We\'re sorry, but the video you are trying to watch cannot be viewed from your current location.',
+ 'ErrorVideoLibraryRestriction': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.',
+ 'ErrorExposurePermission': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.',
+ }
+ _QUALITIES = {
+ 1: {
+ 'width': 640,
+ 'height': 360,
+ },
+ 2: {
+ 'width': 854,
+ 'height': 480,
+ },
+ 4: {
+ 'width': 1280,
+ 'height': 720,
+ },
+ 8: {
+ 'width': 1920,
+ 'height': 1080,
+ },
+ 16: {
+ 'width': 640,
+ 'height': 360,
+ },
+ 32: {
+ 'width': 854,
+ 'height': 480,
+ },
+ 64: {
+ 'width': 1280,
+ 'height': 720,
+ },
+ 128: {
+ 'width': 640,
+ 'height': 360,
+ },
+ }
def _real_extract(self, url):
video_id = self._match_id(url)
@@ -59,26 +107,36 @@ class FiveMinIE(InfoExtractor):
'https://syn.5min.com/handlers/SenseHandler.ashx?' + query,
video_id)
if not response['success']:
- err_msg = response['errorMessage']
- if err_msg == 'ErrorVideoUserNotGeo':
- msg = 'Video not available from your location'
- else:
- msg = 'Aol said: %s' % err_msg
- raise ExtractorError(msg, expected=True, video_id=video_id)
+ raise ExtractorError(
+ '%s said: %s' % (
+ self.IE_NAME,
+ self._ERRORS.get(response['errorMessage'], response['errorMessage'])),
+ expected=True)
info = response['binding'][0]
- second_id = compat_str(int(video_id[:-2]) + 1)
formats = []
- for quality, height in [(1, 320), (2, 480), (4, 720), (8, 1080)]:
- if any(r['ID'] == quality for r in info['Renditions']):
+ parsed_video_url = compat_urllib_parse_urlparse(compat_parse_qs(
+ compat_urllib_parse_urlparse(info['EmbededURL']).query)['videoUrl'][0])
+ for rendition in info['Renditions']:
+ if rendition['RenditionType'] == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(rendition['Url'], video_id, m3u8_id='hls'))
+ elif rendition['RenditionType'] == 'aac':
+ continue
+ else:
+ rendition_url = compat_urlparse.urlunparse(parsed_video_url._replace(path=replace_extension(parsed_video_url.path.replace('//', '/%s/' % rendition['ID']), rendition['RenditionType'])))
+ quality = self._QUALITIES.get(rendition['ID'], {})
formats.append({
- 'format_id': compat_str(quality),
- 'url': 'http://avideos.5min.com/%s/%s/%s_%s.mp4' % (second_id[-3:], second_id, video_id, quality),
- 'height': height,
+ 'format_id': '%s-%d' % (rendition['RenditionType'], rendition['ID']),
+ 'url': rendition_url,
+ 'width': quality.get('width'),
+ 'height': quality.get('height'),
})
+ self._sort_formats(formats)
return {
'id': video_id,
'title': info['Title'],
+ 'thumbnail': info.get('ThumbURL'),
+ 'duration': parse_duration(info.get('Duration')),
'formats': formats,
}
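(The per-rendition URLs above are built by parsing the real media URL out of the EmbededURL query string and rewriting its path; a self-contained sketch with a hypothetical CDN URL and a simplified replace_extension:)

    import posixpath

    try:
        from urllib.parse import urlparse, parse_qs, urlunparse  # Python 3
    except ImportError:
        from urlparse import urlparse, parse_qs, urlunparse  # Python 2

    def replace_extension(path, ext):
        # Illustrative stand-in for youtube_dl.utils.replace_extension
        return posixpath.splitext(path)[0] + '.' + ext

    embed_url = 'http://embed.5min.com/?videoUrl=http%3A%2F%2Fcdn.example.invalid%2F%2F518013791.mp4'
    parsed = urlparse(parse_qs(urlparse(embed_url).query)['videoUrl'][0])

    rendition = {'ID': 4, 'RenditionType': 'mp4'}  # hypothetical rendition entry
    # Insert the rendition ID between the doubled slashes and swap the extension
    path = replace_extension(parsed.path.replace('//', '/%s/' % rendition['ID']), rendition['RenditionType'])
    print(urlunparse(parsed._replace(path=path)))  # http://cdn.example.invalid/4/518013791.mp4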
diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py
index 190d9f9ad..40ea27895 100644
--- a/youtube_dl/extractor/fktv.py
+++ b/youtube_dl/extractor/fktv.py
@@ -1,13 +1,12 @@
from __future__ import unicode_literals
import re
-import random
-import json
from .common import InfoExtractor
from ..utils import (
- get_element_by_id,
clean_html,
+ determine_ext,
+ ExtractorError,
)
@@ -17,66 +16,40 @@ class FKTVIE(InfoExtractor):
_TEST = {
'url': 'http://fernsehkritik.tv/folge-1',
+ 'md5': '21f0b0c99bce7d5b524eb1b17b1c6d79',
'info_dict': {
- 'id': '00011',
- 'ext': 'flv',
+ 'id': '1',
+ 'ext': 'mp4',
'title': 'Folge 1 vom 10. April 2007',
- 'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
+ 'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
- episode = int(self._match_id(url))
-
- video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%s.jpg' % episode
- start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%s/Start' % episode,
- episode)
- playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
- 'playlist', flags=re.DOTALL)
- files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
-
- videos = []
- for i, _ in enumerate(files, 1):
- video_id = '%04d%d' % (episode, i)
- video_url = 'http://fernsehkritik.tv/js/directme.php?file=%s%s.flv' % (episode, '' if i == 1 else '-%d' % i)
- videos.append({
- 'ext': 'flv',
- 'id': video_id,
- 'url': video_url,
- 'title': clean_html(get_element_by_id('eptitle', start_webpage)),
- 'description': clean_html(get_element_by_id('contentlist', start_webpage)),
- 'thumbnail': video_thumbnail
- })
- return {
- '_type': 'multi_video',
- 'entries': videos,
- 'id': 'folge-%s' % episode,
- }
-
-
-class FKTVPosteckeIE(InfoExtractor):
- IE_NAME = 'fernsehkritik.tv:postecke'
- _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
- _TEST = {
- 'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
- 'md5': '262f0adbac80317412f7e57b4808e5c4',
- 'info_dict': {
- 'id': '0120',
- 'ext': 'flv',
- 'title': 'Postecke 120',
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- episode = int(mobj.group('ep'))
-
- server = random.randint(2, 4)
- video_id = '%04d' % episode
- video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
- video_title = 'Postecke %d' % episode
+ episode = self._match_id(url)
+
+ webpage = self._download_webpage(
+ 'http://fernsehkritik.tv/folge-%s/play' % episode, episode)
+ title = clean_html(self._html_search_regex(
+ '<h3>([^<]+)</h3>', webpage, 'title'))
+ matches = re.search(
+ r'(?s)<video(?:(?!poster)[^>])+(?:poster="([^"]+)")?[^>]*>(.*)</video>',
+ webpage)
+ if matches is None:
+ raise ExtractorError('Unable to extract the video')
+
+ poster, sources = matches.groups()
+ if poster is None:
+ self.report_warning('unable to extract thumbnail')
+
+ urls = re.findall(r'<source[^>]+src="([^"]+)"', sources)
+ formats = [{
+ 'url': furl,
+ 'format_id': determine_ext(furl),
+ } for furl in urls]
return {
- 'id': video_id,
- 'url': video_url,
- 'title': video_title,
+ 'id': episode,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': poster,
}
diff --git a/youtube_dl/extractor/folketinget.py b/youtube_dl/extractor/folketinget.py
index 0fb29de75..75399fa7d 100644
--- a/youtube_dl/extractor/folketinget.py
+++ b/youtube_dl/extractor/folketinget.py
@@ -30,6 +30,10 @@ class FolketingetIE(InfoExtractor):
'upload_date': '20141120',
'duration': 3960,
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py
index b2284ab01..fb6d108c0 100644
--- a/youtube_dl/extractor/fourtube.py
+++ b/youtube_dl/extractor/fourtube.py
@@ -32,6 +32,7 @@ class FourTubeIE(InfoExtractor):
'view_count': int,
'like_count': int,
'categories': list,
+ 'age_limit': 18,
}
}
@@ -45,10 +46,10 @@ class FourTubeIE(InfoExtractor):
thumbnail = self._html_search_meta('thumbnailUrl', webpage)
uploader_id = self._html_search_regex(
r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">',
- webpage, 'uploader id')
+ webpage, 'uploader id', fatal=False)
uploader = self._html_search_regex(
r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">',
- webpage, 'uploader')
+ webpage, 'uploader', fatal=False)
categories_html = self._search_regex(
r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>',
@@ -67,13 +68,24 @@ class FourTubeIE(InfoExtractor):
webpage, 'like count', fatal=False))
duration = parse_duration(self._html_search_meta('duration', webpage))
- params_js = self._search_regex(
- r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
- webpage, 'initialization parameters'
- )
- params = self._parse_json('[%s]' % params_js, video_id)
- media_id = params[0]
- sources = ['%s' % p for p in params[2]]
+ media_id = self._search_regex(
+ r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=', webpage,
+ 'media id', default=None, group='id')
+ sources = [
+ quality
+ for _, quality in re.findall(r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)]
+ if not (media_id and sources):
+ player_js = self._download_webpage(
+ self._search_regex(
+ r'<script[^>]id=(["\'])playerembed\1[^>]+src=(["\'])(?P<url>.+?)\2',
+ webpage, 'player JS', group='url'),
+ video_id, 'Downloading player JS')
+ params_js = self._search_regex(
+ r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
+ player_js, 'initialization parameters')
+ params = self._parse_json('[%s]' % params_js, video_id)
+ media_id = params[0]
+ sources = ['%s' % p for p in params[2]]
token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
media_id, '+'.join(sources))
diff --git a/youtube_dl/extractor/foxnews.py b/youtube_dl/extractor/foxnews.py
index 917f76b1e..3a4a59135 100644
--- a/youtube_dl/extractor/foxnews.py
+++ b/youtube_dl/extractor/foxnews.py
@@ -1,5 +1,7 @@
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
@@ -8,7 +10,8 @@ from ..utils import (
class FoxNewsIE(InfoExtractor):
- _VALID_URL = r'https?://video\.foxnews\.com/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
+ IE_DESC = 'Fox News and Fox Business Video'
+ _VALID_URL = r'https?://(?P<host>video\.fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
_TESTS = [
{
'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
@@ -42,13 +45,19 @@ class FoxNewsIE(InfoExtractor):
'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
'only_matching': True,
},
+ {
+ 'url': 'http://video.foxbusiness.com/v/4442309889001',
+ 'only_matching': True,
+ },
]
def _real_extract(self, url):
- video_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ host = mobj.group('host')
video = self._download_json(
- 'http://video.foxnews.com/v/feed/video/%s.js?template=fox' % video_id, video_id)
+ 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id), video_id)
item = video['channel']['item']
title = item['title']
diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index 75723c00d..129984a5f 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -78,9 +78,14 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
})
self._sort_formats(formats)
+ title = info['titre']
+ subtitle = info.get('sous_titre')
+ if subtitle:
+ title += ' - %s' % subtitle
+
return {
'id': video_id,
- 'title': info['titre'],
+ 'title': title,
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
@@ -214,15 +219,15 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
},
# france5
{
- 'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
- 'md5': '78f0f4064f9074438e660785bbf2c5d9',
+ 'url': 'http://www.france5.fr/emissions/c-a-dire/videos/quels_sont_les_enjeux_de_cette_rentree_politique__31-08-2015_908948?onglet=tous&page=1',
+ 'md5': 'f6c577df3806e26471b3d21631241fd0',
'info_dict': {
- 'id': '108961659',
+ 'id': '123327454',
'ext': 'flv',
- 'title': 'C à dire ?!',
- 'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
- 'upload_date': '20140915',
- 'timestamp': 1410795000,
+ 'title': 'C à dire ?! - Quels sont les enjeux de cette rentrée politique ?',
+ 'description': 'md5:4a0d5cb5dce89d353522a84462bae5a4',
+ 'upload_date': '20150831',
+ 'timestamp': 1441035120,
},
},
# franceo
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py
index dd87257c4..f5f13689c 100644
--- a/youtube_dl/extractor/funnyordie.py
+++ b/youtube_dl/extractor/funnyordie.py
@@ -53,7 +53,7 @@ class FunnyOrDieIE(InfoExtractor):
for bitrate in bitrates:
for link in links:
formats.append({
- 'url': '%s%d.%s' % (link[0], bitrate, link[1]),
+ 'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])),
'format_id': '%s-%d' % (link[1], bitrate),
'vbr': bitrate,
})
diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py
index 43f916412..a6834db43 100644
--- a/youtube_dl/extractor/gdcvault.py
+++ b/youtube_dl/extractor/gdcvault.py
@@ -7,7 +7,10 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
-from ..utils import remove_end
+from ..utils import (
+ remove_end,
+ HEADRequest,
+)
class GDCVaultIE(InfoExtractor):
@@ -73,10 +76,20 @@ class GDCVaultIE(InfoExtractor):
return video_formats
def _parse_flv(self, xml_description):
- video_formats = []
+ formats = []
akamai_url = xml_description.find('./metadata/akamaiHost').text
+ audios = xml_description.find('./metadata/audios')
+ if audios is not None:
+ for audio in audios:
+ formats.append({
+ 'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
+ 'play_path': remove_end(audio.get('url'), '.flv'),
+ 'ext': 'flv',
+ 'vcodec': 'none',
+ 'format_id': audio.get('code'),
+ })
slide_video_path = xml_description.find('./metadata/slideVideo').text
- video_formats.append({
+ formats.append({
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(slide_video_path, '.flv'),
'ext': 'flv',
@@ -86,7 +99,7 @@ class GDCVaultIE(InfoExtractor):
'format_id': 'slides',
})
speaker_video_path = xml_description.find('./metadata/speakerVideo').text
- video_formats.append({
+ formats.append({
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(speaker_video_path, '.flv'),
'ext': 'flv',
@@ -95,7 +108,7 @@ class GDCVaultIE(InfoExtractor):
'preference': -1,
'format_id': 'speaker',
})
- return video_formats
+ return formats
def _login(self, webpage_url, display_id):
(username, password) = self._get_login_info()
@@ -133,16 +146,18 @@ class GDCVaultIE(InfoExtractor):
r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
start_page, 'url', default=None)
if direct_url:
- video_url = 'http://www.gdcvault.com/' + direct_url
title = self._html_search_regex(
r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
start_page, 'title')
+ video_url = 'http://www.gdcvault.com' + direct_url
+ # resolve the url so that we can detect the correct extension
+ head = self._request_webpage(HEADRequest(video_url), video_id)
+ video_url = head.geturl()
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
- 'ext': 'flv',
'title': title,
}
@@ -168,8 +183,8 @@ class GDCVaultIE(InfoExtractor):
# Fallback to the older format
xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')
- xml_decription_url = xml_root + 'xml/' + xml_name
- xml_description = self._download_xml(xml_decription_url, display_id)
+ xml_description_url = xml_root + 'xml/' + xml_name
+ xml_description = self._download_xml(xml_description_url, display_id)
video_title = xml_description.find('./metadata/title').text
video_formats = self._parse_mp4(xml_description)
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index cd133a10c..ca5fbafb2 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import os
import re
+import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
@@ -48,6 +49,8 @@ from .vimeo import VimeoIE
from .dailymotion import DailymotionCloudIE
from .onionstudios import OnionStudiosIE
from .snagfilms import SnagFilmsEmbedIE
+from .screenwavemedia import ScreenwaveMediaIE
+from .mtv import MTVServicesEmbeddedIE
class GenericIE(InfoExtractor):
@@ -130,6 +133,89 @@ class GenericIE(InfoExtractor):
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
+ # SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
+ {
+ 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
+ 'info_dict': {
+ 'id': 'smil',
+ 'ext': 'mp4',
+ 'title': 'Automatics, robotics and biocybernetics',
+ 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
+ 'formats': 'mincount:16',
+ 'subtitles': 'mincount:1',
+ },
+ 'params': {
+ 'force_generic_extractor': True,
+ 'skip_download': True,
+ },
+ },
+ # SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
+ {
+ 'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
+ 'info_dict': {
+ 'id': 'hds',
+ 'ext': 'flv',
+ 'title': 'hds',
+ 'formats': 'mincount:1',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # SMIL from https://www.restudy.dk/video/play/id/1637
+ {
+ 'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
+ 'info_dict': {
+ 'id': 'video_1637',
+ 'ext': 'flv',
+ 'title': 'video_1637',
+ 'formats': 'mincount:3',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
+ {
+ 'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
+ 'info_dict': {
+ 'id': 'smil-service',
+ 'ext': 'flv',
+ 'title': 'smil-service',
+ 'formats': 'mincount:1',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
+ {
+ 'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
+ 'info_dict': {
+ 'id': '4719370',
+ 'ext': 'mp4',
+ 'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
+ 'formats': 'mincount:3',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
+ {
+ 'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
+ 'info_dict': {
+ 'id': 'mZlp2ctYIUEB',
+ 'ext': 'mp4',
+ 'title': 'Tikibad ontruimd wegens brand',
+ 'description': 'md5:05ca046ff47b931f9b04855015e163a4',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 33,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
@@ -147,6 +233,22 @@ class GenericIE(InfoExtractor):
}
},
{
+ # redirect in Refresh HTTP header
+ 'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
+ 'info_dict': {
+ 'id': 'pO8h3EaFRdo',
+ 'ext': 'mp4',
+ 'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
+ 'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
+ 'upload_date': '20150917',
+ 'uploader_id': 'brtvofficial',
+ 'uploader': 'Boiler Room',
+ },
+ 'params': {
+ 'skip_download': False,
+ },
+ },
+ {
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
@@ -236,6 +338,19 @@ class GenericIE(InfoExtractor):
},
'add_ie': ['Ooyala'],
},
+ {
+ # ooyala video embedded with http://player.ooyala.com/iframe.js
+ 'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
+ 'info_dict': {
+ 'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
+ 'ext': 'mp4',
+ 'title': '"Steve Jobs: Man in the Machine" trailer',
+ 'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
# multiple ooyala embeds on SBN network websites
{
'url': 'http://www.sbnation.com/college-football-recruiting/2015/2/3/7970291/national-signing-day-rationalizations-itll-be-ok-itll-be-ok',
@@ -276,14 +391,6 @@ class GenericIE(InfoExtractor):
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
},
- # BBC iPlayer embeds
- {
- 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/posts/BUGGER',
- 'info_dict': {
- 'title': 'BBC - Blogs - Adam Curtis - BUGGER',
- },
- 'playlist_mincount': 18,
- },
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
@@ -913,6 +1020,16 @@ class GenericIE(InfoExtractor):
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
+ },
+ # ScreenwaveMedia embed
+ {
+ 'url': 'http://www.thecinemasnob.com/the-cinema-snob/a-nightmare-on-elm-street-2-freddys-revenge1',
+ 'md5': '24ace5baba0d35d55c6810b51f34e9e0',
+ 'info_dict': {
+ 'id': 'cinemasnob-55d26273809dd',
+ 'ext': 'mp4',
+ 'title': 'cinemasnob',
+ },
}
]
@@ -1118,11 +1235,15 @@ class GenericIE(InfoExtractor):
self.report_extraction(video_id)
- # Is it an RSS feed?
+ # Is it an RSS feed, a SMIL file or an XSPF playlist?
try:
doc = parse_xml(webpage)
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
+ elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
+ return self._parse_smil(doc, url, video_id)
+ elif doc.tag == '{http://xspf.org/ns/0/}playlist':
+ return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
except compat_xml_parse_error:
pass
@@ -1328,7 +1449,7 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
- mobj = (re.search(r'player\.ooyala\.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
+ mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
@@ -1491,12 +1612,9 @@ class GenericIE(InfoExtractor):
return self.url_result(url, ie='Vulture')
# Look for embedded mtvservices player
- mobj = re.search(
- r'<iframe src="(?P<url>https?://media\.mtvnservices\.com/embed/[^"]+)"',
- webpage)
- if mobj is not None:
- url = unescapeHTML(mobj.group('url'))
- return self.url_result(url, ie='MTVServicesEmbedded')
+ mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
+ if mtvservices_url:
+ return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
@@ -1535,7 +1653,7 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
- r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
+ r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
@@ -1626,6 +1744,11 @@ class GenericIE(InfoExtractor):
if snagfilms_url:
return self.url_result(snagfilms_url)
+ # Look for ScreenwaveMedia embeds
+ mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage)
+ if mobj is not None:
+ return self.url_result(unescapeHTML(mobj.group('url')), 'ScreenwaveMedia')
+
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
@@ -1663,7 +1786,7 @@ class GenericIE(InfoExtractor):
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
- r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
+ r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
@@ -1689,7 +1812,7 @@ class GenericIE(InfoExtractor):
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
- found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
+ found = re.findall(r'(?s)<(?:video|audio)[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
@@ -1700,9 +1823,12 @@ class GenericIE(InfoExtractor):
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
+ # In python 2 response HTTP headers are bytestrings
+ if sys.version_info < (3, 0) and isinstance(refresh_header, str):
+ refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
- new_url = compat_urlparse.urljoin(url, found.group(1))
+ new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
'_type': 'url',
@@ -1724,7 +1850,8 @@ class GenericIE(InfoExtractor):
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
- if determine_ext(video_url) == 'smil':
+ ext = determine_ext(video_url)
+ if ext == 'smil':
entries.append({
'id': video_id,
'formats': self._extract_smil_formats(video_url, video_id),
@@ -1732,6 +1859,8 @@ class GenericIE(InfoExtractor):
'title': video_title,
'age_limit': age_limit,
})
+ elif ext == 'xspf':
+ return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
else:
entries.append({
'id': video_id,
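(The Refresh-header handling added above needs the ISO-8859-1 decode because Python 2 returns HTTP header values as bytestrings; a small sketch of that path, with an illustrative helper name:)

    import re
    import sys

    REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'

    def refresh_redirect_target(refresh_header):
        # Decode first on Python 2, where header values are bytestrings
        if sys.version_info < (3, 0) and isinstance(refresh_header, str):
            refresh_header = refresh_header.decode('iso-8859-1')
        found = re.search(REDIRECT_REGEX, refresh_header)
        return found.group(1) if found else None

    print(refresh_redirect_target("0; url=http://example.invalid/watch?v=abc"))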
diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index 8a95793ca..33d6432a6 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -13,6 +13,7 @@ from ..compat import (
from ..utils import (
ExtractorError,
float_or_none,
+ int_or_none,
)
@@ -359,13 +360,8 @@ class GloboIE(InfoExtractor):
self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
title = video['title']
- duration = float_or_none(video['duration'], 1000)
- like_count = video['likes']
- uploader = video['channel']
- uploader_id = video['channel_id']
formats = []
-
for resource in video['resources']:
resource_id = resource.get('_id')
if not resource_id:
@@ -407,6 +403,11 @@ class GloboIE(InfoExtractor):
self._sort_formats(formats)
+ duration = float_or_none(video.get('duration'), 1000)
+ like_count = int_or_none(video.get('likes'))
+ uploader = video.get('channel')
+ uploader_id = video.get('channel_id')
+
return {
'id': video_id,
'title': title,
diff --git a/youtube_dl/extractor/gorillavid.py b/youtube_dl/extractor/gorillavid.py
index f006f0cb1..d23e3eac1 100644
--- a/youtube_dl/extractor/gorillavid.py
+++ b/youtube_dl/extractor/gorillavid.py
@@ -10,15 +10,16 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ encode_dict,
int_or_none,
)
class GorillaVidIE(InfoExtractor):
- IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net'
+ IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net and filehoot.com'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
- (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net))/
+ (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com))/
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
'''
@@ -67,13 +68,22 @@ class GorillaVidIE(InfoExtractor):
}, {
'url': 'http://movpod.in/0wguyyxi1yca',
'only_matching': True,
+ }, {
+ 'url': 'http://filehoot.com/3ivfabn7573c.html',
+ 'info_dict': {
+ 'id': '3ivfabn7573c',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4',
+ 'thumbnail': 're:http://.*\.jpg',
+ }
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
+ url = 'http://%s/%s' % (mobj.group('host'), video_id)
+ webpage = self._download_webpage(url, video_id)
if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
@@ -87,7 +97,7 @@ class GorillaVidIE(InfoExtractor):
if countdown:
self._sleep(countdown, video_id)
- post = compat_urllib_parse.urlencode(fields)
+ post = compat_urllib_parse.urlencode(encode_dict(fields))
req = compat_urllib_request.Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
@@ -95,7 +105,7 @@ class GorillaVidIE(InfoExtractor):
webpage = self._download_webpage(req, video_id, 'Downloading video page')
title = self._search_regex(
- [r'style="z-index: [0-9]+;">([^<]+)</span>', r'>Watch (.+) '],
+ [r'style="z-index: [0-9]+;">([^<]+)</span>', r'<td nowrap>([^<]+)</td>', r'>Watch (.+) '],
webpage, 'title', default=None) or self._og_search_title(webpage)
video_url = self._search_regex(
r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url')
diff --git a/youtube_dl/extractor/hostingbulk.py b/youtube_dl/extractor/hostingbulk.py
deleted file mode 100644
index a3154cfde..000000000
--- a/youtube_dl/extractor/hostingbulk.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
-from ..utils import (
- ExtractorError,
- int_or_none,
- urlencode_postdata,
-)
-
-
-class HostingBulkIE(InfoExtractor):
- _VALID_URL = r'''(?x)
- https?://(?:www\.)?hostingbulk\.com/
- (?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html'''
- _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
- _TEST = {
- 'url': 'http://hostingbulk.com/n0ulw1hv20fm.html',
- 'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f',
- 'info_dict': {
- 'id': 'n0ulw1hv20fm',
- 'ext': 'mp4',
- 'title': 'md5:5afeba33f48ec87219c269e054afd622',
- 'filesize': 6816081,
- 'thumbnail': 're:^http://.*\.jpg$',
- }
- }
-
- def _real_extract(self, url):
- video_id = self._match_id(url)
- url = 'http://hostingbulk.com/{0:}.html'.format(video_id)
-
- # Custom request with cookie to set language to English, so our file
- # deleted regex would work.
- request = compat_urllib_request.Request(
- url, headers={'Cookie': 'lang=english'})
- webpage = self._download_webpage(request, video_id)
-
- if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
- raise ExtractorError('Video %s does not exist' % video_id,
- expected=True)
-
- title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title')
- filesize = int_or_none(
- self._search_regex(
- r'<small>\((\d+)\sbytes?\)</small>',
- webpage,
- 'filesize',
- fatal=False
- )
- )
- thumbnail = self._search_regex(
- r'<img src="([^"]+)".+?class="pic"',
- webpage, 'thumbnail', fatal=False)
-
- fields = self._hidden_inputs(webpage)
-
- request = compat_urllib_request.Request(url, urlencode_postdata(fields))
- request.add_header('Content-type', 'application/x-www-form-urlencoded')
- response = self._request_webpage(request, video_id,
- 'Submiting download request')
- video_url = response.geturl()
-
- formats = [{
- 'format_id': 'sd',
- 'filesize': filesize,
- 'url': video_url,
- }]
-
- return {
- 'id': video_id,
- 'title': title,
- 'thumbnail': thumbnail,
- 'formats': formats,
- }
diff --git a/youtube_dl/extractor/iconosquare.py b/youtube_dl/extractor/iconosquare.py
index 70e4c0d41..a39f422e9 100644
--- a/youtube_dl/extractor/iconosquare.py
+++ b/youtube_dl/extractor/iconosquare.py
@@ -1,7 +1,11 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+ int_or_none,
+ get_element_by_id,
+ remove_end,
+)
class IconosquareIE(InfoExtractor):
@@ -12,7 +16,7 @@ class IconosquareIE(InfoExtractor):
'info_dict': {
'id': '522207370455279102_24101272',
'ext': 'mp4',
- 'title': 'Instagram media by @aguynamedpatrick (Patrick Janelle)',
+ 'title': 'Instagram photo by @aguynamedpatrick (Patrick Janelle)',
'description': 'md5:644406a9ec27457ed7aa7a9ebcd4ce3d',
'timestamp': 1376471991,
'upload_date': '20130814',
@@ -29,8 +33,7 @@ class IconosquareIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
media = self._parse_json(
- self._search_regex(
- r'window\.media\s*=\s*({.+?});\n', webpage, 'media'),
+ get_element_by_id('mediaJson', webpage),
video_id)
formats = [{
@@ -41,9 +44,7 @@ class IconosquareIE(InfoExtractor):
} for format_id, f in media['videos'].items()]
self._sort_formats(formats)
- title = self._html_search_regex(
- r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
- webpage, 'title')
+ title = remove_end(self._og_search_title(webpage), ' - via Iconosquare')
timestamp = int_or_none(media.get('created_time') or media.get('caption', {}).get('created_time'))
description = media.get('caption', {}).get('text')
@@ -61,6 +62,14 @@ class IconosquareIE(InfoExtractor):
'height': int_or_none(t.get('height'))
} for thumbnail_id, t in media.get('images', {}).items()]
+ comments = [{
+ 'id': comment.get('id'),
+ 'text': comment['text'],
+ 'timestamp': int_or_none(comment.get('created_time')),
+ 'author': comment.get('from', {}).get('full_name'),
+ 'author_id': comment.get('from', {}).get('username'),
+ } for comment in media.get('comments', {}).get('data', []) if 'text' in comment]
+
return {
'id': video_id,
'title': title,
@@ -72,4 +81,5 @@ class IconosquareIE(InfoExtractor):
'comment_count': comment_count,
'like_count': like_count,
'formats': formats,
+ 'comments': comments,
}
diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py
index d692ea79a..70c8ca64e 100644
--- a/youtube_dl/extractor/imgur.py
+++ b/youtube_dl/extractor/imgur.py
@@ -13,7 +13,7 @@ from ..utils import (
class ImgurIE(InfoExtractor):
- _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)'
+ _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!gallery)(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'https://i.imgur.com/A61SaA1.gifv',
@@ -97,3 +97,28 @@ class ImgurIE(InfoExtractor):
'description': self._og_search_description(webpage),
'title': self._og_search_title(webpage),
}
+
+
+class ImgurAlbumIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:i\.)?imgur\.com/gallery/(?P<id>[a-zA-Z0-9]+)'
+
+ _TEST = {
+ 'url': 'http://imgur.com/gallery/Q95ko',
+ 'info_dict': {
+ 'id': 'Q95ko',
+ },
+ 'playlist_count': 25,
+ }
+
+ def _real_extract(self, url):
+ album_id = self._match_id(url)
+
+ album_images = self._download_json(
+ 'http://imgur.com/gallery/%s/album_images/hit.json?all=true' % album_id,
+ album_id)['data']['images']
+
+ entries = [
+ self.url_result('http://imgur.com/%s' % image['hash'])
+ for image in album_images if image.get('hash')]
+
+ return self.playlist_result(entries, album_id)
diff --git a/youtube_dl/extractor/indavideo.py b/youtube_dl/extractor/indavideo.py
new file mode 100644
index 000000000..12fb5e8e1
--- /dev/null
+++ b/youtube_dl/extractor/indavideo.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_age_limit,
+ parse_iso8601,
+)
+
+
+class IndavideoEmbedIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:(?:embed\.)?indavideo\.hu/player/video/|assets\.indavideo\.hu/swf/player\.swf\?.*\b(?:v(?:ID|id))=)(?P<id>[\da-f]+)'
+ _TESTS = [{
+ 'url': 'http://indavideo.hu/player/video/1bdc3c6d80/',
+ 'md5': 'f79b009c66194acacd40712a6778acfa',
+ 'info_dict': {
+ 'id': '1837039',
+ 'ext': 'mp4',
+ 'title': 'Cicatánc',
+ 'description': '',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'cukiajanlo',
+ 'uploader_id': '83729',
+ 'timestamp': 1439193826,
+ 'upload_date': '20150810',
+ 'duration': 72,
+ 'age_limit': 0,
+ 'tags': ['tánc', 'cica', 'cuki', 'cukiajanlo', 'newsroom'],
+ },
+ }, {
+ 'url': 'http://embed.indavideo.hu/player/video/1bdc3c6d80?autostart=1&hide=1',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://assets.indavideo.hu/swf/player.swf?v=fe25e500&vID=1bdc3c6d80&autostart=1&hide=1&i=1',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ video = self._download_json(
+ 'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id,
+ video_id)['data']
+
+ title = video['title']
+
+ video_urls = video.get('video_files', [])
+ video_file = video.get('video_file')
+ if video_file:
+ video_urls.append(video_file)
+ video_urls = list(set(video_urls))
+
+ video_prefix = video_urls[0].rsplit('/', 1)[0]
+
+ for flv_file in video.get('flv_files', []):
+ flv_url = '%s/%s' % (video_prefix, flv_file)
+ if flv_url not in video_urls:
+ video_urls.append(flv_url)
+
+ formats = [{
+ 'url': video_url,
+ 'height': self._search_regex(r'\.(\d{3,4})\.mp4$', video_url, 'height', default=None),
+ } for video_url in video_urls]
+ self._sort_formats(formats)
+
+ timestamp = video.get('date')
+ if timestamp:
+ # upload date is in CEST
+ timestamp = parse_iso8601(timestamp + ' +0200', ' ')
+
+ thumbnails = [{
+ 'url': self._proto_relative_url(thumbnail)
+ } for thumbnail in video.get('thumbnails', [])]
+
+ tags = [tag['title'] for tag in video.get('tags', [])]
+
+ return {
+ 'id': video.get('id') or video_id,
+ 'title': title,
+ 'description': video.get('description'),
+ 'thumbnails': thumbnails,
+ 'uploader': video.get('user_name'),
+ 'uploader_id': video.get('user_id'),
+ 'timestamp': timestamp,
+ 'duration': int_or_none(video.get('length')),
+ 'age_limit': parse_age_limit(video.get('age_limit')),
+ 'tags': tags,
+ 'formats': formats,
+ }
+
+
+class IndavideoIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:.+?\.)?indavideo\.hu/video/(?P<id>[^/#?]+)'
+ _TESTS = [{
+ 'url': 'http://indavideo.hu/video/Vicces_cica_1',
+ 'md5': '8c82244ba85d2a2310275b318eb51eac',
+ 'info_dict': {
+ 'id': '1335611',
+ 'display_id': 'Vicces_cica_1',
+ 'ext': 'mp4',
+ 'title': 'Vicces cica',
+ 'description': 'Játszik a tablettel. :D',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'Jet_Pack',
+ 'uploader_id': '491217',
+ 'timestamp': 1390821212,
+ 'upload_date': '20140127',
+ 'duration': 7,
+ 'age_limit': 0,
+ 'tags': ['vicces', 'macska', 'cica', 'ügyes', 'nevetés', 'játszik', 'Cukiság', 'Jet_Pack'],
+ },
+ }, {
+ 'url': 'http://index.indavideo.hu/video/2015_0728_beregszasz',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://erotika.indavideo.hu/video/Amator_tini_punci',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://film.indavideo.hu/video/f_hrom_nagymamm_volt',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+ embed_url = self._search_regex(
+ r'<link[^>]+rel="video_src"[^>]+href="(.+?)"', webpage, 'embed url')
+
+ return {
+ '_type': 'url_transparent',
+ 'ie_key': 'IndavideoEmbed',
+ 'url': embed_url,
+ 'display_id': display_id,
+ }
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
index afb7f4e61..0e53cb154 100644
--- a/youtube_dl/extractor/iqiyi.py
+++ b/youtube_dl/extractor/iqiyi.py
@@ -95,6 +95,10 @@ class IqiyiIE(InfoExtractor):
('10', 'h1'),
]
+ @staticmethod
+ def md5_text(text):
+ return hashlib.md5(text.encode('utf-8')).hexdigest()
+
def construct_video_urls(self, data, video_id, _uuid):
def do_xor(x, y):
a = y % 3
@@ -121,7 +125,7 @@ class IqiyiIE(InfoExtractor):
note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
)['t']
t = str(int(math.floor(int(tm) / (600.0))))
- return hashlib.md5((t + mg + x).encode('utf8')).hexdigest()
+ return self.md5_text(t + mg + x)
video_urls_dict = {}
for format_item in data['vp']['tkl'][0]['vs']:
@@ -179,20 +183,19 @@ class IqiyiIE(InfoExtractor):
def get_raw_data(self, tvid, video_id, enc_key, _uuid):
tm = str(int(time.time()))
+ tail = tm + tvid
param = {
'key': 'fvip',
- 'src': hashlib.md5(b'youtube-dl').hexdigest(),
+ 'src': self.md5_text('youtube-dl'),
'tvId': tvid,
'vid': video_id,
'vinfo': 1,
'tm': tm,
- 'enc': hashlib.md5(
- (enc_key + tm + tvid).encode('utf8')).hexdigest(),
+ 'enc': self.md5_text(enc_key + tail),
'qyid': _uuid,
'tn': random.random(),
'um': 0,
- 'authkey': hashlib.md5(
- (tm + tvid).encode('utf8')).hexdigest()
+ 'authkey': self.md5_text(self.md5_text('') + tail),
}
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
@@ -201,7 +204,10 @@ class IqiyiIE(InfoExtractor):
return raw_data
def get_enc_key(self, swf_url, video_id):
- enc_key = '8e29ab5666d041c3a1ea76e06dabdffb'
+ # TODO: automatic key extraction
+ # last update at 2015-10-10 for Zombie::bite
+ # '7239670519b6ac209a0bee4ef0446a6b24894b8ac2751506e42116212a0d0272e505'[2:66][1::2]
+ enc_key = '97596c0abee04ab49ba25564161ad225'
return enc_key
def _real_extract(self, url):
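(The md5_text() helper factors out the repeated hashing; with it, the new authkey is simply the hash of an empty-string digest concatenated with tm + tvid, e.g. with hypothetical values:)

    import hashlib
    import time

    def md5_text(text):
        return hashlib.md5(text.encode('utf-8')).hexdigest()

    tm = str(int(time.time()))
    tvid = '1234567890'  # hypothetical tvid
    tail = tm + tvid
    authkey = md5_text(md5_text('') + tail)
    enc = md5_text('97596c0abee04ab49ba25564161ad225' + tail)  # enc_key + tm + tvid, as in get_raw_data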
diff --git a/youtube_dl/extractor/ir90tv.py b/youtube_dl/extractor/ir90tv.py
new file mode 100644
index 000000000..214bcd5b5
--- /dev/null
+++ b/youtube_dl/extractor/ir90tv.py
@@ -0,0 +1,42 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import remove_start
+
+
+class Ir90TvIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?90tv\.ir/video/(?P<id>[0-9]+)/.*'
+ _TESTS = [{
+ 'url': 'http://90tv.ir/video/95719/%D8%B4%D8%A7%DB%8C%D8%B9%D8%A7%D8%AA-%D9%86%D9%82%D9%84-%D9%88-%D8%A7%D9%86%D8%AA%D9%82%D8%A7%D9%84%D8%A7%D8%AA-%D9%85%D9%87%D9%85-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7-940218',
+ 'md5': '411dbd94891381960cb9e13daa47a869',
+ 'info_dict': {
+ 'id': '95719',
+ 'ext': 'mp4',
+ 'title': 'شایعات نقل و انتقالات مهم فوتبال اروپا 94/02/18',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }, {
+ 'url': 'http://www.90tv.ir/video/95719/%D8%B4%D8%A7%DB%8C%D8%B9%D8%A7%D8%AA-%D9%86%D9%82%D9%84-%D9%88-%D8%A7%D9%86%D8%AA%D9%82%D8%A7%D9%84%D8%A7%D8%AA-%D9%85%D9%87%D9%85-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7-940218',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = remove_start(self._html_search_regex(
+ r'<title>([^<]+)</title>', webpage, 'title'), '90tv.ir :: ')
+
+ video_url = self._search_regex(
+ r'<source[^>]+src="([^"]+)"', webpage, 'video url')
+
+ thumbnail = self._search_regex(r'poster="([^"]+)"', webpage, 'thumbnail url', fatal=False)
+
+ return {
+ 'url': video_url,
+ 'id': video_id,
+ 'title': title,
+ 'video_url': video_url,
+ 'thumbnail': thumbnail,
+ }
diff --git a/youtube_dl/extractor/jeuxvideo.py b/youtube_dl/extractor/jeuxvideo.py
index 1df084d87..eef7daa29 100644
--- a/youtube_dl/extractor/jeuxvideo.py
+++ b/youtube_dl/extractor/jeuxvideo.py
@@ -28,7 +28,7 @@ class JeuxVideoIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
- title = self._html_search_meta('name', webpage)
+ title = self._html_search_meta('name', webpage) or self._og_search_title(webpage)
config_url = self._html_search_regex(
r'data-src="(/contenu/medias/video.php.*?)"',
webpage, 'config URL')
diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py
index d28730492..3dca0e566 100644
--- a/youtube_dl/extractor/kaltura.py
+++ b/youtube_dl/extractor/kaltura.py
@@ -13,12 +13,24 @@ from ..utils import (
class KalturaIE(InfoExtractor):
_VALID_URL = r'''(?x)
- (?:kaltura:|
- https?://(:?(?:www|cdnapisec)\.)?kaltura\.com/index\.php/kwidget/(?:[^/]+/)*?wid/_
- )(?P<partner_id>\d+)
- (?::|
- /(?:[^/]+/)*?entry_id/
- )(?P<id>[0-9a-z_]+)'''
+ (?:
+ kaltura:(?P<partner_id_s>\d+):(?P<id_s>[0-9a-z_]+)|
+ https?://
+ (:?(?:www|cdnapisec)\.)?kaltura\.com/
+ (?:
+ (?:
+ # flash player
+ index\.php/kwidget/
+ (?:[^/]+/)*?wid/_(?P<partner_id>\d+)/
+ (?:[^/]+/)*?entry_id/(?P<id>[0-9a-z_]+)|
+ # html5 player
+ html5/html5lib/
+ (?:[^/]+/)*?entry_id/(?P<id_html5>[0-9a-z_]+)
+ .*\?.*\bwid=_(?P<partner_id_html5>\d+)
+ )
+ )
+ )
+ '''
_API_BASE = 'http://cdnapi.kaltura.com/api_v3/index.php?'
_TESTS = [
{
@@ -43,6 +55,10 @@ class KalturaIE(InfoExtractor):
'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
'only_matching': True,
},
+ {
+ 'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342',
+ 'only_matching': True,
+ }
]
def _kaltura_api_call(self, video_id, actions, *args, **kwargs):
@@ -105,9 +121,9 @@ class KalturaIE(InfoExtractor):
video_id, actions, note='Downloading video info JSON')
def _real_extract(self, url):
- video_id = self._match_id(url)
mobj = re.match(self._VALID_URL, url)
- partner_id, entry_id = mobj.group('partner_id'), mobj.group('id')
+ partner_id = mobj.group('partner_id_s') or mobj.group('partner_id') or mobj.group('partner_id_html5')
+ entry_id = mobj.group('id_s') or mobj.group('id') or mobj.group('id_html5')
info, source_data = self._get_video_info(entry_id, partner_id)
@@ -126,7 +142,7 @@ class KalturaIE(InfoExtractor):
self._sort_formats(formats)
return {
- 'id': video_id,
+ 'id': entry_id,
'title': info['name'],
'formats': formats,
'description': info.get('description'),
diff --git a/youtube_dl/extractor/keek.py b/youtube_dl/extractor/keek.py
index c0956ba09..94a03d277 100644
--- a/youtube_dl/extractor/keek.py
+++ b/youtube_dl/extractor/keek.py
@@ -1,46 +1,39 @@
+# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KeekIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)'
+ _VALID_URL = r'https?://(?:www\.)?keek\.com/keek/(?P<id>\w+)'
IE_NAME = 'keek'
_TEST = {
- 'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
- 'md5': '09c5c109067536c1cec8bac8c21fea05',
+ 'url': 'https://www.keek.com/keek/NODfbab',
+ 'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83',
'info_dict': {
'id': 'NODfbab',
'ext': 'mp4',
- 'uploader': 'youtube-dl project',
- 'uploader_id': 'ytdl',
- 'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
+ 'title': 'md5:35d42050a3ece241d5ddd7fdcc6fd896',
+ 'uploader': 'ytdl',
+ 'uploader_id': 'eGT5bab',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
- video_url = 'http://cdn.keek.com/keek/video/%s' % video_id
- thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
webpage = self._download_webpage(url, video_id)
- raw_desc = self._html_search_meta('description', webpage)
- if raw_desc:
- uploader = self._html_search_regex(
- r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False)
- uploader_id = self._html_search_regex(
- r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False)
- else:
- uploader = None
- uploader_id = None
-
return {
'id': video_id,
- 'url': video_url,
+ 'url': self._og_search_video_url(webpage),
'ext': 'mp4',
- 'title': self._og_search_title(webpage),
- 'thumbnail': thumbnail,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
+ 'title': self._og_search_description(webpage).strip(),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'uploader': self._search_regex(
+ r'data-username=(["\'])(?P<uploader>.+?)\1', webpage,
+ 'uploader', fatal=False, group='uploader'),
+ 'uploader_id': self._search_regex(
+ r'data-user-id=(["\'])(?P<uploader_id>.+?)\1', webpage,
+ 'uploader id', fatal=False, group='uploader_id'),
}
diff --git a/youtube_dl/extractor/kontrtube.py b/youtube_dl/extractor/kontrtube.py
index 720bc939b..a59c529f4 100644
--- a/youtube_dl/extractor/kontrtube.py
+++ b/youtube_dl/extractor/kontrtube.py
@@ -4,7 +4,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+ int_or_none,
+ parse_duration,
+)
class KontrTubeIE(InfoExtractor):
@@ -34,33 +37,28 @@ class KontrTubeIE(InfoExtractor):
webpage = self._download_webpage(
url, display_id, 'Downloading page')
- video_url = self._html_search_regex(
+ video_url = self._search_regex(
r"video_url\s*:\s*'(.+?)/?',", webpage, 'video URL')
- thumbnail = self._html_search_regex(
- r"preview_url\s*:\s*'(.+?)/?',", webpage, 'video thumbnail', fatal=False)
+ thumbnail = self._search_regex(
+ r"preview_url\s*:\s*'(.+?)/?',", webpage, 'thumbnail', fatal=False)
title = self._html_search_regex(
- r'<title>(.+?)</title>', webpage, 'video title')
+ r'(?s)<h2>(.+?)</h2>', webpage, 'title')
description = self._html_search_meta(
- 'description', webpage, 'video description')
+ 'description', webpage, 'description')
- mobj = re.search(
- r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>',
- webpage)
- duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
+ duration = self._search_regex(
+ r'Длительность: <em>([^<]+)</em>', webpage, 'duration', fatal=False)
+ if duration:
+ duration = parse_duration(duration.replace('мин', 'min').replace('сек', 'sec'))
- view_count = self._html_search_regex(
- r'<div class="col_2">Просмотров: <span>(\d+)</span></div>',
+ view_count = self._search_regex(
+ r'Просмотров: <em>([^<]+)</em>',
webpage, 'view count', fatal=False)
+ if view_count:
+ view_count = int_or_none(view_count.replace(' ', ''))
- comment_count = None
- comment_str = self._html_search_regex(
- r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count', fatal=False)
- if comment_str.startswith('комментариев нет'):
- comment_count = 0
- else:
- mobj = re.search(r'\d+ из (?P<total>\d+) комментариев', comment_str)
- if mobj:
- comment_count = mobj.group('total')
+ comment_count = int_or_none(self._search_regex(
+ r'Комментарии \((\d+)\)<', webpage, 'comment count', fatal=False))
return {
'id': video_id,
diff --git a/youtube_dl/extractor/krasview.py b/youtube_dl/extractor/krasview.py
index 96f95979a..0ae8ebd68 100644
--- a/youtube_dl/extractor/krasview.py
+++ b/youtube_dl/extractor/krasview.py
@@ -25,6 +25,9 @@ class KrasViewIE(InfoExtractor):
'duration': 27,
'thumbnail': 're:^https?://.*\.jpg',
},
+ 'params': {
+ 'skip_download': 'Not accessible from Travis CI server',
+ },
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/kuwo.py b/youtube_dl/extractor/kuwo.py
index 1077846f2..0c8ed5d07 100644
--- a/youtube_dl/extractor/kuwo.py
+++ b/youtube_dl/extractor/kuwo.py
@@ -57,6 +57,7 @@ class KuwoIE(KuwoBaseIE):
'upload_date': '20080122',
'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c'
},
+ 'skip': 'this song has been taken offline due to copyright issues',
}, {
'url': 'http://www.kuwo.cn/yinyue/6446136/',
'info_dict': {
@@ -76,9 +77,11 @@ class KuwoIE(KuwoBaseIE):
webpage = self._download_webpage(
url, song_id, note='Download song detail info',
errnote='Unable to get song detail info')
+ if '对不起,该歌曲由于版权问题已被下线,将返回网站首页' in webpage:
+ raise ExtractorError('this song has been taken offline due to copyright issues', expected=True)
song_name = self._html_search_regex(
- r'<h1[^>]+title="([^"]+)">', webpage, 'song name')
+ r'(?s)class="(?:[^"\s]+\s+)*title(?:\s+[^"\s]+)*".*?<h1[^>]+title="([^"]+)"', webpage, 'song name')
singer_name = self._html_search_regex(
r'<div[^>]+class="s_img">\s*<a[^>]+title="([^>]+)"',
webpage, 'singer name', fatal=False)
@@ -202,6 +205,7 @@ class KuwoSingerIE(InfoExtractor):
'title': 'Ali',
},
'playlist_mincount': 95,
+ 'skip': 'Regularly stalls travis build', # See https://travis-ci.org/rg3/youtube-dl/jobs/78878540
}]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/lecture2go.py b/youtube_dl/extractor/lecture2go.py
new file mode 100644
index 000000000..40a3d2346
--- /dev/null
+++ b/youtube_dl/extractor/lecture2go.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ parse_duration,
+ int_or_none,
+)
+
+
+class Lecture2GoIE(InfoExtractor):
+ _VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473',
+ 'md5': 'ac02b570883020d208d405d5a3fd2f7f',
+ 'info_dict': {
+ 'id': '17473',
+ 'ext': 'flv',
+ 'title': '2 - Endliche Automaten und reguläre Sprachen',
+ 'creator': 'Frank Heitmann',
+ 'duration': 5220,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_regex(r'<em[^>]+class="title">(.+)</em>', webpage, 'title')
+
+ formats = []
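+ # Every "src" URL referenced by the player setup becomes a format; f4m/m3u8 manifests are expanded into their variants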
+ for format_url in set(re.findall(r'"src","([^"]+)"', webpage)):
+ ext = determine_ext(format_url)
+ if ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(format_url, video_id))
+ elif ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(format_url, video_id))
+ else:
+ formats.append({
+ 'url': format_url,
+ })
+
+ self._sort_formats(formats)
+
+ creator = self._html_search_regex(
+ r'<div[^>]+id="description">([^<]+)</div>', webpage, 'creator', fatal=False)
+ duration = parse_duration(self._html_search_regex(
+ r'Duration:\s*</em>\s*<em[^>]*>([^<]+)</em>', webpage, 'duration', fatal=False))
+ view_count = int_or_none(self._html_search_regex(
+ r'Views:\s*</em>\s*<em[^>]+>(\d+)</em>', webpage, 'view count', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'creator': creator,
+ 'duration': duration,
+ 'view_count': view_count,
+ }
diff --git a/youtube_dl/extractor/letv.py b/youtube_dl/extractor/letv.py
index ba2ae8085..a28abb0f0 100644
--- a/youtube_dl/extractor/letv.py
+++ b/youtube_dl/extractor/letv.py
@@ -15,6 +15,7 @@ from ..utils import (
determine_ext,
ExtractorError,
parse_iso8601,
+ int_or_none,
)
@@ -134,7 +135,7 @@ class LetvIE(InfoExtractor):
}
if format_id[-1:] == 'p':
- url_info_dict['height'] = format_id[:-1]
+ url_info_dict['height'] = int_or_none(format_id[:-1])
urls.append(url_info_dict)
diff --git a/youtube_dl/extractor/libsyn.py b/youtube_dl/extractor/libsyn.py
index 9ab1416f5..d375695f5 100644
--- a/youtube_dl/extractor/libsyn.py
+++ b/youtube_dl/extractor/libsyn.py
@@ -8,9 +8,9 @@ from ..utils import unified_strdate
class LibsynIE(InfoExtractor):
- _VALID_URL = r'https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+)'
+ _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
- _TEST = {
+ _TESTS = [{
'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
'md5': '443360ee1b58007bc3dcf09b41d093bb',
'info_dict': {
@@ -19,12 +19,24 @@ class LibsynIE(InfoExtractor):
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
+ 'thumbnail': 're:^https?://.*',
},
- }
+ }, {
+ 'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
+ 'md5': '6c5cb21acd622d754d3b1a92b582ce42',
+ 'info_dict': {
+ 'id': '3727166',
+ 'ext': 'mp3',
+ 'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
+ 'upload_date': '20150818',
+ 'thumbnail': 're:^https?://.*',
+ }
+ }]
def _real_extract(self, url):
- video_id = self._match_id(url)
-
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('id')
+ url = m.group('mainurl')
webpage = self._download_webpage(url, video_id)
formats = [{
@@ -32,20 +44,18 @@ class LibsynIE(InfoExtractor):
} for media_url in set(re.findall('var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]
podcast_title = self._search_regex(
- r'<h2>([^<]+)</h2>', webpage, 'title')
+ r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None)
episode_title = self._search_regex(
- r'<h3>([^<]+)</h3>', webpage, 'title', default=None)
+ r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title')
title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
description = self._html_search_regex(
r'<div id="info_text_body">(.+?)</div>', webpage,
- 'description', fatal=False)
-
+ 'description', default=None)
thumbnail = self._search_regex(
r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
webpage, 'thumbnail', fatal=False)
-
release_date = unified_strdate(self._search_regex(
r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
diff --git a/youtube_dl/extractor/limelight.py b/youtube_dl/extractor/limelight.py
new file mode 100644
index 000000000..fb03dd527
--- /dev/null
+++ b/youtube_dl/extractor/limelight.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ float_or_none,
+ int_or_none,
+)
+
+
+class LimelightBaseIE(InfoExtractor):
+ _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s'
+ _API_URL = 'http://api.video.limelight.com/rest/organizations/%s/%s/%s/%s.json'
+
+ def _call_playlist_service(self, item_id, method, fatal=True):
+ return self._download_json(
+ self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method),
+ item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal)
+
+ def _call_api(self, organization_id, item_id, method):
+ return self._download_json(
+ self._API_URL % (organization_id, self._API_PATH, item_id, method),
+ item_id, 'Downloading API %s JSON' % method)
+
+ def _extract(self, item_id, pc_method, mobile_method, meta_method):
+ pc = self._call_playlist_service(item_id, pc_method)
+ metadata = self._call_api(pc['orgId'], item_id, meta_method)
+ mobile = self._call_playlist_service(item_id, mobile_method, fatal=False)
+ return pc, mobile, metadata
+
+ def _extract_info(self, streams, mobile_urls, properties):
+ video_id = properties['media_id']
+ formats = []
+
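+ # PC playlist streams are either HDS (f4m) manifests or plain URLs; RTMP URLs are split into url/app/play_path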
+ for stream in streams:
+ stream_url = stream.get('url')
+ if not stream_url:
+ continue
+ if '.f4m' in stream_url:
+ formats.extend(self._extract_f4m_formats(stream_url, video_id))
+ else:
+ fmt = {
+ 'url': stream_url,
+ 'abr': float_or_none(stream.get('audioBitRate')),
+ 'vbr': float_or_none(stream.get('videoBitRate')),
+ 'fps': float_or_none(stream.get('videoFrameRate')),
+ 'width': int_or_none(stream.get('videoWidthInPixels')),
+ 'height': int_or_none(stream.get('videoHeightInPixels')),
+ 'ext': determine_ext(stream_url)
+ }
+ rtmp = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', stream_url)
+ if rtmp:
+ format_id = 'rtmp'
+ if stream.get('videoBitRate'):
+ format_id += '-%d' % int_or_none(stream['videoBitRate'])
+ fmt.update({
+ 'url': rtmp.group('url'),
+ 'play_path': rtmp.group('playpath'),
+ 'app': rtmp.group('app'),
+ 'ext': 'flv',
+ 'format_id': format_id,
+ })
+ formats.append(fmt)
+
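+ # Mobile URLs are added with lower preference; m3u8 entries are expanded into native HLS formats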
+ for mobile_url in mobile_urls:
+ media_url = mobile_url.get('mobileUrl')
+ if not media_url:
+ continue
+ format_id = mobile_url.get('targetMediaPlatform')
+ if determine_ext(media_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', entry_protocol='m3u8_native',
+ preference=-1, m3u8_id=format_id))
+ else:
+ formats.append({
+ 'url': media_url,
+ 'format_id': format_id,
+ 'preference': -1,
+ })
+
+ self._sort_formats(formats)
+
+ title = properties['title']
+ description = properties.get('description')
+ timestamp = int_or_none(properties.get('publish_date') or properties.get('create_date'))
+ duration = float_or_none(properties.get('duration_in_milliseconds'), 1000)
+ filesize = int_or_none(properties.get('total_storage_in_bytes'))
+ categories = [properties.get('category')]
+ tags = properties.get('tags', [])
+ thumbnails = [{
+ 'url': thumbnail['url'],
+ 'width': int_or_none(thumbnail.get('width')),
+ 'height': int_or_none(thumbnail.get('height')),
+ } for thumbnail in properties.get('thumbnails', []) if thumbnail.get('url')]
+
+ subtitles = {}
+ for caption in properties.get('captions', {}):
+ lang = caption.get('language_code')
+ subtitles_url = caption.get('url')
+ if lang and subtitles_url:
+ subtitles[lang] = [{
+ 'url': subtitles_url,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'formats': formats,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'filesize': filesize,
+ 'categories': categories,
+ 'tags': tags,
+ 'thumbnails': thumbnails,
+ 'subtitles': subtitles,
+ }
+
+
+class LimelightMediaIE(LimelightBaseIE):
+ IE_NAME = 'limelight'
+ _VALID_URL = r'(?:limelight:media:|http://link\.videoplatform\.limelight\.com/media/\??\bmediaId=)(?P<id>[a-z0-9]{32})'
+ _TESTS = [{
+ 'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86',
+ 'info_dict': {
+ 'id': '3ffd040b522b4485b6d84effc750cd86',
+ 'ext': 'flv',
+ 'title': 'HaP and the HB Prince Trailer',
+ 'description': 'md5:8005b944181778e313d95c1237ddb640',
+ 'thumbnail': 're:^https?://.*\.jpeg$',
+ 'duration': 144.23,
+ 'timestamp': 1244136834,
+ 'upload_date': '20090604',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # video with subtitles
+ 'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335',
+ 'info_dict': {
+ 'id': 'a3e00274d4564ec4a9b29b9466432335',
+ 'ext': 'flv',
+ 'title': '3Play Media Overview Video',
+ 'description': '',
+ 'thumbnail': 're:^https?://.*\.jpeg$',
+ 'duration': 78.101,
+ 'timestamp': 1338929955,
+ 'upload_date': '20120605',
+ 'subtitles': 'mincount:9',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }]
+ _PLAYLIST_SERVICE_PATH = 'media'
+ _API_PATH = 'media'
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ pc, mobile, metadata = self._extract(
+ video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', 'properties')
+
+ return self._extract_info(
+ pc['playlistItems'][0].get('streams', []),
+ mobile['mediaList'][0].get('mobileUrls', []) if mobile else [],
+ metadata)
+
+
+class LimelightChannelIE(LimelightBaseIE):
+ IE_NAME = 'limelight:channel'
+ _VALID_URL = r'(?:limelight:channel:|http://link\.videoplatform\.limelight\.com/media/\??\bchannelId=)(?P<id>[a-z0-9]{32})'
+ _TEST = {
+ 'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082',
+ 'info_dict': {
+ 'id': 'ab6a524c379342f9b23642917020c082',
+ 'title': 'Javascript Sample Code',
+ },
+ 'playlist_mincount': 3,
+ }
+ _PLAYLIST_SERVICE_PATH = 'channel'
+ _API_PATH = 'channels'
+
+ def _real_extract(self, url):
+ channel_id = self._match_id(url)
+
+ pc, mobile, medias = self._extract(
+ channel_id, 'getPlaylistByChannelId',
+ 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', 'media')
+
+ entries = [
+ self._extract_info(
+ pc['playlistItems'][i].get('streams', []),
+ mobile['mediaList'][i].get('mobileUrls', []) if mobile else [],
+ medias['media_list'][i])
+ for i in range(len(medias['media_list']))]
+
+ return self.playlist_result(entries, channel_id, pc['title'])
+
+
+class LimelightChannelListIE(LimelightBaseIE):
+ IE_NAME = 'limelight:channel_list'
+ _VALID_URL = r'(?:limelight:channel_list:|http://link\.videoplatform\.limelight\.com/media/\?.*?\bchannelListId=)(?P<id>[a-z0-9]{32})'
+ _TEST = {
+ 'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b',
+ 'info_dict': {
+ 'id': '301b117890c4465c8179ede21fd92e2b',
+ 'title': 'Website - Hero Player',
+ },
+ 'playlist_mincount': 2,
+ }
+ _PLAYLIST_SERVICE_PATH = 'channel_list'
+
+ def _real_extract(self, url):
+ channel_list_id = self._match_id(url)
+
+ channel_list = self._call_playlist_service(channel_list_id, 'getMobileChannelListById')
+
+ entries = [
+ self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel')
+ for channel in channel_list['channelList']]
+
+ return self.playlist_result(entries, channel_list_id, channel_list['title'])
diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py
index a00f6e5e5..5c973e75c 100644
--- a/youtube_dl/extractor/lynda.py
+++ b/youtube_dl/extractor/lynda.py
@@ -11,13 +11,13 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ clean_html,
int_or_none,
)
class LyndaBaseIE(InfoExtractor):
_LOGIN_URL = 'https://www.lynda.com/login/login.aspx'
- _SUCCESSFUL_LOGIN_REGEX = r'isLoggedIn: true'
_ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
_NETRC_MACHINE = 'lynda'
@@ -41,7 +41,7 @@ class LyndaBaseIE(InfoExtractor):
request, None, 'Logging in as %s' % username)
# Not (yet) logged in
- m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
+ m = re.search(r'loginResultJson\s*=\s*\'(?P<json>[^\']+)\';', login_page)
if m is not None:
response = m.group('json')
response_json = json.loads(response)
@@ -70,7 +70,16 @@ class LyndaBaseIE(InfoExtractor):
request, None,
'Confirming log in and log out from another device')
- if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
+ if all(not re.search(p, login_page) for p in (r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
+ if 'login error' in login_page:
+ mobj = re.search(
+ r'(?s)<h1[^>]+class="topmost">(?P<title>[^<]+)</h1>\s*<div>(?P<description>.+?)</div>',
+ login_page)
+ if mobj:
+ raise ExtractorError(
+ 'lynda returned error: %s - %s'
+ % (mobj.group('title'), clean_html(mobj.group('description'))),
+ expected=True)
raise ExtractorError('Unable to log in')
@@ -109,9 +118,7 @@ class LyndaIE(LyndaBaseIE):
'lynda returned error: %s' % video_json['Message'], expected=True)
if video_json['HasAccess'] is False:
- raise ExtractorError(
- 'Video %s is only available for members. '
- % video_id + self._ACCOUNT_CREDENTIALS_HINT, expected=True)
+ self.raise_login_required('Video %s is only available for members' % video_id)
video_id = compat_str(video_json['ID'])
duration = video_json['DurationInSeconds']
@@ -133,13 +140,14 @@ class LyndaIE(LyndaBaseIE):
prioritized_streams = video_json.get('PrioritizedStreams')
if prioritized_streams:
- formats.extend([
- {
- 'url': video_url,
- 'width': int_or_none(format_id),
- 'format_id': format_id,
- } for format_id, video_url in prioritized_streams['0'].items()
- ])
+ for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
+ formats.extend([
+ {
+ 'url': video_url,
+ 'width': int_or_none(format_id),
+ 'format_id': '%s-%s' % (prioritized_stream_id, format_id),
+ } for format_id, video_url in prioritized_stream.items()
+ ])
self._check_formats(formats, video_id)
self._sort_formats(formats)
diff --git a/youtube_dl/extractor/mailru.py b/youtube_dl/extractor/mailru.py
index 54a14cb94..ab1300185 100644
--- a/youtube_dl/extractor/mailru.py
+++ b/youtube_dl/extractor/mailru.py
@@ -25,6 +25,7 @@ class MailRuIE(InfoExtractor):
'uploader_id': 'sonypicturesrus@mail.ru',
'duration': 184,
},
+ 'skip': 'Not accessible from Travis CI server',
},
{
'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
@@ -39,6 +40,7 @@ class MailRuIE(InfoExtractor):
'uploader_id': 'hitech@corp.mail.ru',
'duration': 245,
},
+ 'skip': 'Not accessible from Travis CI server',
},
]
diff --git a/youtube_dl/extractor/mdr.py b/youtube_dl/extractor/mdr.py
index 5fdd19027..fc7499958 100644
--- a/youtube_dl/extractor/mdr.py
+++ b/youtube_dl/extractor/mdr.py
@@ -29,7 +29,7 @@ class MDRIE(InfoExtractor):
doc = self._download_xml(domain + xmlurl, video_id)
formats = []
for a in doc.findall('./assets/asset'):
- url_el = a.find('.//progressiveDownloadUrl')
+ url_el = a.find('./progressiveDownloadUrl')
if url_el is None:
continue
abr = int(a.find('bitrateAudio').text) // 1000
diff --git a/youtube_dl/extractor/megavideoz.py b/youtube_dl/extractor/megavideoz.py
deleted file mode 100644
index af7ff07ea..000000000
--- a/youtube_dl/extractor/megavideoz.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# encoding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- float_or_none,
- xpath_text,
-)
-
-
-class MegaVideozIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?megavideoz\.eu/video/(?P<id>[^/]+)(?:/(?P<display_id>[^/]+))?'
- _TEST = {
- 'url': 'http://megavideoz.eu/video/WM6UB919XMXH/SMPTE-Universal-Film-Leader',
- 'info_dict': {
- 'id': '48723',
- 'display_id': 'SMPTE-Universal-Film-Leader',
- 'ext': 'mp4',
- 'title': 'SMPTE Universal Film Leader',
- 'thumbnail': 're:https?://.*?\.jpg',
- 'duration': 10.93,
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- display_id = mobj.group('display_id') or video_id
-
- webpage = self._download_webpage(url, display_id)
-
- if any(p in webpage for p in ('>Video Not Found<', '>404 Error<')):
- raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-
- config = self._download_xml(
- self._search_regex(
- r"var\s+cnf\s*=\s*'([^']+)'", webpage, 'cnf url'),
- display_id)
-
- video_url = xpath_text(config, './file', 'video url', fatal=True)
- title = xpath_text(config, './title', 'title', fatal=True)
- thumbnail = xpath_text(config, './image', 'thumbnail')
- duration = float_or_none(xpath_text(config, './duration', 'duration'))
- video_id = xpath_text(config, './mediaid', 'video id') or video_id
-
- return {
- 'id': video_id,
- 'display_id': display_id,
- 'url': video_url,
- 'title': title,
- 'thumbnail': thumbnail,
- 'duration': duration
- }
diff --git a/youtube_dl/extractor/mit.py b/youtube_dl/extractor/mit.py
index d7ab6a9ae..f088ab9e2 100644
--- a/youtube_dl/extractor/mit.py
+++ b/youtube_dl/extractor/mit.py
@@ -18,12 +18,12 @@ class TechTVMITIE(InfoExtractor):
_TEST = {
'url': 'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set',
- 'md5': '1f8cb3e170d41fd74add04d3c9330e5f',
+ 'md5': '00a3a27ee20d44bcaa0933ccec4a2cf7',
'info_dict': {
'id': '25418',
'ext': 'mp4',
- 'title': 'MIT DNA Learning Center Set',
- 'description': 'md5:82313335e8a8a3f243351ba55bc1b474',
+ 'title': 'MIT DNA and Protein Sets',
+ 'description': 'md5:46f5c69ce434f0a97e7c628cc142802d',
},
}
@@ -33,8 +33,8 @@ class TechTVMITIE(InfoExtractor):
'http://techtv.mit.edu/videos/%s' % video_id, video_id)
clean_page = re.compile(r'<!--.*?-->', re.S).sub('', raw_page)
- base_url = self._search_regex(
- r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url')
+ base_url = self._proto_relative_url(self._search_regex(
+ r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url'), 'http:')
formats_json = self._search_regex(
r'bitrates: (\[.+?\])', raw_page, 'video formats')
formats_mit = json.loads(formats_json)
diff --git a/youtube_dl/extractor/mitele.py b/youtube_dl/extractor/mitele.py
index 852d72266..54993e2c9 100644
--- a/youtube_dl/extractor/mitele.py
+++ b/youtube_dl/extractor/mitele.py
@@ -1,74 +1,85 @@
from __future__ import unicode_literals
-import json
-
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_parse_unquote,
- compat_urlparse,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
+ encode_dict,
get_element_by_attribute,
- parse_duration,
- strip_jsonp,
+ int_or_none,
)
class MiTeleIE(InfoExtractor):
- IE_NAME = 'mitele.es'
+ IE_DESC = 'mitele.es'
_VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
+ 'md5': 'ace7635b2a0b286aaa37d3ff192d2a8a',
'info_dict': {
- 'id': '0fce117d',
- 'ext': 'mp4',
- 'title': 'Programa 144 - Tor, la web invisible',
- 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
+ 'id': '0NF1jJnxS1Wu3pHrmvFyw2',
'display_id': 'programa-144',
+ 'ext': 'flv',
+ 'title': 'Tor, la web invisible',
+ 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
+ 'thumbnail': 're:(?i)^https?://.*\.jpg$',
'duration': 2913,
},
- 'params': {
- # m3u8 download
- 'skip_download': True,
- },
}]
def _real_extract(self, url):
- episode = self._match_id(url)
- webpage = self._download_webpage(url, episode)
- embed_data_json = self._search_regex(
- r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
- ).replace('\'', '"')
- embed_data = json.loads(embed_data_json)
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
- domain = embed_data['mediaUrl']
- if not domain.startswith('http'):
- # only happens in telecinco.es videos
- domain = 'http://' + domain
- info_url = compat_urlparse.urljoin(
- domain,
- compat_urllib_parse_unquote(embed_data['flashvars']['host'])
- )
- info_el = self._download_xml(info_url, episode).find('./video/info')
+ config_url = self._search_regex(
+ r'data-config\s*=\s*"([^"]+)"', webpage, 'data config url')
- video_link = info_el.find('videoUrl/link').text
- token_query = compat_urllib_parse.urlencode({'id': video_link})
- token_info = self._download_json(
- embed_data['flashvars']['ov_tk'] + '?' + token_query,
- episode,
- transform_source=strip_jsonp
- )
- formats = self._extract_m3u8_formats(
- token_info['tokenizedUrl'], episode, ext='mp4')
+ config = self._download_json(
+ config_url, display_id, 'Downloading config JSON')
+
+ mmc = self._download_json(
+ config['services']['mmc'], display_id, 'Downloading mmc JSON')
+
+ formats = []
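+ # Each location needs a token fetched from its "gat" gateway; the returned "file" is an HDS manifest URL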
+ for location in mmc['locations']:
+ gat = self._proto_relative_url(location.get('gat'), 'http:')
+ bas = location.get('bas')
+ loc = location.get('loc')
+ ogn = location.get('ogn')
+ if None in (gat, bas, loc, ogn):
+ continue
+ token_data = {
+ 'bas': bas,
+ 'icd': loc,
+ 'ogn': ogn,
+ 'sta': '0',
+ }
+ media = self._download_json(
+ '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data)).encode('utf-8')),
+ display_id, 'Downloading %s JSON' % location['loc'])
+ file_ = media.get('file')
+ if not file_:
+ continue
+ formats.extend(self._extract_f4m_formats(
+ file_ + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
+ display_id, f4m_id=loc))
+
+ title = self._search_regex(
+ r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>', webpage, 'title')
+
+ video_id = self._search_regex(
+ r'data-media-id\s*=\s*"([^"]+)"', webpage,
+ 'data media id', default=None) or display_id
+ thumbnail = config.get('poster', {}).get('imageUrl')
+ duration = int_or_none(mmc.get('duration'))
return {
- 'id': embed_data['videoId'],
- 'display_id': episode,
- 'title': info_el.find('title').text,
- 'formats': formats,
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
'description': get_element_by_attribute('class', 'text', webpage),
- 'thumbnail': info_el.find('thumb').text,
- 'duration': parse_duration(info_el.find('duration').text),
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
}
diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py
index 88dcd4f73..69e4bcd1a 100644
--- a/youtube_dl/extractor/moniker.py
+++ b/youtube_dl/extractor/moniker.py
@@ -9,7 +9,10 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ remove_start,
+)
class MonikerIE(InfoExtractor):
@@ -25,6 +28,14 @@ class MonikerIE(InfoExtractor):
'title': 'youtube-dl test video',
},
}, {
+ 'url': 'http://allmyvideos.net/embed-jih3nce3x6wn',
+ 'md5': '710883dee1bfc370ecf9fa6a89307c88',
+ 'info_dict': {
+ 'id': 'jih3nce3x6wn',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video',
+ },
+ }, {
'url': 'http://vidspot.net/l2ngsmhs8ci5',
'md5': '710883dee1bfc370ecf9fa6a89307c88',
'info_dict': {
@@ -38,7 +49,10 @@ class MonikerIE(InfoExtractor):
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
+ orig_video_id = self._match_id(url)
+ video_id = remove_start(orig_video_id, 'embed-')
+ url = url.replace(orig_video_id, video_id)
+ assert re.match(self._VALID_URL, url) is not None
orig_webpage = self._download_webpage(url, video_id)
if '>File Not Found<' in orig_webpage:
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index b48fac5e3..302c9bf35 100644
--- a/youtube_dl/extractor/mtv.py
+++ b/youtube_dl/extractor/mtv.py
@@ -67,7 +67,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
return [{'url': url, 'ext': 'mp4'}]
def _extract_video_formats(self, mdoc, mtvn_id):
- if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
+ if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
self.to_screen('The normal version is not available from your '
'country, trying with the mobile version')
@@ -114,7 +114,8 @@ class MTVServicesInfoExtractor(InfoExtractor):
# Remove the templates, like &device={device}
mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url)
if 'acceptMethods' not in mediagen_url:
- mediagen_url += '&acceptMethods=fms'
+ mediagen_url += '&' if '?' in mediagen_url else '?'
+ mediagen_url += 'acceptMethods=fms'
mediagen_doc = self._download_xml(mediagen_url, video_id,
'Downloading video urls')
@@ -141,7 +142,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
if title_el is None:
title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
if title_el is None:
- title_el = itemdoc.find('.//title')
+ title_el = itemdoc.find('.//title') or itemdoc.find('./title')
if title_el.text is None:
title_el = None
@@ -174,8 +175,11 @@ class MTVServicesInfoExtractor(InfoExtractor):
if self._LANG:
info_url += 'lang=%s&' % self._LANG
info_url += data
+ return self._get_videos_info_from_url(info_url, video_id)
+
+ def _get_videos_info_from_url(self, url, video_id):
idoc = self._download_xml(
- info_url, video_id,
+ url, video_id,
'Downloading info', transform_source=fix_xml_ampersands)
return self.playlist_result(
[self._get_video_info(item) for item in idoc.findall('.//item')])
@@ -196,7 +200,13 @@ class MTVServicesInfoExtractor(InfoExtractor):
if mgid is None or ':' not in mgid:
mgid = self._search_regex(
[r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
- webpage, 'mgid')
+ webpage, 'mgid', default=None)
+
+ if not mgid:
+ sm4_embed = self._html_search_meta(
+ 'sm4:video:embed', webpage, 'sm4 embed', default='')
+ mgid = self._search_regex(
+ r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid')
videos_info = self._get_videos_info(mgid)
return videos_info
@@ -218,6 +228,13 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
},
}
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage)
+ if mobj:
+ return mobj.group('url')
+
def _get_feed_url(self, uri):
video_id = self._id_from_uri(uri)
site_id = uri.replace(video_id, '')
@@ -288,3 +305,65 @@ class MTVIggyIE(MTVServicesInfoExtractor):
}
}
_FEED_URL = 'http://all.mtvworldverticals.com/feed-xml/'
+
+
+class MTVDEIE(MTVServicesInfoExtractor):
+ IE_NAME = 'mtv.de'
+ _VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:artists|shows|news)/(?:[^/]+/)*(?P<id>\d+)-[^/#?]+/*(?:[#?].*)?$'
+ _TESTS = [{
+ 'url': 'http://www.mtv.de/artists/10571-cro/videos/61131-traum',
+ 'info_dict': {
+ 'id': 'music_video-a50bc5f0b3aa4b3190aa',
+ 'ext': 'mp4',
+ 'title': 'MusicVideo_cro-traum',
+ 'description': 'Cro - Traum',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
+ 'url': 'http://www.mtv.de/shows/933-teen-mom-2/staffeln/5353/folgen/63565-enthullungen',
+ 'info_dict': {
+ 'id': 'local_playlist-f5ae778b9832cc837189',
+ 'ext': 'mp4',
+ 'title': 'Episode_teen-mom-2_shows_season-5_episode-1_full-episode_part1',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # single video in pagePlaylist with different id
+ 'url': 'http://www.mtv.de/news/77491-mtv-movies-spotlight-pixels-teil-3',
+ 'info_dict': {
+ 'id': 'local_playlist-4e760566473c4c8c5344',
+ 'ext': 'mp4',
+ 'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
+ 'description': 'MTV Movies Supercut',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ playlist = self._parse_json(
+ self._search_regex(
+ r'window\.pagePlaylist\s*=\s*(\[.+?\]);\n', webpage, 'page playlist'),
+ video_id)
+
+ # news pages contain single video in playlist with different id
+ if len(playlist) == 1:
+ return self._get_videos_info_from_url(playlist[0]['mrss'], video_id)
+
+ for item in playlist:
+ item_id = item.get('id')
+ if item_id and compat_str(item_id) == video_id:
+ return self._get_videos_info_from_url(item['mrss'], video_id)
diff --git a/youtube_dl/extractor/musicvault.py b/youtube_dl/extractor/musicvault.py
deleted file mode 100644
index 0e46ac7c1..000000000
--- a/youtube_dl/extractor/musicvault.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-
-
-class MusicVaultIE(InfoExtractor):
- _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html'
- _TEST = {
- 'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html',
- 'md5': '3adcbdb3dcc02d647539e53f284ba171',
- 'info_dict': {
- 'id': '1010863',
- 'ext': 'mp4',
- 'uploader_id': 'the-allman-brothers-band',
- 'title': 'Straight from the Heart',
- 'duration': 244,
- 'uploader': 'The Allman Brothers Band',
- 'thumbnail': 're:^https?://.*/thumbnail/.*',
- 'upload_date': '20131219',
- 'location': 'Capitol Theatre (Passaic, NJ)',
- 'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981',
- 'timestamp': int,
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('display_id')
- webpage = self._download_webpage(url, display_id)
-
- thumbnail = self._search_regex(
- r'<meta itemprop="thumbnail" content="([^"]+)"',
- webpage, 'thumbnail', fatal=False)
-
- data_div = self._search_regex(
- r'(?s)<div class="data">(.*?)</div>', webpage, 'data fields')
- uploader = self._html_search_regex(
- r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False)
- title = self._html_search_regex(
- r'<h2.*?>(.*?)</h2>', data_div, 'title')
- location = self._html_search_regex(
- r'<h4.*?>(.*?)</h4>', data_div, 'location', fatal=False)
-
- kaltura_id = self._search_regex(
- r'<div id="video-detail-player" data-kaltura-id="([^"]+)"',
- webpage, 'kaltura ID')
- wid = self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid')
-
- return {
- 'id': mobj.group('id'),
- '_type': 'url_transparent',
- 'url': 'kaltura:%s:%s' % (wid, kaltura_id),
- 'ie_key': 'Kaltura',
- 'display_id': display_id,
- 'uploader_id': mobj.group('uploader_id'),
- 'thumbnail': thumbnail,
- 'description': self._html_search_meta('description', webpage),
- 'location': location,
- 'title': title,
- 'uploader': uploader,
- }
diff --git a/youtube_dl/extractor/mwave.py b/youtube_dl/extractor/mwave.py
new file mode 100644
index 000000000..66b523197
--- /dev/null
+++ b/youtube_dl/extractor/mwave.py
@@ -0,0 +1,58 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+ int_or_none,
+ parse_duration,
+)
+
+
+class MwaveIE(InfoExtractor):
+ _VALID_URL = r'https?://mwave\.interest\.me/mnettv/videodetail\.m\?searchVideoDetailVO\.clip_id=(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=168859',
+ 'md5': 'c930e27b7720aaa3c9d0018dfc8ff6cc',
+ 'info_dict': {
+ 'id': '168859',
+ 'ext': 'flv',
+ 'title': '[M COUNTDOWN] SISTAR - SHAKE IT',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'M COUNTDOWN',
+ 'duration': 206,
+ 'view_count': int,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ vod_info = self._download_json(
+ 'http://mwave.interest.me/onair/vod_info.m?vodtype=CL&sectorid=&endinfo=Y&id=%s' % video_id,
+ video_id, 'Downloading vod JSON')
+
+ formats = []
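+ # Each CDN entry points to a JSON descriptor whose "fileurl" is an HDS (f4m) manifest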
+ for num, cdn_info in enumerate(vod_info['cdn']):
+ stream_url = cdn_info.get('url')
+ if not stream_url:
+ continue
+ stream_name = cdn_info.get('name') or compat_str(num)
+ f4m_stream = self._download_json(
+ stream_url, video_id,
+ 'Downloading %s stream JSON' % stream_name)
+ f4m_url = f4m_stream.get('fileurl')
+ if not f4m_url:
+ continue
+ formats.extend(
+ self._extract_f4m_formats(f4m_url + '&hdcore=3.0.3', video_id, f4m_id=stream_name))
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': vod_info['title'],
+ 'thumbnail': vod_info.get('cover'),
+ 'uploader': vod_info.get('program_title'),
+ 'duration': parse_duration(vod_info.get('time')),
+ 'view_count': int_or_none(vod_info.get('hit')),
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/nationalgeographic.py b/youtube_dl/extractor/nationalgeographic.py
index f793b72f5..6fc9e7b05 100644
--- a/youtube_dl/extractor/nationalgeographic.py
+++ b/youtube_dl/extractor/nationalgeographic.py
@@ -8,18 +8,30 @@ from ..utils import (
class NationalGeographicIE(InfoExtractor):
- _VALID_URL = r'http://video\.nationalgeographic\.com/video/.*?'
-
- _TEST = {
- 'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
- 'info_dict': {
- 'id': '4DmDACA6Qtk_',
- 'ext': 'flv',
- 'title': 'Mating Crabs Busted by Sharks',
- 'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
+ _VALID_URL = r'http://video\.nationalgeographic\.com/.*?'
+
+ _TESTS = [
+ {
+ 'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
+ 'info_dict': {
+ 'id': '4DmDACA6Qtk_',
+ 'ext': 'flv',
+ 'title': 'Mating Crabs Busted by Sharks',
+ 'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
+ },
+ 'add_ie': ['ThePlatform'],
},
- 'add_ie': ['ThePlatform'],
- }
+ {
+ 'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws',
+ 'info_dict': {
+ 'id': '_JeBD_D7PlS5',
+ 'ext': 'flv',
+ 'title': 'The Real Jaws',
+ 'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6',
+ },
+ 'add_ie': ['ThePlatform'],
+ },
+ ]
def _real_extract(self, url):
name = url_basename(url)
@@ -37,5 +49,6 @@ class NationalGeographicIE(InfoExtractor):
return self.url_result(smuggle_url(
'http://link.theplatform.com/s/ngs/%s?format=SMIL&formats=MPEG4&manifest=f4m' % theplatform_id,
- # For some reason, the normal links don't work and we must force the use of f4m
+ # For some reason, the normal links don't work and we must force
+ # the use of f4m
{'force_smil_url': True}))
diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py
index 925967753..1f5fc2145 100644
--- a/youtube_dl/extractor/naver.py
+++ b/youtube_dl/extractor/naver.py
@@ -10,7 +10,6 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
- clean_html,
)
@@ -46,11 +45,11 @@ class NaverIE(InfoExtractor):
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
- m_error = re.search(
- r'(?s)<div class="(?:nation_error|nation_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
- webpage)
- if m_error:
- raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
+ error = self._html_search_regex(
+ r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
+ webpage, 'error', default=None)
+ if error:
+ raise ExtractorError(error, expected=True)
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py
index dc2091be0..e683d24c4 100644
--- a/youtube_dl/extractor/nbc.py
+++ b/youtube_dl/extractor/nbc.py
@@ -124,7 +124,7 @@ class NBCSportsIE(InfoExtractor):
class NBCNewsIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?nbcnews\.com/
(?:video/.+?/(?P<id>\d+)|
- (?:feature|nightly-news)/[^/]+/(?P<title>.+))
+ (?:watch|feature|nightly-news)/[^/]+/(?P<title>.+))
'''
_TESTS = [
@@ -169,6 +169,10 @@ class NBCNewsIE(InfoExtractor):
'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5',
},
},
+ {
+ 'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952',
+ 'only_matching': True,
+ },
]
def _real_extract(self, url):
@@ -232,3 +236,28 @@ class NBCNewsIE(InfoExtractor):
'url': info['videoAssets'][-1]['publicUrl'],
'ie_key': 'ThePlatform',
}
+
+
+class MSNBCIE(InfoExtractor):
+ # https URLs redirect to corresponding http ones
+ _VALID_URL = r'http://www\.msnbc\.com/[^/]+/watch/(?P<id>[^/]+)'
+ _TEST = {
+ 'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
+ 'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
+ 'info_dict': {
+ 'id': 'n_hayes_Aimm_140801_272214',
+ 'ext': 'mp4',
+ 'title': 'The chaotic GOP immigration vote',
+ 'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'timestamp': 1406937606,
+ 'upload_date': '20140802',
+ 'categories': ['MSNBC/Topics/Franchise/Best of last night', 'MSNBC/Topics/General/Congress'],
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ embed_url = self._html_search_meta('embedURL', webpage)
+ return self.url_result(embed_url)
diff --git a/youtube_dl/extractor/ndr.py b/youtube_dl/extractor/ndr.py
index 79a13958b..e3cc6fde8 100644
--- a/youtube_dl/extractor/ndr.py
+++ b/youtube_dl/extractor/ndr.py
@@ -1,130 +1,380 @@
-# encoding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
+ determine_ext,
int_or_none,
+ parse_iso8601,
qualities,
- parse_duration,
)
class NDRBaseIE(InfoExtractor):
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ return self._extract_embed(webpage, display_id)
+
- page = self._download_webpage(url, video_id, 'Downloading page')
+class NDRIE(NDRBaseIE):
+ IE_NAME = 'ndr'
+ IE_DESC = 'NDR.de - Norddeutscher Rundfunk'
+ _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)+(?P<id>[^/?#]+),[\da-z]+\.html'
+ _TESTS = [{
+ # httpVideo, same content id
+ 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
+ 'md5': '6515bc255dc5c5f8c85bbc38e035a659',
+ 'info_dict': {
+ 'id': 'hafengeburtstag988',
+ 'display_id': 'Party-Poette-und-Parade',
+ 'ext': 'mp4',
+ 'title': 'Party, Pötte und Parade',
+ 'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c',
+ 'uploader': 'ndrtv',
+ 'timestamp': 1431108900,
+ 'upload_date': '20150510',
+ 'duration': 3498,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpVideo, different content id
+ 'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html',
+ 'md5': '1043ff203eab307f0c51702ec49e9a71',
+ 'info_dict': {
+ 'id': 'osna272',
+ 'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch',
+ 'ext': 'mp4',
+ 'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights',
+ 'description': 'md5:32e9b800b3d2d4008103752682d5dc01',
+ 'uploader': 'ndrtv',
+ 'timestamp': 1442059200,
+ 'upload_date': '20150912',
+ 'duration': 510,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpAudio, same content id
+ 'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html',
+ 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
+ 'info_dict': {
+ 'id': 'audio51535',
+ 'display_id': 'La-Valette-entgeht-der-Hinrichtung',
+ 'ext': 'mp3',
+ 'title': 'La Valette entgeht der Hinrichtung',
+ 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
+ 'uploader': 'ndrinfo',
+ 'timestamp': 1290626100,
+ 'upload_date': '20140729',
+ 'duration': 884,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }]
- title = self._og_search_title(page).strip()
- description = self._og_search_description(page)
- if description:
- description = description.strip()
+ def _extract_embed(self, webpage, display_id):
+ embed_url = self._html_search_meta(
+ 'embedURL', webpage, 'embed URL', fatal=True)
+ description = self._search_regex(
+ r'<p[^>]+itemprop="description">([^<]+)</p>',
+ webpage, 'description', fatal=False)
+ timestamp = parse_iso8601(
+ self._search_regex(
+ r'<span itemprop="datePublished" content="([^"]+)">',
+ webpage, 'upload date', fatal=False))
+ return {
+ '_type': 'url_transparent',
+ 'url': embed_url,
+ 'display_id': display_id,
+ 'description': description,
+ 'timestamp': timestamp,
+ }
- duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', default=None))
- if not duration:
- duration = parse_duration(self._html_search_regex(
- r'(<span class="min">\d+</span>:<span class="sec">\d+</span>)',
- page, 'duration', default=None))
- formats = []
+class NJoyIE(NDRBaseIE):
+ IE_NAME = 'njoy'
+ IE_DESC = 'N-JOY'
+ _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)+(?P<id>[^/?#]+),[\da-z]+\.html'
+ _TESTS = [{
+ # httpVideo, same content id
+ 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
+ 'md5': 'cb63be60cd6f9dd75218803146d8dc67',
+ 'info_dict': {
+ 'id': 'comedycontest2480',
+ 'display_id': 'Benaissa-beim-NDR-Comedy-Contest',
+ 'ext': 'mp4',
+ 'title': 'Benaissa beim NDR Comedy Contest',
+ 'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39',
+ 'uploader': 'ndrtv',
+ 'upload_date': '20141129',
+ 'duration': 654,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpVideo, different content id
+ 'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html',
+ 'md5': '417660fffa90e6df2fda19f1b40a64d8',
+ 'info_dict': {
+ 'id': 'dockville882',
+ 'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-',
+ 'ext': 'mp4',
+ 'title': '"Ich hab noch nie" mit Felix Jaehn',
+ 'description': 'md5:85dd312d53be1b99e1f998a16452a2f3',
+ 'uploader': 'njoy',
+ 'upload_date': '20150822',
+ 'duration': 211,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }]
+
+ def _extract_embed(self, webpage, display_id):
+ video_id = self._search_regex(
+ r'<iframe[^>]+id="pp_([\da-z]+)"', webpage, 'embed id')
+ description = self._search_regex(
+ r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>',
+ webpage, 'description', fatal=False)
+ return {
+ '_type': 'url_transparent',
+ 'ie_key': 'NDREmbedBase',
+ 'url': 'ndr:%s' % video_id,
+ 'display_id': display_id,
+ 'description': description,
+ }
- mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
- if mp3_url:
- formats.append({
- 'url': mp3_url.group('audio'),
- 'format_id': 'mp3',
- })
- thumbnail = None
+class NDREmbedBaseIE(InfoExtractor):
+ IE_NAME = 'ndr:embed:base'
+ _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)'
+ _TESTS = [{
+ 'url': 'ndr:soundcheck3366',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/soundcheck3366-ppjson.json',
+ 'only_matching': True,
+ }]
- video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
- if video_url:
- thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
- if thumbnails:
- quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
- largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
- thumbnail = 'http://www.ndr.de' + largest[0]
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id') or mobj.group('id_s')
- for format_id in 'lo', 'hi', 'hq':
- formats.append({
- 'url': '%s.%s.mp4' % (video_url.group('video'), format_id),
- 'format_id': format_id,
- })
+ ppjson = self._download_json(
+ 'http://www.ndr.de/%s-ppjson.json' % video_id, video_id)
- if not formats:
- raise ExtractorError('No media links available for %s' % video_id)
+ playlist = ppjson['playlist']
+
+ formats = []
+ quality_key = qualities(('xs', 's', 'm', 'l', 'xl'))
+
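+ # Playlist entries may be f4m/m3u8 manifests or direct progressive files; audio-only entries are flagged via their MIME type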
+ for format_id, f in playlist.items():
+ src = f.get('src')
+ if not src:
+ continue
+ ext = determine_ext(src, None)
+ if ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(
+ src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds'))
+ elif ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ src, video_id, m3u8_id='hls', entry_protocol='m3u8_native'))
+ else:
+ quality = f.get('quality')
+ ff = {
+ 'url': src,
+ 'format_id': quality or format_id,
+ 'quality': quality_key(quality),
+ }
+ type_ = f.get('type')
+ if type_ and type_.split('/')[0] == 'audio':
+ ff['vcodec'] = 'none'
+ ff['ext'] = ext or 'mp3'
+ formats.append(ff)
+ self._sort_formats(formats)
+
+ config = playlist['config']
+
+ live = config.get('streamType') in ['httpVideoLive', 'httpAudioLive']
+ title = config['title']
+ if live:
+ title = self._live_title(title)
+ uploader = ppjson.get('config', {}).get('branding')
+ upload_date = ppjson.get('config', {}).get('publicationDate')
+ duration = int_or_none(config.get('duration'))
+
+ thumbnails = [{
+ 'id': thumbnail.get('quality') or thumbnail_id,
+ 'url': thumbnail['src'],
+ 'preference': quality_key(thumbnail.get('quality')),
+ } for thumbnail_id, thumbnail in config.get('poster', {}).items() if thumbnail.get('src')]
return {
'id': video_id,
'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
+ 'is_live': live,
+ 'uploader': uploader if uploader != '-' else None,
+ 'upload_date': upload_date[0:8] if upload_date else None,
'duration': duration,
+ 'thumbnails': thumbnails,
'formats': formats,
}
-class NDRIE(NDRBaseIE):
- IE_NAME = 'ndr'
- IE_DESC = 'NDR.de - Mediathek'
- _VALID_URL = r'https?://www\.ndr\.de/.+?(?P<id>\d+)\.html'
-
- _TESTS = [
- {
- 'url': 'http://www.ndr.de/fernsehen/sendungen/nordmagazin/Kartoffeltage-in-der-Lewitz,nordmagazin25866.html',
- 'md5': '5bc5f5b92c82c0f8b26cddca34f8bb2c',
- 'note': 'Video file',
- 'info_dict': {
- 'id': '25866',
- 'ext': 'mp4',
- 'title': 'Kartoffeltage in der Lewitz',
- 'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8',
- 'duration': 166,
- },
- 'skip': '404 Not found',
- },
- {
- 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
- 'md5': 'dadc003c55ae12a5d2f6bd436cd73f59',
- 'info_dict': {
- 'id': '988',
- 'ext': 'mp4',
- 'title': 'Party, Pötte und Parade',
- 'description': 'Hunderttausende feiern zwischen Speicherstadt und St. Pauli den 826. Hafengeburtstag. Die NDR Sondersendung zeigt die schönsten und spektakulärsten Bilder vom Auftakt.',
- 'duration': 3498,
- },
- },
- {
- 'url': 'http://www.ndr.de/info/audio51535.html',
- 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
- 'note': 'Audio file',
- 'info_dict': {
- 'id': '51535',
- 'ext': 'mp3',
- 'title': 'La Valette entgeht der Hinrichtung',
- 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
- 'duration': 884,
- }
- }
- ]
-
+class NDREmbedIE(NDREmbedBaseIE):
+ IE_NAME = 'ndr:embed'
+ _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html'
+ _TESTS = [{
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html',
+ 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9',
+ 'info_dict': {
+ 'id': 'ndraktuell28488',
+ 'ext': 'mp4',
+ 'title': 'Norddeutschland begrüßt Flüchtlinge',
+ 'is_live': False,
+ 'uploader': 'ndrtv',
+ 'upload_date': '20150907',
+ 'duration': 132,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html',
+ 'md5': '002085c44bae38802d94ae5802a36e78',
+ 'info_dict': {
+ 'id': 'soundcheck3366',
+ 'ext': 'mp4',
+ 'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen',
+ 'is_live': False,
+ 'uploader': 'ndr2',
+ 'upload_date': '20150912',
+ 'duration': 3554,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/info/audio51535-player.html',
+ 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
+ 'info_dict': {
+ 'id': 'audio51535',
+ 'ext': 'mp3',
+ 'title': 'La Valette entgeht der Hinrichtung',
+ 'is_live': False,
+ 'uploader': 'ndrinfo',
+ 'upload_date': '20140729',
+ 'duration': 884,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html',
+ 'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c',
+ 'info_dict': {
+ 'id': 'visite11010',
+ 'ext': 'mp4',
+ 'title': 'Visite - die ganze Sendung',
+ 'is_live': False,
+ 'uploader': 'ndrtv',
+ 'upload_date': '20150902',
+ 'duration': 3525,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpVideoLive
+ 'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html',
+ 'info_dict': {
+ 'id': 'livestream217',
+ 'ext': 'flv',
+ 'title': 're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
+ 'is_live': True,
+ 'upload_date': '20150910',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/doku952-player.html',
+ 'only_matching': True,
+ }]
-class NJoyIE(NDRBaseIE):
- IE_NAME = 'N-JOY'
- _VALID_URL = r'https?://www\.n-joy\.de/.+?(?P<id>\d+)\.html'
- _TEST = {
- 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
- 'md5': 'cb63be60cd6f9dd75218803146d8dc67',
+class NJoyEmbedIE(NDREmbedBaseIE):
+ IE_NAME = 'njoy:embed'
+ _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html'
+ _TESTS = [{
+ # httpVideo
+ 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html',
+ 'md5': '8483cbfe2320bd4d28a349d62d88bd74',
'info_dict': {
- 'id': '2480',
+ 'id': 'doku948',
'ext': 'mp4',
- 'title': 'Benaissa beim NDR Comedy Contest',
- 'description': 'Von seinem sehr "behaarten" Leben lässt sich Benaissa trotz aller Schwierigkeiten nicht unterkriegen.',
- 'duration': 654,
- }
- }
+ 'title': 'Zehn Jahre Reeperbahn Festival - die Doku',
+ 'is_live': False,
+ 'upload_date': '20150807',
+ 'duration': 1011,
+ },
+ }, {
+ # httpAudio
+ 'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html',
+ 'md5': 'd989f80f28ac954430f7b8a48197188a',
+ 'info_dict': {
+ 'id': 'stefanrichter100',
+ 'ext': 'mp3',
+ 'title': 'Interview mit einem Augenzeugen',
+ 'is_live': False,
+ 'uploader': 'njoy',
+ 'upload_date': '20150909',
+ 'duration': 140,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpAudioLive, no explicit ext
+ 'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html',
+ 'info_dict': {
+ 'id': 'webradioweltweit100',
+ 'ext': 'mp3',
+ 'title': 're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
+ 'is_live': True,
+ 'uploader': 'njoy',
+ 'upload_date': '20150810',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html',
+ 'only_matching': True,
+ }]
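
A quick standalone check (not part of the patch itself) of how the two new embed patterns above capture the video id; the sample URLs are taken from the tests in this diff:

import re

NDR_EMBED = r'https?://www\.ndr\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html'
NJOY_EMBED = r'https?://www\.n-joy\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html'

samples = [
    'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html',
    'http://www.ndr.de/info/audio51535-player.html',
    'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html',
]

for url in samples:
    # try the NDR pattern first, then the N-JOY one (which expects a trailing _image/_theme suffix)
    mobj = re.match(NDR_EMBED, url) or re.match(NJOY_EMBED, url)
    print(mobj.group('id'))  # ndraktuell28488, audio51535, doku948
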
diff --git a/youtube_dl/extractor/nextmedia.py b/youtube_dl/extractor/nextmedia.py
index c10784f6b..d1688457f 100644
--- a/youtube_dl/extractor/nextmedia.py
+++ b/youtube_dl/extractor/nextmedia.py
@@ -126,7 +126,8 @@ class AppleDailyIE(NextMediaIE):
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd',
'upload_date': '20150128',
- }
+ },
+ 'skip': 'redirect to http://www.appledaily.com.tw/animation/',
}, {
# No thumbnail
'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/',
@@ -140,10 +141,19 @@ class AppleDailyIE(NextMediaIE):
},
'expected_warnings': [
'video thumbnail',
- ]
+ ],
+ 'skip': 'redirect to http://www.appledaily.com.tw/animation/',
}, {
'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/',
- 'only_matching': True,
+ 'md5': 'eaa20e6b9df418c912d7f5dec2ba734d',
+ 'info_dict': {
+ 'id': '35770334',
+ 'ext': 'mp4',
+ 'title': '咖啡占卜測 XU裝熟指數',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748',
+ 'upload_date': '20140417',
+ },
}]
_URL_PATTERN = r'\{url: \'(.+)\'\}'
diff --git a/youtube_dl/extractor/nfl.py b/youtube_dl/extractor/nfl.py
index dc54634a5..200874d68 100644
--- a/youtube_dl/extractor/nfl.py
+++ b/youtube_dl/extractor/nfl.py
@@ -16,53 +16,118 @@ from ..utils import (
class NFLIE(InfoExtractor):
IE_NAME = 'nfl.com'
- _VALID_URL = r'''(?x)https?://
- (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
- (?:.+?/)*
- (?P<id>(?:[a-z0-9]{16}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
- _TESTS = [
- {
- 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
- 'md5': '394ef771ddcd1354f665b471d78ec4c6',
- 'info_dict': {
- 'id': '0ap3000000398478',
- 'ext': 'mp4',
- 'title': 'Week 3: Redskins vs. Eagles highlights',
- 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
- 'upload_date': '20140921',
- 'timestamp': 1411337580,
- 'thumbnail': 're:^https?://.*\.jpg$',
- }
+ _VALID_URL = r'''(?x)
+ https?://
+ (?P<host>
+ (?:www\.)?
+ (?:
+ (?:
+ nfl|
+ buffalobills|
+ miamidolphins|
+ patriots|
+ newyorkjets|
+ baltimoreravens|
+ bengals|
+ clevelandbrowns|
+ steelers|
+ houstontexans|
+ colts|
+ jaguars|
+ titansonline|
+ denverbroncos|
+ kcchiefs|
+ raiders|
+ chargers|
+ dallascowboys|
+ giants|
+ philadelphiaeagles|
+ redskins|
+ chicagobears|
+ detroitlions|
+ packers|
+ vikings|
+ atlantafalcons|
+ panthers|
+ neworleanssaints|
+ buccaneers|
+ azcardinals|
+ stlouisrams|
+ 49ers|
+ seahawks
+ )\.com|
+ .+?\.clubs\.nfl\.com
+ )
+ )/
+ (?:.+?/)*
+ (?P<id>[^/#?&]+)
+ '''
+ _TESTS = [{
+ 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
+ 'md5': '394ef771ddcd1354f665b471d78ec4c6',
+ 'info_dict': {
+ 'id': '0ap3000000398478',
+ 'ext': 'mp4',
+ 'title': 'Week 3: Redskins vs. Eagles highlights',
+ 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
+ 'upload_date': '20140921',
+ 'timestamp': 1411337580,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }, {
+ 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
+ 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
+ 'info_dict': {
+ 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
+ 'ext': 'mp4',
+ 'title': 'LIVE: Post Game vs. Browns',
+ 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
+ 'upload_date': '20131229',
+ 'timestamp': 1388354455,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }, {
+ 'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish',
+ 'info_dict': {
+ 'id': '0ap3000000467607',
+ 'ext': 'mp4',
+ 'title': 'Frustrations flare on the field',
+ 'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.',
+ 'timestamp': 1422850320,
+ 'upload_date': '20150202',
},
- {
- 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
- 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
- 'info_dict': {
- 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
- 'ext': 'mp4',
- 'title': 'LIVE: Post Game vs. Browns',
- 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
- 'upload_date': '20131229',
- 'timestamp': 1388354455,
- 'thumbnail': 're:^https?://.*\.jpg$',
- }
+ }, {
+ 'url': 'http://www.patriots.com/video/2015/09/18/10-days-gillette',
+ 'md5': '4c319e2f625ffd0b481b4382c6fc124c',
+ 'info_dict': {
+ 'id': 'n-238346',
+ 'ext': 'mp4',
+ 'title': '10 Days at Gillette',
+ 'description': 'md5:8cd9cd48fac16de596eadc0b24add951',
+ 'timestamp': 1442618809,
+ 'upload_date': '20150918',
},
- {
- 'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish',
- 'info_dict': {
- 'id': '0ap3000000467607',
- 'ext': 'mp4',
- 'title': 'Frustrations flare on the field',
- 'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.',
- 'timestamp': 1422850320,
- 'upload_date': '20150202',
- },
+ }, {
+ # lowercase data-contentid
+ 'url': 'http://www.steelers.com/news/article-1/Tomlin-on-Ben-getting-Vick-ready/56399c96-4160-48cf-a7ad-1d17d4a3aef7',
+ 'info_dict': {
+ 'id': '12693586-6ea9-4743-9c1c-02c59e4a5ef2',
+ 'ext': 'mp4',
+ 'title': 'Tomlin looks ahead to Ravens on a short week',
+ 'description': 'md5:32f3f7b139f43913181d5cbb24ecad75',
+ 'timestamp': 1443459651,
+ 'upload_date': '20150928',
},
- {
- 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
- 'only_matching': True,
- }
- ]
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.buffalobills.com/video/videos/Rex_Ryan_Show_World_Wide_Rex/b1dcfab2-3190-4bb1-bfc0-d6e603d6601a',
+ 'only_matching': True,
+ }]
@staticmethod
def prepend_host(host, url):
@@ -95,13 +160,14 @@ class NFLIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
config_url = NFLIE.prepend_host(host, self._search_regex(
- r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL',
- default='static/content/static/config/video/config.json'))
+ r'(?:(?:config|configURL)\s*:\s*|<nflcs:avplayer[^>]+data-config\s*=\s*)(["\'])(?P<config>.+?)\1',
+ webpage, 'config URL', default='static/content/static/config/video/config.json',
+ group='config'))
# For articles, the id in the url is not the video id
video_id = self._search_regex(
- r'contentId\s*:\s*"([^"]+)"', webpage, 'video id', default=video_id)
- config = self._download_json(config_url, video_id,
- note='Downloading player config')
+ r'(?:<nflcs:avplayer[^>]+data-content[Ii]d\s*=\s*|content[Ii]d\s*:\s*)(["\'])(?P<id>.+?)\1',
+ webpage, 'video id', default=video_id, group='id')
+ config = self._download_json(config_url, video_id, 'Downloading player config')
url_template = NFLIE.prepend_host(
host, '{contentURLTemplate:}'.format(**config))
video_data = self._download_json(
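
For reference, a standalone illustration (not part of the patch) of what the quote-aware config regex above accepts; both the JavaScript-style assignment and the new <nflcs:avplayer> attribute resolve to the same config path:

import re

CONFIG_RE = r'(?:(?:config|configURL)\s*:\s*|<nflcs:avplayer[^>]+data-config\s*=\s*)(["\'])(?P<config>.+?)\1'

pages = [
    'var player = { configURL: "static/content/static/config/video/config.json" };',
    '<nflcs:avplayer data-contentId="123" data-config="static/content/static/config/video/config.json">',
]
for page in pages:
    # the backreference \1 makes sure the closing quote matches the opening one
    print(re.search(CONFIG_RE, page).group('config'))
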
diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py
index 279b18386..e98a5ef89 100644
--- a/youtube_dl/extractor/nhl.py
+++ b/youtube_dl/extractor/nhl.py
@@ -72,7 +72,7 @@ class NHLBaseInfoExtractor(InfoExtractor):
class NHLIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com'
- _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console)?(?:\?(?:.*?[?&])?)(?:id|hlg)=(?P<id>[-0-9a-zA-Z,]+)'
+ _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console|embed)?(?:\?(?:.*?[?&])?)(?:id|hlg|playlist)=(?P<id>[-0-9a-zA-Z,]+)'
_TESTS = [{
'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
@@ -136,6 +136,9 @@ class NHLIE(NHLBaseInfoExtractor):
'params': {
'skip_download': True, # Requires rtmpdump
}
+ }, {
+ 'url': 'http://video.nhl.com/videocenter/embed?playlist=836127',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -146,9 +149,9 @@ class NHLIE(NHLBaseInfoExtractor):
class NHLNewsIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com:news'
IE_DESC = 'NHL news'
- _VALID_URL = r'https?://(?:www\.)?nhl\.com/ice/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
+ _VALID_URL = r'https?://(?:.+?\.)?nhl\.com/(?:ice|club)/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.nhl.com/ice/news.htm?id=750727',
'md5': '4b3d1262e177687a3009937bd9ec0be8',
'info_dict': {
@@ -159,13 +162,26 @@ class NHLNewsIE(NHLBaseInfoExtractor):
'duration': 37,
'upload_date': '20150128',
},
- }
+ }, {
+ # iframe embed
+ 'url': 'http://sabres.nhl.com/club/news.htm?id=780189',
+ 'md5': '9f663d1c006c90ac9fb82777d4294e12',
+ 'info_dict': {
+ 'id': '836127',
+ 'ext': 'mp4',
+ 'title': 'Morning Skate: OTT vs. BUF (9/23/15)',
+ 'description': "Brian Duff chats with Tyler Ennis prior to Buffalo's first preseason home game.",
+ 'duration': 93,
+ 'upload_date': '20150923',
+ },
+ }]
def _real_extract(self, url):
news_id = self._match_id(url)
webpage = self._download_webpage(url, news_id)
video_id = self._search_regex(
- [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'"],
+ [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'",
+ r'<iframe[^>]+src=["\']https?://video.*?\.nhl\.com/videocenter/embed\?.*\bplaylist=(\d+)'],
webpage, 'video id')
return self._real_extract_video(video_id)
diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py
index 0f8aa5ada..bda1cff05 100644
--- a/youtube_dl/extractor/niconico.py
+++ b/youtube_dl/extractor/niconico.py
@@ -12,6 +12,7 @@ from ..compat import (
compat_urlparse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
int_or_none,
parse_duration,
@@ -100,10 +101,7 @@ class NiconicoIE(InfoExtractor):
'mail': username,
'password': password,
}
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
+ login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
request = compat_urllib_request.Request(
'https://secure.nicovideo.jp/secure/login', login_data)
login_results = self._download_webpage(
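
A rough sketch of what encode_dict is assumed to do here (this is not the library source): byte-encode keys and values so Python 2's urlencode does not choke on non-ASCII input, which is exactly what the removed inline code did. The credentials below are placeholders.

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

def encode_dict(d, encoding='utf-8'):
    # assumed behaviour, mirroring the inline code this patch removes
    return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())

login_form_strs = {'mail': 'user@example.com', 'password': 'correct horse'}  # placeholder credentials
login_data = urlencode(encode_dict(login_form_strs)).encode('utf-8')
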
diff --git a/youtube_dl/extractor/ninegag.py b/youtube_dl/extractor/ninegag.py
index 7f842b5c2..a06d38afd 100644
--- a/youtube_dl/extractor/ninegag.py
+++ b/youtube_dl/extractor/ninegag.py
@@ -1,7 +1,6 @@
from __future__ import unicode_literals
import re
-import json
from .common import InfoExtractor
from ..utils import str_to_int
@@ -9,61 +8,93 @@ from ..utils import str_to_int
class NineGagIE(InfoExtractor):
IE_NAME = '9gag'
- _VALID_URL = r'''(?x)^https?://(?:www\.)?9gag\.tv/
- (?:
- v/(?P<numid>[0-9]+)|
- p/(?P<id>[a-zA-Z0-9]+)/(?P<display_id>[^?#/]+)
- )
- '''
+ _VALID_URL = r'https?://(?:www\.)?9gag(?:\.com/tv|\.tv)/(?:p|embed)/(?P<id>[a-zA-Z0-9]+)(?:/(?P<display_id>[^?#/]+))?'
_TESTS = [{
- "url": "http://9gag.tv/v/1912",
- "info_dict": {
- "id": "1912",
- "ext": "mp4",
- "description": "This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)",
- "title": "\"People Are Awesome 2013\" Is Absolutely Awesome",
+ 'url': 'http://9gag.com/tv/p/Kk2X5/people-are-awesome-2013-is-absolutely-awesome',
+ 'info_dict': {
+ 'id': 'Kk2X5',
+ 'ext': 'mp4',
+ 'description': 'This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)',
+ 'title': '\"People Are Awesome 2013\" Is Absolutely Awesome',
'uploader_id': 'UCdEH6EjDKwtTe-sO2f0_1XA',
'uploader': 'CompilationChannel',
'upload_date': '20131110',
- "view_count": int,
- "thumbnail": "re:^https?://",
+ 'view_count': int,
},
- 'add_ie': ['Youtube']
+ 'add_ie': ['Youtube'],
}, {
- 'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar',
+ 'url': 'http://9gag.com/tv/p/aKolP3',
'info_dict': {
- 'id': 'KklwM',
+ 'id': 'aKolP3',
'ext': 'mp4',
- 'display_id': 'alternate-banned-opening-scene-of-gravity',
- "description": "While Gravity was a pretty awesome movie already, YouTuber Krishna Shenoi came up with a way to improve upon it, introducing a much better solution to Sandra Bullock's seemingly endless tumble in space. The ending is priceless.",
- 'title': "Banned Opening Scene Of \"Gravity\" That Changes The Whole Movie",
- 'uploader': 'Krishna Shenoi',
- 'upload_date': '20140401',
- 'uploader_id': 'krishnashenoi93',
+ 'title': 'This Guy Travelled 11 countries In 44 days Just To Make This Amazing Video',
+ 'description': "I just saw more in 1 minute than I've seen in 1 year. This guy's video is epic!!",
+ 'uploader_id': 'rickmereki',
+ 'uploader': 'Rick Mereki',
+ 'upload_date': '20110803',
+ 'view_count': int,
},
+ 'add_ie': ['Vimeo'],
+ }, {
+ 'url': 'http://9gag.com/tv/p/KklwM',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://9gag.tv/p/Kk2X5',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://9gag.com/tv/embed/a5Dmvl',
+ 'only_matching': True,
}]
+ _EXTERNAL_VIDEO_PROVIDER = {
+ '1': {
+ 'url': '%s',
+ 'ie_key': 'Youtube',
+ },
+ '2': {
+ 'url': 'http://player.vimeo.com/video/%s',
+ 'ie_key': 'Vimeo',
+ },
+ '3': {
+ 'url': 'http://instagram.com/p/%s',
+ 'ie_key': 'Instagram',
+ },
+ '4': {
+ 'url': 'http://vine.co/v/%s',
+ 'ie_key': 'Vine',
+ },
+ }
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('numid') or mobj.group('id')
+ video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id
webpage = self._download_webpage(url, display_id)
- post_view = json.loads(self._html_search_regex(
- r'var postView = new app\.PostView\({\s*post:\s*({.+?}),\s*posts:\s*prefetchedCurrentPost', webpage, 'post view'))
+ post_view = self._parse_json(
+ self._search_regex(
+ r'var\s+postView\s*=\s*new\s+app\.PostView\({\s*post:\s*({.+?})\s*,\s*posts:\s*prefetchedCurrentPost',
+ webpage, 'post view'),
+ display_id)
- youtube_id = post_view['videoExternalId']
+ ie_key = None
+ source_url = post_view.get('sourceUrl')
+ if not source_url:
+ external_video_id = post_view['videoExternalId']
+ external_video_provider = post_view['videoExternalProvider']
+ source_url = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['url'] % external_video_id
+ ie_key = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['ie_key']
title = post_view['title']
- description = post_view['description']
- view_count = str_to_int(post_view['externalView'])
+ description = post_view.get('description')
+ view_count = str_to_int(post_view.get('externalView'))
thumbnail = post_view.get('thumbnail_700w') or post_view.get('ogImageUrl') or post_view.get('thumbnail_300w')
return {
'_type': 'url_transparent',
- 'url': youtube_id,
- 'ie_key': 'Youtube',
+ 'url': source_url,
+ 'ie_key': ie_key,
'id': video_id,
'display_id': display_id,
'title': title,
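
A standalone sketch (not part of the patch) of how the _EXTERNAL_VIDEO_PROVIDER table above is used: a numeric provider id plus the stored external id become a URL that is delegated to the matching extractor. The external id below is hypothetical.

EXTERNAL_VIDEO_PROVIDER = {
    '1': {'url': '%s', 'ie_key': 'Youtube'},
    '2': {'url': 'http://player.vimeo.com/video/%s', 'ie_key': 'Vimeo'},
    '3': {'url': 'http://instagram.com/p/%s', 'ie_key': 'Instagram'},
    '4': {'url': 'http://vine.co/v/%s', 'ie_key': 'Vine'},
}

def resolve(provider_id, external_id):
    entry = EXTERNAL_VIDEO_PROVIDER[provider_id]
    return entry['url'] % external_id, entry['ie_key']

print(resolve('2', '12345678'))  # ('http://player.vimeo.com/video/12345678', 'Vimeo')
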
diff --git a/youtube_dl/extractor/nowness.py b/youtube_dl/extractor/nowness.py
index 6b2f3f55a..b97f62fdb 100644
--- a/youtube_dl/extractor/nowness.py
+++ b/youtube_dl/extractor/nowness.py
@@ -1,64 +1,134 @@
# encoding: utf-8
from __future__ import unicode_literals
-import re
-
from .brightcove import BrightcoveIE
from .common import InfoExtractor
from ..utils import ExtractorError
+from ..compat import (
+ compat_str,
+ compat_urllib_request,
+)
+
+
+class NownessBaseIE(InfoExtractor):
+ def _extract_url_result(self, post):
+ if post['type'] == 'video':
+ for media in post['media']:
+ if media['type'] == 'video':
+ video_id = media['content']
+ source = media['source']
+ if source == 'brightcove':
+ player_code = self._download_webpage(
+ 'http://www.nowness.com/iframe?id=%s' % video_id, video_id,
+ note='Downloading player JavaScript',
+ errnote='Unable to download player JavaScript')
+ bc_url = BrightcoveIE._extract_brightcove_url(player_code)
+ if bc_url is None:
+ raise ExtractorError('Could not find player definition')
+ return self.url_result(bc_url, 'Brightcove')
+ elif source == 'vimeo':
+ return self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
+ elif source == 'youtube':
+ return self.url_result(video_id, 'Youtube')
+ elif source == 'cinematique':
+ # youtube-dl currently doesn't support cinematique
+ # return self.url_result('http://cinematique.com/embed/%s' % video_id, 'Cinematique')
+ pass
+ def _api_request(self, url, request_path):
+ display_id = self._match_id(url)
+ request = compat_urllib_request.Request(
+ 'http://api.nowness.com/api/' + request_path % display_id,
+ headers={
+ 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us',
+ })
+ return display_id, self._download_json(request, display_id)
-class NownessIE(InfoExtractor):
- _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])'
- _TESTS = [
- {
- 'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
- 'md5': '068bc0202558c2e391924cb8cc470676',
- 'info_dict': {
- 'id': '2520295746001',
- 'ext': 'mp4',
- 'title': 'Candor: The Art of Gesticulation',
- 'description': 'Candor: The Art of Gesticulation',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'uploader': 'Nowness',
- }
+class NownessIE(NownessBaseIE):
+ IE_NAME = 'nowness'
+ _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/(?:story|(?:series|category)/[^/]+)/(?P<id>[^/]+?)(?:$|[?#])'
+ _TESTS = [{
+ 'url': 'https://www.nowness.com/story/candor-the-art-of-gesticulation',
+ 'md5': '068bc0202558c2e391924cb8cc470676',
+ 'info_dict': {
+ 'id': '2520295746001',
+ 'ext': 'mp4',
+ 'title': 'Candor: The Art of Gesticulation',
+ 'description': 'Candor: The Art of Gesticulation',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'Nowness',
},
- {
- 'url': 'http://cn.nowness.com/day/2014/8/7/4069/kasper-bj-rke-ft-jaakko-eino-kalevi--tnr',
- 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3',
- 'info_dict': {
- 'id': '3716354522001',
- 'ext': 'mp4',
- 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
- 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'uploader': 'Nowness',
- }
+ }, {
+ 'url': 'https://cn.nowness.com/story/kasper-bjorke-ft-jaakko-eino-kalevi-tnr',
+ 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3',
+ 'info_dict': {
+ 'id': '3716354522001',
+ 'ext': 'mp4',
+ 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+ 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'Nowness',
},
- ]
+ }, {
+ # vimeo
+ 'url': 'https://www.nowness.com/series/nowness-picks/jean-luc-godard-supercut',
+ 'md5': '9a5a6a8edf806407e411296ab6bc2a49',
+ 'info_dict': {
+ 'id': '130020913',
+ 'ext': 'mp4',
+ 'title': 'Bleu, Blanc, Rouge - A Godard Supercut',
+ 'description': 'md5:f0ea5f1857dffca02dbd37875d742cec',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'upload_date': '20150607',
+ 'uploader': 'Cinema Sem Lei',
+ 'uploader_id': 'cinemasemlei',
+ },
+ }]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('slug')
+ _, post = self._api_request(url, 'post/getBySlug/%s')
+ return self._extract_url_result(post)
- webpage = self._download_webpage(url, video_id)
- player_url = self._search_regex(
- r'"([^"]+/content/issue-[0-9.]+.js)"', webpage, 'player URL')
- real_id = self._search_regex(
- r'\sdata-videoId="([0-9]+)"', webpage, 'internal video ID')
- player_code = self._download_webpage(
- player_url, video_id,
- note='Downloading player JavaScript',
- errnote='Player download failed')
- player_code = player_code.replace("'+d+'", real_id)
+class NownessPlaylistIE(NownessBaseIE):
+ IE_NAME = 'nowness:playlist'
+ _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/playlist/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'https://www.nowness.com/playlist/3286/i-guess-thats-why-they-call-it-the-blues',
+ 'info_dict': {
+ 'id': '3286',
+ },
+ 'playlist_mincount': 8,
+ }
- bc_url = BrightcoveIE._extract_brightcove_url(player_code)
- if bc_url is None:
- raise ExtractorError('Could not find player definition')
- return {
- '_type': 'url',
- 'url': bc_url,
- 'ie_key': 'Brightcove',
- }
+ def _real_extract(self, url):
+ playlist_id, playlist = self._api_request(url, 'post?PlaylistId=%s')
+ entries = [self._extract_url_result(item) for item in playlist['items']]
+ return self.playlist_result(entries, playlist_id)
+
+
+class NownessSeriesIE(NownessBaseIE):
+ IE_NAME = 'nowness:series'
+ _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/series/(?P<id>[^/]+?)(?:$|[?#])'
+ _TEST = {
+ 'url': 'https://www.nowness.com/series/60-seconds',
+ 'info_dict': {
+ 'id': '60',
+ 'title': '60 Seconds',
+ 'description': 'One-minute wisdom in a new NOWNESS series',
+ },
+ 'playlist_mincount': 4,
+ }
+
+ def _real_extract(self, url):
+ display_id, series = self._api_request(url, 'series/getBySlug/%s')
+ entries = [self._extract_url_result(post) for post in series['posts']]
+ series_title = None
+ series_description = None
+ translations = series.get('translations', [])
+ if translations:
+ series_title = translations[0].get('title') or translations[0]['seoTitle']
+ series_description = translations[0].get('seoDescription')
+ return self.playlist_result(
+ entries, compat_str(series['id']), series_title, series_description)
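
A minimal standalone version (not part of the patch) of the _api_request pattern introduced above: the same API root serves both post and series lookups, and the language header is switched purely on the host name.

try:
    from urllib.request import Request  # Python 3
except ImportError:
    from urllib2 import Request  # Python 2

def build_api_request(url, request_path, display_id):
    # request_path is e.g. 'post/getBySlug/%s' or 'series/getBySlug/%s', as in the diff
    return Request(
        'http://api.nowness.com/api/' + request_path % display_id,
        headers={'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us'})

req = build_api_request(
    'https://cn.nowness.com/story/kasper-bjorke-ft-jaakko-eino-kalevi-tnr',
    'post/getBySlug/%s', 'kasper-bjorke-ft-jaakko-eino-kalevi-tnr')
# req targets http://api.nowness.com/api/post/getBySlug/... with the zh-cn header set
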
diff --git a/youtube_dl/extractor/nowtv.py b/youtube_dl/extractor/nowtv.py
index 0b5ff4760..b0bdffc4e 100644
--- a/youtube_dl/extractor/nowtv.py
+++ b/youtube_dl/extractor/nowtv.py
@@ -1,12 +1,11 @@
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
+ determine_ext,
int_or_none,
parse_iso8601,
parse_duration,
@@ -15,7 +14,7 @@ from ..utils import (
class NowTVIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?nowtv\.de/(?P<station>rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/player'
+ _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/(?:player|preview)'
_TESTS = [{
# rtl
@@ -23,7 +22,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '203519',
'display_id': 'bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Die neuen Bauern und eine Hochzeit',
'description': 'md5:e234e1ed6d63cf06be5c070442612e7e',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -32,7 +31,7 @@ class NowTVIE(InfoExtractor):
'duration': 2786,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -41,7 +40,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '203481',
'display_id': 'berlin-tag-nacht/berlin-tag-nacht-folge-934',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Berlin - Tag & Nacht (Folge 934)',
'description': 'md5:c85e88c2e36c552dfe63433bc9506dd0',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -50,7 +49,7 @@ class NowTVIE(InfoExtractor):
'duration': 2641,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -59,7 +58,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '165780',
'display_id': 'alarm-fuer-cobra-11-die-autobahnpolizei/hals-und-beinbruch-2014-08-23-21-10-00',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Hals- und Beinbruch',
'description': 'md5:b50d248efffe244e6f56737f0911ca57',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -68,7 +67,7 @@ class NowTVIE(InfoExtractor):
'duration': 2742,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -77,7 +76,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '99205',
'display_id': 'medicopter-117/angst',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Angst!',
'description': 'md5:30cbc4c0b73ec98bcd73c9f2a8c17c4e',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -86,7 +85,7 @@ class NowTVIE(InfoExtractor):
'duration': 3025,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -95,7 +94,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '203521',
'display_id': 'ratgeber-geld/thema-ua-der-erste-blick-die-apple-watch',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Thema u.a.: Der erste Blick: Die Apple Watch',
'description': 'md5:4312b6c9d839ffe7d8caf03865a531af',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -104,7 +103,7 @@ class NowTVIE(InfoExtractor):
'duration': 1083,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -113,7 +112,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '128953',
'display_id': 'der-hundeprofi/buero-fall-chihuahua-joel',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': "Büro-Fall / Chihuahua 'Joel'",
'description': 'md5:e62cb6bf7c3cc669179d4f1eb279ad8d',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -122,15 +121,25 @@ class NowTVIE(InfoExtractor):
'duration': 3092,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
+ }, {
+ 'url': 'http://www.nowtv.de/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit/preview',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.nowtv.at/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit/preview?return=/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.nowtv.de/rtl2/echtzeit/list/aktuell/schnelles-geld-am-ende-der-welt/player',
+ 'only_matching': True,
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('id')
- station = mobj.group('station')
+ display_id = self._match_id(url)
+ display_id_split = display_id.split('/')
+ if len(display_id) > 2:
+ display_id = '/'.join((display_id_split[0], display_id_split[-1]))
info = self._download_json(
'https://api.nowtv.de/v3/movies/%s?fields=id,title,free,geoblocked,articleLong,articleShort,broadcastStartDate,seoUrl,duration,format,files' % display_id,
@@ -148,29 +157,19 @@ class NowTVIE(InfoExtractor):
raise ExtractorError(
'Video %s is not available for free' % video_id, expected=True)
- f = info.get('format', {})
- station = f.get('station') or station
-
- STATIONS = {
- 'rtl': 'rtlnow',
- 'rtl2': 'rtl2now',
- 'vox': 'voxnow',
- 'nitro': 'rtlnitronow',
- 'ntv': 'n-tvnow',
- 'superrtl': 'superrtlnow'
- }
-
formats = []
for item in files['items']:
- item_path = remove_start(item['path'], '/')
- tbr = int_or_none(item['bitrate'])
- m3u8_url = 'http://hls.fra.%s.de/hls-vod-enc/%s.m3u8' % (STATIONS[station], item_path)
- m3u8_url = m3u8_url.replace('now/', 'now/videos/')
+ if determine_ext(item['path']) != 'f4v':
+ continue
+ app, play_path = remove_start(item['path'], '/').split('/', 1)
formats.append({
- 'url': m3u8_url,
- 'format_id': '%s-%sk' % (item['id'], tbr),
- 'ext': 'mp4',
- 'tbr': tbr,
+ 'url': 'rtmpe://fms.rtl.de',
+ 'app': app,
+ 'play_path': 'mp4:%s' % play_path,
+ 'ext': 'flv',
+ 'page_url': 'http://rtlnow.rtl.de',
+ 'player_url': 'http://cdn.static-fra.de/now/vodplayer.swf',
+ 'tbr': int_or_none(item.get('bitrate')),
})
self._sort_formats(formats)
@@ -178,6 +177,8 @@ class NowTVIE(InfoExtractor):
description = info.get('articleLong') or info.get('articleShort')
timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ')
duration = parse_duration(info.get('duration'))
+
+ f = info.get('format', {})
thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo')
return {
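
As a standalone illustration (not part of the patch), the rtmp format entries the rewritten extractor builds from each f4v item path look roughly like this; the path and bitrate below are hypothetical:

def rtmp_format(item_path, bitrate=None):
    # '/app/rest/of/path.f4v' is split into the rtmp app and play_path, as in the loop above
    app, play_path = item_path.lstrip('/').split('/', 1)
    return {
        'url': 'rtmpe://fms.rtl.de',
        'app': app,
        'play_path': 'mp4:%s' % play_path,
        'ext': 'flv',
        'page_url': 'http://rtlnow.rtl.de',
        'player_url': 'http://cdn.static-fra.de/now/vodplayer.swf',
        'tbr': bitrate,
    }

print(rtmp_format('/nowtv/videos/example_clip.f4v', 1500))  # hypothetical values
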
diff --git a/youtube_dl/extractor/nowvideo.py b/youtube_dl/extractor/nowvideo.py
index dec09cdfe..17baa9679 100644
--- a/youtube_dl/extractor/nowvideo.py
+++ b/youtube_dl/extractor/nowvideo.py
@@ -7,7 +7,7 @@ class NowVideoIE(NovaMovIE):
IE_NAME = 'nowvideo'
IE_DESC = 'NowVideo'
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|sx|eu|at|ag|co|li)'}
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|ec|sx|eu|at|ag|co|li)'}
_HOST = 'www.nowvideo.ch'
diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py
index 0c2d02c10..eb12fb810 100644
--- a/youtube_dl/extractor/npo.py
+++ b/youtube_dl/extractor/npo.py
@@ -407,6 +407,7 @@ class NPORadioFragmentIE(InfoExtractor):
class VPROIE(NPOIE):
+ IE_NAME = 'vpro'
_VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
_TESTS = [
diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index d066a96db..8ac38a174 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
float_or_none,
@@ -49,7 +50,7 @@ class NRKIE(InfoExtractor):
if data['usageRights']['isGeoBlocked']:
raise ExtractorError(
- 'NRK har ikke rettig-heter til å vise dette programmet utenfor Norge',
+ 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
expected=True)
video_url = data['mediaUrl'] + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81'
@@ -196,20 +197,6 @@ class NRKTVIE(InfoExtractor):
}
]
- def _debug_print(self, txt):
- if self._downloader.params.get('verbose', False):
- self.to_screen('[debug] %s' % txt)
-
- def _get_subtitles(self, subtitlesurl, video_id, baseurl):
- url = "%s%s" % (baseurl, subtitlesurl)
- self._debug_print('%s: Subtitle url: %s' % (video_id, url))
- captions = self._download_xml(
- url, video_id, 'Downloading subtitles')
- lang = captions.get('lang', 'no')
- return {lang: [
- {'ext': 'ttml', 'url': url},
- ]}
-
def _extract_f4m(self, manifest_url, video_id):
return self._extract_f4m_formats(
manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id, f4m_id='hds')
@@ -218,7 +205,7 @@ class NRKTVIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
part_id = mobj.group('part_id')
- baseurl = mobj.group('baseurl')
+ base_url = mobj.group('baseurl')
webpage = self._download_webpage(url, video_id)
@@ -278,11 +265,14 @@ class NRKTVIE(InfoExtractor):
self._sort_formats(formats)
subtitles_url = self._html_search_regex(
- r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"',
- webpage, 'subtitle URL', default=None)
- subtitles = None
+ r'data-subtitlesurl\s*=\s*(["\'])(?P<url>.+?)\1',
+ webpage, 'subtitle URL', default=None, group='url')
+ subtitles = {}
if subtitles_url:
- subtitles = self.extract_subtitles(subtitles_url, video_id, baseurl)
+ subtitles['no'] = [{
+ 'ext': 'ttml',
+ 'url': compat_urlparse.urljoin(base_url, subtitles_url),
+ }]
return {
'id': video_id,
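
A standalone illustration (not part of the patch) of the simplified subtitle handling above: the relative data-subtitlesurl value is joined against the page's base URL and exposed as a single Norwegian TTML track. The URLs below are hypothetical.

try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin  # Python 2

base_url = 'https://tv.nrk.no/'                # hypothetical base URL
subtitles_url = 'programsubtitles/example'     # hypothetical data-subtitlesurl value
subtitles = {}
if subtitles_url:
    subtitles['no'] = [{
        'ext': 'ttml',
        'url': urljoin(base_url, subtitles_url),
    }]
print(subtitles['no'][0]['url'])  # https://tv.nrk.no/programsubtitles/example
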
diff --git a/youtube_dl/extractor/odnoklassniki.py b/youtube_dl/extractor/odnoklassniki.py
index 215ffe87b..ccc88cfb1 100644
--- a/youtube_dl/extractor/odnoklassniki.py
+++ b/youtube_dl/extractor/odnoklassniki.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
+ ExtractorError,
unified_strdate,
int_or_none,
qualities,
@@ -12,20 +13,23 @@ from ..utils import (
class OdnoklassnikiIE(InfoExtractor):
- _VALID_URL = r'https?://(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
+ _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
_TESTS = [{
# metadata in JSON
'url': 'http://ok.ru/video/20079905452',
- 'md5': '8e24ad2da6f387948e7a7d44eb8668fe',
+ 'md5': '6ba728d85d60aa2e6dd37c9e70fdc6bc',
'info_dict': {
'id': '20079905452',
'ext': 'mp4',
'title': 'Культура меняет нас (прекрасный ролик!))',
'duration': 100,
+ 'upload_date': '20141207',
'uploader_id': '330537914540',
'uploader': 'Виталий Добровольский',
'like_count': int,
+ 'age_limit': 0,
},
+ 'skip': 'Video has been blocked',
}, {
# metadataUrl
'url': 'http://ok.ru/video/63567059965189-0',
@@ -35,13 +39,33 @@ class OdnoklassnikiIE(InfoExtractor):
'ext': 'mp4',
'title': 'Девушка без комплексов ...',
'duration': 191,
+ 'upload_date': '20150518',
'uploader_id': '534380003155',
- 'uploader': 'Андрей Мещанинов',
+ 'uploader': '☭ Андрей Мещанинов ☭',
'like_count': int,
+ 'age_limit': 0,
+ },
+ }, {
+ # YouTube embed (metadataUrl, provider == USER_YOUTUBE)
+ 'url': 'http://ok.ru/video/64211978996595-1',
+ 'md5': '5d7475d428845cd2e13bae6f1a992278',
+ 'info_dict': {
+ 'id': '64211978996595-1',
+ 'ext': 'mp4',
+ 'title': 'Космическая среда от 26 августа 2015',
+ 'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0',
+ 'duration': 440,
+ 'upload_date': '20150826',
+ 'uploader_id': '750099571',
+ 'uploader': 'Алина П',
+ 'age_limit': 0,
},
}, {
'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
'only_matching': True,
+ }, {
+ 'url': 'http://www.ok.ru/video/20648036891',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -50,9 +74,16 @@ class OdnoklassnikiIE(InfoExtractor):
webpage = self._download_webpage(
'http://ok.ru/video/%s' % video_id, video_id)
+ error = self._search_regex(
+ r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<',
+ webpage, 'error', default=None)
+ if error:
+ raise ExtractorError(error, expected=True)
+
player = self._parse_json(
unescapeHTML(self._search_regex(
- r'data-attributes="([^"]+)"', webpage, 'player')),
+ r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id,
+ webpage, 'player', group='player')),
video_id)
flashvars = player['flashvars']
@@ -85,16 +116,7 @@ class OdnoklassnikiIE(InfoExtractor):
like_count = int_or_none(metadata.get('likeCount'))
- quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
-
- formats = [{
- 'url': f['url'],
- 'ext': 'mp4',
- 'format_id': f['name'],
- 'quality': quality(f['name']),
- } for f in metadata['videos']]
-
- return {
+ info = {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
@@ -104,5 +126,24 @@ class OdnoklassnikiIE(InfoExtractor):
'uploader_id': uploader_id,
'like_count': like_count,
'age_limit': age_limit,
- 'formats': formats,
}
+
+ if metadata.get('provider') == 'USER_YOUTUBE':
+ info.update({
+ '_type': 'url_transparent',
+ 'url': movie['contentId'],
+ })
+ return info
+
+ quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
+
+ formats = [{
+ 'url': f['url'],
+ 'ext': 'mp4',
+ 'format_id': f['name'],
+ 'quality': quality(f['name']),
+ } for f in metadata['videos']]
+ self._sort_formats(formats)
+
+ info['formats'] = formats
+ return info
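
The qualities() helper used above is assumed to behave roughly as follows (a sketch, not the library source): it turns an ordered tuple of names into a ranking function so that 'mobile' sorts below 'hd'.

def qualities(quality_ids):
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
formats = [{'format_id': name, 'quality': quality(name)} for name in ('hd', 'mobile', 'sd')]
print(sorted(formats, key=lambda f: f['quality']))  # mobile first, hd last
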
diff --git a/youtube_dl/extractor/openfilm.py b/youtube_dl/extractor/openfilm.py
deleted file mode 100644
index d2ceedd01..000000000
--- a/youtube_dl/extractor/openfilm.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from __future__ import unicode_literals
-
-import json
-
-from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
-from ..utils import (
- parse_iso8601,
- parse_age_limit,
- int_or_none,
-)
-
-
-class OpenFilmIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)openfilm\.com/videos/(?P<id>.+)'
- _TEST = {
- 'url': 'http://www.openfilm.com/videos/human-resources-remastered',
- 'md5': '42bcd88c2f3ec13b65edf0f8ad1cac37',
- 'info_dict': {
- 'id': '32736',
- 'display_id': 'human-resources-remastered',
- 'ext': 'mp4',
- 'title': 'Human Resources (Remastered)',
- 'description': 'Social Engineering in the 20th Century.',
- 'thumbnail': 're:^https?://.*\.jpg$',
- 'duration': 7164,
- 'timestamp': 1334756988,
- 'upload_date': '20120418',
- 'uploader_id': '41117',
- 'view_count': int,
- 'age_limit': 0,
- },
- }
-
- def _real_extract(self, url):
- display_id = self._match_id(url)
-
- webpage = self._download_webpage(url, display_id)
-
- player = compat_urllib_parse_unquote_plus(
- self._og_search_video_url(webpage))
-
- video = json.loads(self._search_regex(
- r'\bp=({.+?})(?:&|$)', player, 'video JSON'))
-
- video_url = '%s1.mp4' % video['location']
- video_id = video.get('video_id')
- display_id = video.get('alias') or display_id
- title = video.get('title')
- description = video.get('description')
- thumbnail = video.get('main_thumb')
- duration = int_or_none(video.get('duration'))
- timestamp = parse_iso8601(video.get('dt_published'), ' ')
- uploader_id = video.get('user_id')
- view_count = int_or_none(video.get('views_count'))
- age_limit = parse_age_limit(video.get('age_limit'))
-
- return {
- 'id': video_id,
- 'display_id': display_id,
- 'url': video_url,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'timestamp': timestamp,
- 'uploader_id': uploader_id,
- 'view_count': view_count,
- 'age_limit': age_limit,
- }
diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py
index fec5d65ad..3448736a2 100644
--- a/youtube_dl/extractor/pbs.py
+++ b/youtube_dl/extractor/pbs.py
@@ -32,7 +32,7 @@ class PBSIE(InfoExtractor):
'info_dict': {
'id': '2365006249',
'ext': 'mp4',
- 'title': 'A More Perfect Union',
+ 'title': 'Constitution USA with Peter Sagal - A More Perfect Union',
'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
'duration': 3190,
},
@@ -46,7 +46,7 @@ class PBSIE(InfoExtractor):
'info_dict': {
'id': '2365297690',
'ext': 'mp4',
- 'title': 'Losing Iraq',
+ 'title': 'FRONTLINE - Losing Iraq',
'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
'duration': 5050,
},
@@ -60,7 +60,7 @@ class PBSIE(InfoExtractor):
'info_dict': {
'id': '2201174722',
'ext': 'mp4',
- 'title': 'Cyber Schools Gain Popularity, but Quality Questions Persist',
+ 'title': 'PBS NewsHour - Cyber Schools Gain Popularity, but Quality Questions Persist',
'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28',
'duration': 801,
},
@@ -72,7 +72,7 @@ class PBSIE(InfoExtractor):
'id': '2365297708',
'ext': 'mp4',
'description': 'md5:68d87ef760660eb564455eb30ca464fe',
- 'title': 'Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
+ 'title': 'Great Performances - Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
'duration': 6559,
'thumbnail': 're:^https?://.*\.jpg$',
},
@@ -88,10 +88,11 @@ class PBSIE(InfoExtractor):
'display_id': 'killer-typhoon',
'ext': 'mp4',
'description': 'md5:c741d14e979fc53228c575894094f157',
- 'title': 'Killer Typhoon',
+ 'title': 'NOVA - Killer Typhoon',
'duration': 3172,
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140122',
+ 'age_limit': 10,
},
'params': {
'skip_download': True, # requires ffmpeg
@@ -107,12 +108,46 @@ class PBSIE(InfoExtractor):
{
'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/',
'info_dict': {
- 'id': '2280706814',
+ 'id': '2276541483',
'display_id': 'player',
'ext': 'mp4',
- 'title': 'Death and the Civil War',
+ 'title': 'American Experience - Death and the Civil War, Chapter 1',
'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.',
- 'duration': 6705,
+ 'duration': 682,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
+ },
+ {
+ 'url': 'http://video.pbs.org/video/2365367186/',
+ 'info_dict': {
+ 'id': '2365367186',
+ 'display_id': '2365367186',
+ 'ext': 'mp4',
+ 'title': 'To Catch A Comet - Full Episode',
+ 'description': 'On November 12, 2014, billions of kilometers from Earth, spacecraft orbiter Rosetta and lander Philae did what no other had dared to attempt \u2014 land on the volatile surface of a comet as it zooms around the sun at 67,000 km/hr. The European Space Agency hopes this mission can help peer into our past and unlock secrets of our origins.',
+ 'duration': 3342,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
+ 'skip': 'Expired',
+ },
+ {
+ # Video embedded in iframe containing angle brackets as attribute's value (e.g.
+ # "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see
+ # https://github.com/rg3/youtube-dl/issues/7059)
+ 'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/',
+ 'info_dict': {
+ 'id': '2365546844',
+ 'display_id': 'a-chefs-life-season-3-episode-5-prickly-business',
+ 'ext': 'mp4',
+ 'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business",
+ 'description': 'md5:61db2ddf27c9912f09c241014b118ed1',
+ 'duration': 1480,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
@@ -120,6 +155,12 @@ class PBSIE(InfoExtractor):
},
}
]
+ _ERRORS = {
+ 101: 'We\'re sorry, but this video is not yet available.',
+ 403: 'We\'re sorry, but this video is not available in your region due to right restrictions.',
+ 404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.',
+ 410: 'This video has expired and is no longer available for online streaming.',
+ }
def _extract_webpage(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -151,7 +192,7 @@ class PBSIE(InfoExtractor):
return media_id, presumptive_id, upload_date
url = self._search_regex(
- r'<iframe\s+[^>]*\s+src=["\']([^\'"]+partnerplayer[^\'"]+)["\']',
+ r'(?s)<iframe[^>]+?(?:[a-z-]+?=["\'].*?["\'][^>]+?)*?\bsrc=["\']([^\'"]+partnerplayer[^\'"]+)["\']',
webpage, 'player URL')
mobj = re.match(self._VALID_URL, url)
@@ -197,13 +238,11 @@ class PBSIE(InfoExtractor):
'Downloading %s video url info' % encoding_name)
if redirect_info['status'] == 'error':
- if redirect_info['http_code'] == 403:
- message = (
- 'The video is not available in your region due to '
- 'right restrictions')
- else:
- message = redirect_info['message']
- raise ExtractorError(message, expected=True)
+ raise ExtractorError(
+ '%s said: %s' % (
+ self.IE_NAME,
+ self._ERRORS.get(redirect_info['http_code'], redirect_info['message'])),
+ expected=True)
format_url = redirect_info.get('url')
if not format_url:
@@ -232,6 +271,12 @@ class PBSIE(InfoExtractor):
'url': closed_captions_url,
}]
+ # info['title'] is often incomplete (e.g. 'Full Episode', 'Episode 5', etc)
+ # Try turning it to 'program - title' naming scheme if possible
+ alt_title = info.get('program', {}).get('title')
+ if alt_title:
+ info['title'] = alt_title + ' - ' + re.sub(r'^' + alt_title + '[\s\-:]+', '', info['title'])
+
return {
'id': video_id,
'display_id': display_id,
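
A standalone illustration (not part of the patch) of the 'program - title' rewrite added above; it also strips a leading repeat of the program name so titles such as 'FRONTLINE: Losing Iraq' do not double up:

import re

def full_title(program, title):
    # drop a leading repeat of the program name, then prepend 'program - '
    return program + ' - ' + re.sub(r'^' + program + r'[\s\-:]+', '', title)

print(full_title('NOVA', 'Killer Typhoon'))               # NOVA - Killer Typhoon
print(full_title('FRONTLINE', 'FRONTLINE: Losing Iraq'))  # FRONTLINE - Losing Iraq
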
diff --git a/youtube_dl/extractor/periscope.py b/youtube_dl/extractor/periscope.py
new file mode 100644
index 000000000..8ad936758
--- /dev/null
+++ b/youtube_dl/extractor/periscope.py
@@ -0,0 +1,99 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+from ..utils import parse_iso8601
+
+
+class PeriscopeIE(InfoExtractor):
+ IE_DESC = 'Periscope'
+ _VALID_URL = r'https?://(?:www\.)?periscope\.tv/w/(?P<id>[^/?#]+)'
+ _TEST = {
+ 'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==',
+ 'md5': '65b57957972e503fcbbaeed8f4fa04ca',
+ 'info_dict': {
+ 'id': '56102209',
+ 'ext': 'mp4',
+ 'title': 'Bec Boop - 🚠✈️🇬🇧 Fly above #London in Emirates Air Line cable car at night 🇬🇧✈️🚠 #BoopScope 🎀💗',
+ 'timestamp': 1438978559,
+ 'upload_date': '20150807',
+ 'uploader': 'Bec Boop',
+ 'uploader_id': '1465763',
+ },
+ 'skip': 'Expires in 24 hours',
+ }
+
+ def _call_api(self, method, token):
+ return self._download_json(
+ 'https://api.periscope.tv/api/v2/%s?token=%s' % (method, token), token)
+
+ def _real_extract(self, url):
+ token = self._match_id(url)
+
+ broadcast_data = self._call_api('getBroadcastPublic', token)
+ broadcast = broadcast_data['broadcast']
+ status = broadcast['status']
+
+ uploader = broadcast.get('user_display_name') or broadcast_data.get('user', {}).get('display_name')
+ uploader_id = broadcast.get('user_id') or broadcast_data.get('user', {}).get('id')
+
+ title = '%s - %s' % (uploader, status) if uploader else status
+ state = broadcast.get('state').lower()
+ if state == 'running':
+ title = self._live_title(title)
+ timestamp = parse_iso8601(broadcast.get('created_at'))
+
+ thumbnails = [{
+ 'url': broadcast[image],
+ } for image in ('image_url', 'image_url_small') if broadcast.get(image)]
+
+ stream = self._call_api('getAccessPublic', token)
+
+ formats = []
+ for format_id in ('replay', 'rtmp', 'hls', 'https_hls'):
+ video_url = stream.get(format_id + '_url')
+ if not video_url:
+ continue
+ f = {
+ 'url': video_url,
+ 'ext': 'flv' if format_id == 'rtmp' else 'mp4',
+ }
+ if format_id != 'rtmp':
+ f['protocol'] = 'm3u8_native' if state == 'ended' else 'm3u8'
+ formats.append(f)
+ self._sort_formats(formats)
+
+ return {
+ 'id': broadcast.get('id') or token,
+ 'title': title,
+ 'timestamp': timestamp,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }
+
+
+class QuickscopeIE(InfoExtractor):
+ IE_DESC = 'Quick Scope'
+ _VALID_URL = r'https?://watchonperiscope\.com/broadcast/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'https://watchonperiscope.com/broadcast/56180087',
+ 'only_matching': True,
+ }
+
+ def _real_extract(self, url):
+ broadcast_id = self._match_id(url)
+ request = compat_urllib_request.Request(
+ 'https://watchonperiscope.com/api/accessChannel', compat_urllib_parse.urlencode({
+ 'broadcast_id': broadcast_id,
+ 'entry_ticket': '',
+ 'from_push': 'false',
+ 'uses_sessions': 'true',
+ }).encode('utf-8'))
+ return self.url_result(
+ self._download_json(request, broadcast_id)['share_url'], 'Periscope')
diff --git a/youtube_dl/extractor/playtvak.py b/youtube_dl/extractor/playtvak.py
new file mode 100644
index 000000000..e360404f7
--- /dev/null
+++ b/youtube_dl/extractor/playtvak.py
@@ -0,0 +1,181 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urlparse,
+ compat_urllib_parse,
+)
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_iso8601,
+ qualities,
+)
+
+
+class PlaytvakIE(InfoExtractor):
+ IE_DESC = 'Playtvak.cz, iDNES.cz and Lidovky.cz'
+ _VALID_URL = r'https?://(?:.+?\.)?(?:playtvak|idnes|lidovky|metro)\.cz/.*\?(?:c|idvideo)=(?P<id>[^&]+)'
+ _TESTS = [{
+ 'url': 'http://www.playtvak.cz/vyzente-vosy-a-srsne-ze-zahrady-dn5-/hodinovy-manzel.aspx?c=A150730_150323_hodinovy-manzel_kuko',
+ 'md5': '4525ae312c324b4be2f4603cc78ceb4a',
+ 'info_dict': {
+ 'id': 'A150730_150323_hodinovy-manzel_kuko',
+ 'ext': 'mp4',
+ 'title': 'Vyžeňte vosy a sršně ze zahrady',
+ 'description': 'md5:f93d398691044d303bc4a3de62f3e976',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'duration': 279,
+ 'timestamp': 1438732860,
+ 'upload_date': '20150805',
+ 'is_live': False,
+ }
+ }, { # live video test
+ 'url': 'http://slowtv.playtvak.cz/planespotting-0pr-/planespotting.aspx?c=A150624_164934_planespotting_cat',
+ 'info_dict': {
+ 'id': 'A150624_164934_planespotting_cat',
+ 'ext': 'flv',
+ 'title': 're:^Přímý přenos iDNES.cz [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'description': 'Sledujte provoz na ranveji Letiště Václava Havla v Praze',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True, # requires rtmpdump
+ },
+ }, { # idnes.cz
+ 'url': 'http://zpravy.idnes.cz/pes-zavreny-v-aute-rozbijeni-okynek-v-aute-fj5-/domaci.aspx?c=A150809_104116_domaci_pku',
+ 'md5': '819832ba33cd7016e58a6658577fe289',
+ 'info_dict': {
+ 'id': 'A150809_104116_domaci_pku',
+ 'ext': 'mp4',
+ 'title': 'Zavřeli jsme mraženou pizzu do auta. Upekla se',
+ 'description': 'md5:01e73f02329e2e5760bd5eed4d42e3c2',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'duration': 39,
+ 'timestamp': 1438969140,
+ 'upload_date': '20150807',
+ 'is_live': False,
+ }
+ }, { # lidovky.cz
+ 'url': 'http://www.lidovky.cz/dalsi-demonstrace-v-praze-o-migraci-duq-/video.aspx?c=A150808_214044_ln-video_ELE',
+ 'md5': 'c7209ac4ba9d234d4ad5bab7485bcee8',
+ 'info_dict': {
+ 'id': 'A150808_214044_ln-video_ELE',
+ 'ext': 'mp4',
+ 'title': 'Táhni! Demonstrace proti imigrantům budila emoce',
+ 'description': 'md5:97c81d589a9491fbfa323c9fa3cca72c',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'timestamp': 1439052180,
+ 'upload_date': '20150808',
+ 'is_live': False,
+ }
+ }, { # metro.cz
+ 'url': 'http://www.metro.cz/video-pod-billboardem-se-na-vltavske-roztocil-kolotoc-deti-vozil-jen-par-hodin-1hx-/metro-extra.aspx?c=A141111_173251_metro-extra_row',
+ 'md5': '84fc1deedcac37b7d4a6ccae7c716668',
+ 'info_dict': {
+ 'id': 'A141111_173251_metro-extra_row',
+ 'ext': 'mp4',
+ 'title': 'Recesisté udělali z billboardu kolotoč',
+ 'description': 'md5:7369926049588c3989a66c9c1a043c4c',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'timestamp': 1415725500,
+ 'upload_date': '20141111',
+ 'is_live': False,
+ }
+ }, {
+ 'url': 'http://www.playtvak.cz/embed.aspx?idvideo=V150729_141549_play-porad_kuko',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ info_url = self._html_search_regex(
+ r'Misc\.videoFLV\(\s*{\s*data\s*:\s*"([^"]+)"', webpage, 'info url')
+
+ parsed_url = compat_urlparse.urlparse(info_url)
+
+ qs = compat_urlparse.parse_qs(parsed_url.query)
+ qs.update({
+ 'reklama': ['0'],
+ 'type': ['js'],
+ })
+
+ info_url = compat_urlparse.urlunparse(
+ parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+
+ json_info = self._download_json(
+ info_url, video_id,
+ transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])
+
+ item = None
+ for i in json_info['items']:
+ if i.get('type') == 'video' or i.get('type') == 'stream':
+ item = i
+ break
+ if not item:
+ raise ExtractorError('No suitable stream found')
+
+ quality = qualities(('low', 'middle', 'high'))
+
+ formats = []
+ for fmt in item['video']:
+ video_url = fmt.get('file')
+ if not video_url:
+ continue
+
+ format_ = fmt['format']
+ format_id = '%s_%s' % (format_, fmt['quality'])
+ preference = None
+
+ if format_ in ('mp4', 'webm'):
+ ext = format_
+ elif format_ == 'rtmp':
+ ext = 'flv'
+ elif format_ == 'apple':
+ ext = 'mp4'
+ # Some streams have mp3 audio which does not play
+ # well with ffmpeg filter aac_adtstoasc
+ preference = -1
+ elif format_ == 'adobe': # f4m manifest fails with 404 in 80% of requests
+ continue
+ else: # Other formats not supported yet
+ continue
+
+ formats.append({
+ 'url': video_url,
+ 'ext': ext,
+ 'format_id': format_id,
+ 'quality': quality(fmt.get('quality')),
+ 'preference': preference,
+ })
+ self._sort_formats(formats)
+
+ title = item['title']
+ is_live = item['type'] == 'stream'
+ if is_live:
+ title = self._live_title(title)
+ description = self._og_search_description(webpage, default=None) or self._html_search_meta(
+ 'description', webpage, 'description')
+ timestamp = None
+ duration = None
+ if not is_live:
+ duration = int_or_none(item.get('length'))
+ timestamp = item.get('published')
+ if timestamp:
+ timestamp = parse_iso8601(timestamp[:-5])
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': item.get('image'),
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'is_live': is_live,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/playwire.py b/youtube_dl/extractor/playwire.py
index bdc71017b..6d138ef25 100644
--- a/youtube_dl/extractor/playwire.py
+++ b/youtube_dl/extractor/playwire.py
@@ -19,7 +19,7 @@ class PlaywireIE(InfoExtractor):
'id': '3353705',
'ext': 'mp4',
'title': 'S04_RM_UCL_Rus',
- 'thumbnail': 're:^http://.*\.png$',
+ 'thumbnail': 're:^https?://.*\.png$',
'duration': 145.94,
},
}, {
diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py
new file mode 100644
index 000000000..fd32836cc
--- /dev/null
+++ b/youtube_dl/extractor/pluralsight.py
@@ -0,0 +1,207 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+ compat_urllib_parse,
+ compat_urllib_request,
+ compat_urlparse,
+)
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_duration,
+)
+
+
+class PluralsightIE(InfoExtractor):
+ IE_NAME = 'pluralsight'
+ _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/training/player\?author=(?P<author>[^&]+)&name=(?P<name>[^&]+)(?:&mode=live)?&clip=(?P<clip>\d+)&course=(?P<course>[^&]+)'
+ _LOGIN_URL = 'https://www.pluralsight.com/id/'
+ _NETRC_MACHINE = 'pluralsight'
+
+ _TEST = {
+ 'url': 'http://www.pluralsight.com/training/player?author=mike-mckeown&name=hosting-sql-server-windows-azure-iaas-m7-mgmt&mode=live&clip=3&course=hosting-sql-server-windows-azure-iaas',
+ 'md5': '4d458cf5cf4c593788672419a8dd4cf8',
+ 'info_dict': {
+ 'id': 'hosting-sql-server-windows-azure-iaas-m7-mgmt-04',
+ 'ext': 'mp4',
+ 'title': 'Management of SQL Server - Demo Monitoring',
+ 'duration': 338,
+ },
+ 'skip': 'Requires pluralsight account credentials',
+ }
+
+ def _real_initialize(self):
+ self._login()
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ self.raise_login_required('Pluralsight account is required')
+
+ login_page = self._download_webpage(
+ self._LOGIN_URL, None, 'Downloading login page')
+
+ login_form = self._hidden_inputs(login_page)
+
+ login_form.update({
+ 'Username': username.encode('utf-8'),
+ 'Password': password.encode('utf-8'),
+ })
+
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
+ 'post url', default=self._LOGIN_URL, group='url')
+
+ if not post_url.startswith('http'):
+ post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
+
+ request = compat_urllib_request.Request(
+ post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+
+ response = self._download_webpage(
+ request, None, 'Logging in as %s' % username)
+
+ error = self._search_regex(
+ r'<span[^>]+class="field-validation-error"[^>]*>([^<]+)</span>',
+ response, 'error message', default=None)
+ if error:
+ raise ExtractorError('Unable to login: %s' % error, expected=True)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ author = mobj.group('author')
+ name = mobj.group('name')
+ clip_id = mobj.group('clip')
+ course = mobj.group('course')
+
+ display_id = '%s-%s' % (name, clip_id)
+
+ webpage = self._download_webpage(url, display_id)
+
+ collection = self._parse_json(
+ self._search_regex(
+ r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)',
+ webpage, 'modules'),
+ display_id)
+
+ module, clip = None, None
+
+ for module_ in collection:
+ if module_.get('moduleName') == name:
+ module = module_
+ for clip_ in module_.get('clips', []):
+ clip_index = clip_.get('clipIndex')
+ if clip_index is None:
+ continue
+ if compat_str(clip_index) == clip_id:
+ clip = clip_
+ break
+
+ if not clip:
+ raise ExtractorError('Unable to resolve clip')
+
+ QUALITIES = {
+ 'low': {'width': 640, 'height': 480},
+ 'medium': {'width': 848, 'height': 640},
+ 'high': {'width': 1024, 'height': 768},
+ }
+
+ ALLOWED_QUALITIES = (
+ ('webm', ('high',)),
+ ('mp4', ('low', 'medium', 'high',)),
+ )
+
+ formats = []
+ for ext, qualities in ALLOWED_QUALITIES:
+ for quality in qualities:
+ f = QUALITIES[quality].copy()
+ clip_post = {
+ 'a': author,
+ 'cap': 'false',
+ 'cn': clip_id,
+ 'course': course,
+ 'lc': 'en',
+ 'm': name,
+ 'mt': ext,
+ 'q': '%dx%d' % (f['width'], f['height']),
+ }
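+ # ViewClip replies with the direct media URL as the plain-text response body
+ # for the requested container ('mt') and resolution ('q').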
+ request = compat_urllib_request.Request(
+ 'http://www.pluralsight.com/training/Player/ViewClip',
+ json.dumps(clip_post).encode('utf-8'))
+ request.add_header('Content-Type', 'application/json;charset=utf-8')
+ format_id = '%s-%s' % (ext, quality)
+ clip_url = self._download_webpage(
+ request, display_id, 'Downloading %s URL' % format_id, fatal=False)
+ if not clip_url:
+ continue
+ f.update({
+ 'url': clip_url,
+ 'ext': ext,
+ 'format_id': format_id,
+ })
+ formats.append(f)
+ self._sort_formats(formats)
+
+ # TODO: captions
+ # http://www.pluralsight.com/training/Player/ViewClip + cap = true
+ # or
+ # http://www.pluralsight.com/training/Player/Captions
+ # { a = author, cn = clip_id, lc = end, m = name }
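+ # A rough, untested sketch of what such a captions request might look like,
+ # assuming the Captions endpoint takes a JSON POST body like ViewClip above
+ # and that 'lc' is a language code (the names below are illustrative only):
+ #
+ #   captions_post = {'a': author, 'cn': clip_id, 'lc': 'en', 'm': name}
+ #   req = compat_urllib_request.Request(
+ #       'http://www.pluralsight.com/training/Player/Captions',
+ #       json.dumps(captions_post).encode('utf-8'))
+ #   req.add_header('Content-Type', 'application/json;charset=utf-8')
+ #   captions = self._download_webpage(
+ #       req, display_id, 'Downloading captions', fatal=False)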
+
+ return {
+ 'id': clip['clipName'],
+ 'title': '%s - %s' % (module['title'], clip['title']),
+ 'duration': int_or_none(clip.get('duration')) or parse_duration(clip.get('formattedDuration')),
+ 'creator': author,
+ 'formats': formats
+ }
+
+
+class PluralsightCourseIE(InfoExtractor):
+ IE_NAME = 'pluralsight:course'
+ _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/courses/(?P<id>[^/]+)'
+ _TEST = {
+ # Free course from Pluralsight Starter Subscription for Microsoft TechNet
+ # https://offers.pluralsight.com/technet?loc=zTS3z&prod=zOTprodz&tech=zOttechz&prog=zOTprogz&type=zSOz&media=zOTmediaz&country=zUSz
+ 'url': 'http://www.pluralsight.com/courses/hosting-sql-server-windows-azure-iaas',
+ 'info_dict': {
+ 'id': 'hosting-sql-server-windows-azure-iaas',
+ 'title': 'Hosting SQL Server in Microsoft Azure IaaS Fundamentals',
+ 'description': 'md5:61b37e60f21c4b2f91dc621a977d0986',
+ },
+ 'playlist_count': 31,
+ }
+
+ def _real_extract(self, url):
+ course_id = self._match_id(url)
+
+ # TODO: PSM cookie
+
+ course = self._download_json(
+ 'http://www.pluralsight.com/data/course/%s' % course_id,
+ course_id, 'Downloading course JSON')
+
+ title = course['title']
+ description = course.get('description') or course.get('shortDescription')
+
+ course_data = self._download_json(
+ 'http://www.pluralsight.com/data/course/content/%s' % course_id,
+ course_id, 'Downloading course data JSON')
+
+ entries = []
+ for module in course_data:
+ for clip in module.get('clips', []):
+ player_parameters = clip.get('playerParameters')
+ if not player_parameters:
+ continue
+ entries.append(self.url_result(
+ 'http://www.pluralsight.com/training/player?%s' % player_parameters,
+ 'Pluralsight'))
+
+ return self.playlist_result(entries, course_id, title, description)
diff --git a/youtube_dl/extractor/porn91.py b/youtube_dl/extractor/porn91.py
index 72d1b2718..3e15533e9 100644
--- a/youtube_dl/extractor/porn91.py
+++ b/youtube_dl/extractor/porn91.py
@@ -22,6 +22,7 @@ class Porn91IE(InfoExtractor):
'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!',
'ext': 'mp4',
'duration': 431,
+ 'age_limit': 18,
}
}
@@ -68,4 +69,5 @@ class Porn91IE(InfoExtractor):
'url': video_url,
'duration': duration,
'comment_count': comment_count,
+ 'age_limit': self._rta_search(webpage),
}
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 0b7886840..a656ad85a 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -20,7 +20,7 @@ from ..aes import (
class PornHubIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
+ _VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
_TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': '882f488fa1f0026f023f33576004a2ed',
@@ -34,6 +34,9 @@ class PornHubIE(InfoExtractor):
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'only_matching': True,
+ }, {
+ 'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
+ 'only_matching': True,
}]
@classmethod
@@ -81,7 +84,7 @@ class PornHubIE(InfoExtractor):
comment_count = self._extract_count(
r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
- video_urls = list(map(compat_urllib_parse_unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+ video_urls = list(map(compat_urllib_parse_unquote, re.findall(r"player_quality_[0-9]{3}p\s*=\s*'([^']+)'", webpage)))
if webpage.find('"encrypted":true') != -1:
password = compat_urllib_parse_unquote_plus(
self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
@@ -94,7 +97,7 @@ class PornHubIE(InfoExtractor):
format = path.split('/')[5].split('_')[:2]
format = "-".join(format)
- m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format)
+ m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
if m is None:
height = None
tbr = None
diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index fec008ce7..effcf1db3 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -9,7 +9,9 @@ from ..compat import (
compat_urllib_parse,
)
from ..utils import (
+ ExtractorError,
determine_ext,
+ float_or_none,
int_or_none,
unified_strdate,
)
@@ -224,10 +226,13 @@ class ProSiebenSat1IE(InfoExtractor):
'ids': clip_id,
})
- videos = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')
+ video = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')[0]
- duration = float(videos[0]['duration'])
- source_ids = [source['id'] for source in videos[0]['sources']]
+ if video.get('is_protected') is True:
+ raise ExtractorError('This video is DRM protected.', expected=True)
+
+ duration = float_or_none(video.get('duration'))
+ source_ids = [source['id'] for source in video['sources']]
source_ids_str = ','.join(map(str, source_ids))
g = '01!8d8F_)r9]4s[qeuXfP%'
diff --git a/youtube_dl/extractor/qqmusic.py b/youtube_dl/extractor/qqmusic.py
index 1654a641f..c98539f6a 100644
--- a/youtube_dl/extractor/qqmusic.py
+++ b/youtube_dl/extractor/qqmusic.py
@@ -25,7 +25,7 @@ class QQMusicIE(InfoExtractor):
'id': '004295Et37taLD',
'ext': 'mp3',
'title': '可惜没如果',
- 'upload_date': '20141227',
+ 'release_date': '20141227',
'creator': '林俊杰',
'description': 'md5:d327722d0361576fde558f1ac68a7065',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -38,11 +38,26 @@ class QQMusicIE(InfoExtractor):
'id': '004MsGEo3DdNxV',
'ext': 'mp3',
'title': '如果',
- 'upload_date': '20050626',
+ 'release_date': '20050626',
'creator': '李季美',
'description': 'md5:46857d5ed62bc4ba84607a805dccf437',
'thumbnail': 're:^https?://.*\.jpg$',
}
+ }, {
+ 'note': 'lyrics not in .lrc format',
+ 'url': 'http://y.qq.com/#type=song&mid=001JyApY11tIp6',
+ 'info_dict': {
+ 'id': '001JyApY11tIp6',
+ 'ext': 'mp3',
+ 'title': 'Shadows Over Transylvania',
+ 'release_date': '19970225',
+ 'creator': 'Dark Funeral',
+ 'description': 'md5:ed14d5bd7ecec19609108052c25b2c11',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
_FORMATS = {
@@ -112,15 +127,27 @@ class QQMusicIE(InfoExtractor):
self._check_formats(formats, mid)
self._sort_formats(formats)
- return {
+ actual_lrc_lyrics = ''.join(
+ line + '\n' for line in re.findall(
+ r'(?m)^(\[[0-9]{2}:[0-9]{2}(?:\.[0-9]{2,})?\][^\n]*|\[[^\]]*\])', lrc_content))
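+ # Only lines that look like LRC cues survive, i.e. "[mm:ss.xx]..." timestamps
+ # or bare "[tag]" lines; plain prose lyrics yield an empty string, in which
+ # case no subtitle track is attached below.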
+
+ info_dict = {
'id': mid,
'formats': formats,
'title': song_name,
- 'upload_date': publish_time,
+ 'release_date': publish_time,
'creator': singer,
'description': lrc_content,
- 'thumbnail': thumbnail_url,
+ 'thumbnail': thumbnail_url
}
+ if actual_lrc_lyrics:
+ info_dict['subtitles'] = {
+ 'origin': [{
+ 'ext': 'lrc',
+ 'data': actual_lrc_lyrics,
+ }]
+ }
+ return info_dict
class QQPlaylistBaseIE(InfoExtractor):
diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
index 1631faf29..7ff1d06c4 100644
--- a/youtube_dl/extractor/rai.py
+++ b/youtube_dl/extractor/rai.py
@@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
+ compat_urlparse,
)
from ..utils import (
parse_duration,
@@ -72,6 +73,18 @@ class RaiIE(InfoExtractor):
'description': 'Primo appuntamento con "Il candidato" con Filippo Timi, alias Piero Zucca presidente!',
'uploader': 'RaiTre',
}
+ },
+ {
+ 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html',
+ 'md5': '037104d2c14132887e5e4cf114569214',
+ 'info_dict': {
+ 'id': '0c7a664b-d0f4-4b2c-8835-3f82e46f433e',
+ 'ext': 'flv',
+ 'title': 'Il pacco',
+ 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a',
+ 'uploader': 'RaiTre',
+ 'upload_date': '20141221',
+ },
}
]
@@ -90,11 +103,14 @@ class RaiIE(InfoExtractor):
relinker_url = self._extract_relinker_url(webpage)
if not relinker_url:
- iframe_path = self._search_regex(
- r'<iframe[^>]+src="/?(dl/[^"]+\?iframe\b[^"]*)"',
+ iframe_url = self._search_regex(
+ [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"',
+ r'drawMediaRaiTV\(["\'](.+?)["\']'],
webpage, 'iframe')
+ if not iframe_url.startswith('http'):
+ iframe_url = compat_urlparse.urljoin(url, iframe_url)
webpage = self._download_webpage(
- '%s/%s' % (host, iframe_path), video_id)
+ iframe_url, video_id)
relinker_url = self._extract_relinker_url(webpage)
relinker = self._download_json(
diff --git a/youtube_dl/extractor/rtbf.py b/youtube_dl/extractor/rtbf.py
index e4215d546..04a66df90 100644
--- a/youtube_dl/extractor/rtbf.py
+++ b/youtube_dl/extractor/rtbf.py
@@ -36,7 +36,7 @@ class RTBFIE(InfoExtractor):
data = self._parse_json(
unescapeHTML(self._search_regex(
- r'data-video="([^"]+)"', webpage, 'data video')),
+ r'data-media="([^"]+)"', webpage, 'data video')),
video_id)
if data.get('provider').lower() == 'youtube':
diff --git a/youtube_dl/extractor/rte.py b/youtube_dl/extractor/rte.py
index 04158b993..d9cfbf180 100644
--- a/youtube_dl/extractor/rte.py
+++ b/youtube_dl/extractor/rte.py
@@ -9,16 +9,16 @@ from ..utils import (
class RteIE(InfoExtractor):
- _VALID_URL = r'http?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/(?P<id>[0-9]+)/'
+ _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
_TEST = {
- 'url': 'http://www.rte.ie/player/de/show/10363114/',
+ 'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
'info_dict': {
- 'id': '10363114',
+ 'id': '10478715',
'ext': 'mp4',
- 'title': 'One News',
+ 'title': 'Watch iWitness online',
'thumbnail': 're:^https?://.*\.jpg$',
- 'description': 'The One O\'Clock News followed by Weather.',
- 'duration': 436.844,
+ 'description': 'iWitness : The spirit of Ireland, one voice and one minute at a time.',
+ 'duration': 60.046,
},
'params': {
'skip_download': 'f4m fails with --test atm'
diff --git a/youtube_dl/extractor/rtl2.py b/youtube_dl/extractor/rtl2.py
index 72cd80498..25f7faf76 100644
--- a/youtube_dl/extractor/rtl2.py
+++ b/youtube_dl/extractor/rtl2.py
@@ -1,6 +1,7 @@
# encoding: utf-8
from __future__ import unicode_literals
+import re
from .common import InfoExtractor
@@ -8,22 +9,28 @@ class RTL2IE(InfoExtractor):
_VALID_URL = r'http?://(?:www\.)?rtl2\.de/[^?#]*?/(?P<id>[^?#/]*?)(?:$|/(?:$|[?#]))'
_TESTS = [{
'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0',
- 'md5': 'bfcc179030535b08dc2b36b469b5adc7',
'info_dict': {
'id': 'folge-203-0',
'ext': 'f4v',
'title': 'GRIP sucht den Sommerkönig',
'description': 'Matthias, Det und Helge treten gegeneinander an.'
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}, {
'url': 'http://www.rtl2.de/sendung/koeln-50667/video/5512-anna/21040-anna-erwischt-alex/',
- 'md5': 'ffcd517d2805b57ce11a58a2980c2b02',
'info_dict': {
'id': '21040-anna-erwischt-alex',
'ext': 'mp4',
'title': 'Anna erwischt Alex!',
'description': 'Anna ist Alex\' Tochter bei Köln 50667.'
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
@@ -34,12 +41,18 @@ class RTL2IE(InfoExtractor):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- vico_id = self._html_search_regex(
- r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id')
- vivi_id = self._html_search_regex(
- r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id')
+ mobj = re.search(
+ r'<div[^>]+data-collection="(?P<vico_id>\d+)"[^>]+data-video="(?P<vivi_id>\d+)"',
+ webpage)
+ if mobj:
+ vico_id = mobj.group('vico_id')
+ vivi_id = mobj.group('vivi_id')
+ else:
+ vico_id = self._html_search_regex(
+ r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id')
+ vivi_id = self._html_search_regex(
+ r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id')
info_url = 'http://www.rtl2.de/video/php/get_video.php?vico_id=' + vico_id + '&vivi_id=' + vivi_id
- webpage = self._download_webpage(info_url, '')
info = self._download_json(info_url, video_id)
video_info = info['video']
diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py
index e0c530d64..543d94417 100644
--- a/youtube_dl/extractor/rtlnl.py
+++ b/youtube_dl/extractor/rtlnl.py
@@ -82,16 +82,21 @@ class RtlNlIE(InfoExtractor):
meta = info.get('meta', {})
- # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118)
- # NB: nowadays, recent ffmpeg and avconv can handle these encrypted streams, so
- # this adaptive -> flash workaround is not required in general, but it also
- # allows bypassing georestriction therefore is retained for now.
- videopath = material['videopath'].replace('/adaptive/', '/flash/')
+ # m3u8 streams are encrypted and may not be handled properly by older ffmpeg/avconv.
+ # To work around this, the adaptive -> flash trick was previously used to obtain
+ # unencrypted m3u8 streams (see https://github.com/rg3/youtube-dl/issues/4118)
+ # and to bypass georestrictions as well.
+ # Currently, unencrypted m3u8 playlists are (intentionally?) invalid and therefore
+ # unusable, although they can be fixed by a simple string replacement (see
+ # https://github.com/rg3/youtube-dl/pull/6337).
+ # Since recent ffmpeg and avconv handle encrypted streams just fine, the encrypted
+ # streams are used now.
+ videopath = material['videopath']
m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath
formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')
- video_urlpart = videopath.split('/flash/')[1][:-5]
+ video_urlpart = videopath.split('/adaptive/')[1][:-5]
PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
formats.extend([
diff --git a/youtube_dl/extractor/rtp.py b/youtube_dl/extractor/rtp.py
index ecf4939cd..82b323cdd 100644
--- a/youtube_dl/extractor/rtp.py
+++ b/youtube_dl/extractor/rtp.py
@@ -18,6 +18,10 @@ class RTPIE(InfoExtractor):
'description': 'As paixões musicais de António Cartaxo e António Macedo',
'thumbnail': 're:^https?://.*\.jpg',
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}, {
'url': 'http://www.rtp.pt/play/p831/a-quimica-das-coisas',
'only_matching': True,
diff --git a/youtube_dl/extractor/rts.py b/youtube_dl/extractor/rts.py
index 9fbe239d8..12639f08b 100644
--- a/youtube_dl/extractor/rts.py
+++ b/youtube_dl/extractor/rts.py
@@ -19,7 +19,16 @@ from ..utils import (
class RTSIE(InfoExtractor):
IE_DESC = 'RTS.ch'
- _VALID_URL = r'https?://(?:www\.)?rts\.ch/(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))'
+ _VALID_URL = r'''(?x)
+ (?:
+ rts:(?P<rts_id>\d+)|
+ https?://
+ (?:www\.)?rts\.ch/
+ (?:
+ (?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|
+ play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+)
+ )
+ )'''
_TESTS = [
{
@@ -123,6 +132,15 @@ class RTSIE(InfoExtractor):
},
},
{
+ # article with videos on rhs
+ 'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
+ 'info_dict': {
+ 'id': '6693917',
+ 'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
+ },
+ 'playlist_mincount': 5,
+ },
+ {
'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
'only_matching': True,
}
@@ -130,7 +148,7 @@ class RTSIE(InfoExtractor):
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
- video_id = m.group('id') or m.group('id_new')
+ video_id = m.group('rts_id') or m.group('id') or m.group('id_new')
display_id = m.group('display_id') or m.group('display_id_new')
def download_json(internal_id):
@@ -143,6 +161,15 @@ class RTSIE(InfoExtractor):
# video_id extracted out of URL is not always a real id
if 'video' not in all_info and 'audio' not in all_info:
page = self._download_webpage(url, display_id)
+
+ # article with videos on rhs
+ videos = re.findall(
+ r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"',
+ page)
+ if videos:
+ entries = [self.url_result('rts:%s' % video_urn, 'RTS') for video_urn in videos]
+ return self.playlist_result(entries, video_id, self._og_search_title(page))
+
internal_id = self._html_search_regex(
r'<(?:video|audio) data-id="([0-9]+)"', page,
'internal video id')
diff --git a/youtube_dl/extractor/rtve.py b/youtube_dl/extractor/rtve.py
index 82cd98ac7..5b97d33ca 100644
--- a/youtube_dl/extractor/rtve.py
+++ b/youtube_dl/extractor/rtve.py
@@ -6,7 +6,7 @@ import re
import time
from .common import InfoExtractor
-from ..compat import compat_urlparse
+from ..compat import compat_urllib_request, compat_urlparse
from ..utils import (
ExtractorError,
float_or_none,
@@ -102,7 +102,9 @@ class RTVEALaCartaIE(InfoExtractor):
if info['state'] == 'DESPU':
raise ExtractorError('The video is no longer available', expected=True)
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id)
- png = self._download_webpage(png_url, video_id, 'Downloading url information')
+ png_request = compat_urllib_request.Request(png_url)
+ png_request.add_header('Referer', url)
+ png = self._download_webpage(png_request, video_id, 'Downloading url information')
video_url = _decrypt_url(png)
if not video_url.endswith('.f4m'):
auth_url = video_url.replace(
diff --git a/youtube_dl/extractor/rtvnh.py b/youtube_dl/extractor/rtvnh.py
new file mode 100644
index 000000000..7c9d4b0cd
--- /dev/null
+++ b/youtube_dl/extractor/rtvnh.py
@@ -0,0 +1,47 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class RTVNHIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.rtvnh.nl/video/131946',
+ 'md5': '6e1d0ab079e2a00b6161442d3ceacfc1',
+ 'info_dict': {
+ 'id': '131946',
+ 'ext': 'mp4',
+ 'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw',
+ 'thumbnail': 're:^https?:.*\.jpg$'
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ meta = self._parse_json(self._download_webpage(
+ 'http://www.rtvnh.nl/video/json?m=' + video_id, video_id), video_id)
+
+ status = meta.get('status')
+ if status != 200:
+ raise ExtractorError(
+ '%s returned error code %d' % (self.IE_NAME, status), expected=True)
+
+ formats = self._extract_smil_formats(
+ 'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id, fatal=False)
+
+ for item in meta['source']['fb']:
+ if item.get('type') == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ item['file'], video_id, ext='mp4', entry_protocol='m3u8_native'))
+ elif item.get('type') == '':
+ formats.append({'url': item['file']})
+
+ return {
+ 'id': video_id,
+ 'title': meta['title'].strip(),
+ 'thumbnail': meta.get('image'),
+ 'formats': formats
+ }
diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py
index 5b1c3577a..d94dc7399 100644
--- a/youtube_dl/extractor/rutube.py
+++ b/youtube_dl/extractor/rutube.py
@@ -30,6 +30,7 @@ class RutubeIE(InfoExtractor):
'uploader': 'NTDRussian',
'uploader_id': '29790',
'upload_date': '20131016',
+ 'age_limit': 0,
},
'params': {
# It requires ffmpeg (m3u8 download)
diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py
index 4e22628d0..a16b73ff4 100644
--- a/youtube_dl/extractor/ruutu.py
+++ b/youtube_dl/extractor/ruutu.py
@@ -6,19 +6,19 @@ from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
int_or_none,
+ xpath_attr,
xpath_text,
)
class RuutuIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?ruutu\.fi/ohjelmat/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
+ _VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
_TESTS = [
{
- 'url': 'http://www.ruutu.fi/ohjelmat/oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi',
+ 'url': 'http://www.ruutu.fi/video/2058907',
'md5': 'ab2093f39be1ca8581963451b3c0234f',
'info_dict': {
'id': '2058907',
- 'display_id': 'oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi',
'ext': 'mp4',
'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
@@ -28,14 +28,13 @@ class RuutuIE(InfoExtractor):
},
},
{
- 'url': 'http://www.ruutu.fi/ohjelmat/superpesis/superpesis-katso-koko-kausi-ruudussa',
+ 'url': 'http://www.ruutu.fi/video/2057306',
'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
'info_dict': {
'id': '2057306',
- 'display_id': 'superpesis-katso-koko-kausi-ruudussa',
'ext': 'mp4',
'title': 'Superpesis: katso koko kausi Ruudussa',
- 'description': 'md5:44c44a99fdbe5b380ab74ebd75f0af77',
+ 'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 40,
'age_limit': 0,
@@ -44,29 +43,10 @@ class RuutuIE(InfoExtractor):
]
def _real_extract(self, url):
- display_id = self._match_id(url)
+ video_id = self._match_id(url)
- webpage = self._download_webpage(url, display_id)
-
- video_id = self._search_regex(
- r'data-media-id="(\d+)"', webpage, 'media id')
-
- video_xml_url = None
-
- media_data = self._search_regex(
- r'jQuery\.extend\([^,]+,\s*(.+?)\);', webpage,
- 'media data', default=None)
- if media_data:
- media_json = self._parse_json(media_data, display_id, fatal=False)
- if media_json:
- xml_url = media_json.get('ruutuplayer', {}).get('xmlUrl')
- if xml_url:
- video_xml_url = xml_url.replace('{ID}', video_id)
-
- if not video_xml_url:
- video_xml_url = 'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id
-
- video_xml = self._download_xml(video_xml_url, video_id)
+ video_xml = self._download_xml(
+ 'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)
formats = []
processed_urls = []
@@ -94,7 +74,7 @@ class RuutuIE(InfoExtractor):
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
- width, height = [int_or_none(x) for x in child.get('resolution', '').split('x')]
+ width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
'format_id': '%s-%s' % (proto, label if label else tbr),
'url': video_url,
@@ -109,10 +89,9 @@ class RuutuIE(InfoExtractor):
return {
'id': video_id,
- 'display_id': display_id,
- 'title': self._og_search_title(webpage),
- 'description': self._og_search_description(webpage),
- 'thumbnail': self._og_search_thumbnail(webpage),
+ 'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
+ 'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
+ 'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
'formats': formats,
diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py
index f3c80708c..a602af692 100644
--- a/youtube_dl/extractor/safari.py
+++ b/youtube_dl/extractor/safari.py
@@ -20,7 +20,6 @@ from ..utils import (
class SafariBaseIE(InfoExtractor):
_LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/'
_SUCCESSFUL_LOGIN_REGEX = r'<a href="/accounts/logout/"[^>]*>Sign Out</a>'
- _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to supply credentials for safaribooksonline.com'
_NETRC_MACHINE = 'safari'
_API_BASE = 'https://www.safaribooksonline.com/api/v1/book'
@@ -37,9 +36,7 @@ class SafariBaseIE(InfoExtractor):
def _login(self):
(username, password) = self._get_login_info()
if username is None:
- raise ExtractorError(
- self._ACCOUNT_CREDENTIALS_HINT,
- expected=True)
+ self.raise_login_required('safaribooksonline.com account is required')
headers = std_headers
if 'Referer' not in headers:
diff --git a/youtube_dl/extractor/screenwavemedia.py b/youtube_dl/extractor/screenwavemedia.py
index d1ab66b32..05f93904c 100644
--- a/youtube_dl/extractor/screenwavemedia.py
+++ b/youtube_dl/extractor/screenwavemedia.py
@@ -7,12 +7,13 @@ from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
+ js_to_json,
)
class ScreenwaveMediaIE(InfoExtractor):
- _VALID_URL = r'http://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?[^"]*\bid=(?P<id>.+)'
-
+ _VALID_URL = r'https?://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=(?P<id>[A-Za-z0-9-]+)'
+ EMBED_PATTERN = r'src=(["\'])(?P<url>(?:https?:)?//player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=.+?)\1'
_TESTS = [{
'url': 'http://player.screenwavemedia.com/play/play.php?playerdiv=videoarea&companiondiv=squareAd&id=Cinemassacre-19911',
'only_matching': True,
@@ -22,59 +23,71 @@ class ScreenwaveMediaIE(InfoExtractor):
video_id = self._match_id(url)
playerdata = self._download_webpage(
- 'http://player.screenwavemedia.com/play/player.php?id=%s' % video_id,
+ 'http://player.screenwavemedia.com/player.php?id=%s' % video_id,
video_id, 'Downloading player webpage')
vidtitle = self._search_regex(
r'\'vidtitle\'\s*:\s*"([^"]+)"', playerdata, 'vidtitle').replace('\\/', '/')
- vidurl = self._search_regex(
- r'\'vidurl\'\s*:\s*"([^"]+)"', playerdata, 'vidurl').replace('\\/', '/')
-
- videolist_url = None
-
- mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
- if mobj:
- videoserver = mobj.group('videoserver')
- mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
- vidid = mobj.group('vidid') if mobj else video_id
- videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
- else:
- mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
- if mobj:
- videolist_url = mobj.group('smil')
-
- if videolist_url:
- videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
- formats = []
- baseurl = vidurl[:vidurl.rfind('/') + 1]
- for video in videolist.findall('.//video'):
- src = video.get('src')
- if not src:
+
+ playerconfig = self._download_webpage(
+ 'http://player.screenwavemedia.com/player.js',
+ video_id, 'Downloading playerconfig webpage')
+
+ videoserver = self._search_regex(r'SWMServer\s*=\s*"([\d\.]+)"', playerdata, 'videoserver')
+
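+ # player.js embeds the sources array as JavaScript with string concatenation;
+ # strip the /* ... */ comments, substitute the real videoserver and video id
+ # for the concatenated placeholders, then parse the result as JSON via js_to_json.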
+ sources = self._parse_json(
+ js_to_json(
+ re.sub(
+ r'(?s)/\*.*?\*/', '',
+ self._search_regex(
+ r"sources\s*:\s*(\[[^\]]+?\])", playerconfig,
+ 'sources',
+ ).replace(
+ "' + thisObj.options.videoserver + '",
+ videoserver
+ ).replace(
+ "' + playerVidId + '",
+ video_id
+ )
+ )
+ ),
+ video_id, fatal=False
+ )
+
+ # Fall back to hardcoded sources if the JS changes again
+ if not sources:
+ self.report_warning('Falling back to a hardcoded list of streams')
+ sources = [{
+ 'file': 'http://%s/vod/%s_%s.mp4' % (videoserver, video_id, format_id),
+ 'type': 'mp4',
+ 'label': format_label,
+ } for format_id, format_label in (
+ ('low', '144p Low'), ('med', '160p Med'), ('high', '360p High'), ('hd1', '720p HD1'))]
+ sources.append({
+ 'file': 'http://%s/vod/smil:%s.smil/playlist.m3u8' % (videoserver, video_id),
+ 'type': 'hls',
+ })
+
+ formats = []
+ for source in sources:
+ if source['type'] == 'hls':
+ formats.extend(self._extract_m3u8_formats(source['file'], video_id))
+ else:
+ file_ = source.get('file')
+ if not file_:
continue
- file_ = src.partition(':')[-1]
- width = int_or_none(video.get('width'))
- height = int_or_none(video.get('height'))
- bitrate = int_or_none(video.get('system-bitrate'), scale=1000)
- format = {
- 'url': baseurl + file_,
- 'format_id': src.rpartition('.')[0].rpartition('_')[-1],
- }
- if width or height:
- format.update({
- 'tbr': bitrate,
- 'width': width,
- 'height': height,
- })
- else:
- format.update({
- 'abr': bitrate,
- 'vcodec': 'none',
- })
- formats.append(format)
- else:
- formats = [{
- 'url': vidurl,
- }]
+ format_label = source.get('label')
+ format_id = self._search_regex(
+ r'_(.+?)\.[^.]+$', file_, 'format id', default=None)
+ height = int_or_none(self._search_regex(
+ r'^(\d+)[pP]', format_label, 'height', default=None))
+ formats.append({
+ 'url': source['file'],
+ 'format_id': format_id,
+ 'format': format_label,
+ 'ext': source.get('type'),
+ 'height': height,
+ })
self._sort_formats(formats)
return {
diff --git a/youtube_dl/extractor/sexykarma.py b/youtube_dl/extractor/sexykarma.py
index 6446d26dc..e33483674 100644
--- a/youtube_dl/extractor/sexykarma.py
+++ b/youtube_dl/extractor/sexykarma.py
@@ -29,6 +29,7 @@ class SexyKarmaIE(InfoExtractor):
'view_count': int,
'comment_count': int,
'categories': list,
+ 'age_limit': 18,
}
}, {
'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html',
diff --git a/youtube_dl/extractor/shahid.py b/youtube_dl/extractor/shahid.py
new file mode 100644
index 000000000..f76fb12c0
--- /dev/null
+++ b/youtube_dl/extractor/shahid.py
@@ -0,0 +1,107 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class ShahidIE(InfoExtractor):
+ _VALID_URL = r'https?://shahid\.mbc\.net/ar/episode/(?P<id>\d+)/?'
+ _TESTS = [{
+ 'url': 'https://shahid.mbc.net/ar/episode/90574/%D8%A7%D9%84%D9%85%D9%84%D9%83-%D8%B9%D8%A8%D8%AF%D8%A7%D9%84%D9%84%D9%87-%D8%A7%D9%84%D8%A5%D9%86%D8%B3%D8%A7%D9%86-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-3.html',
+ 'info_dict': {
+ 'id': '90574',
+ 'ext': 'mp4',
+ 'title': 'الملك عبدالله الإنسان الموسم 1 كليب 3',
+ 'description': 'الفيلم الوثائقي - الملك عبد الله الإنسان',
+ 'duration': 2972,
+ 'timestamp': 1422057420,
+ 'upload_date': '20150123',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
+ }, {
+ # shahid plus subscriber only
+ 'url': 'https://shahid.mbc.net/ar/episode/90511/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1.html',
+ 'only_matching': True
+ }]
+
+ def _handle_error(self, response):
+ if not isinstance(response, dict):
+ return
+ error = response.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, '\n'.join(error.values())),
+ expected=True)
+
+ def _download_json(self, url, video_id, note='Downloading JSON metadata'):
+ response = super(ShahidIE, self)._download_json(url, video_id, note)['data']
+ self._handle_error(response)
+ return response
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ api_vars = {
+ 'id': video_id,
+ 'type': 'player',
+ 'url': 'http://api.shahid.net/api/v1_1',
+ 'playerType': 'episode',
+ }
+
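+ # Override the defaults above with whatever the page's flashvars object
+ # provides for id, type, url and playerType, when present.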
+ flashvars = self._search_regex(
+ r'var\s+flashvars\s*=\s*({[^}]+})', webpage, 'flashvars', default=None)
+ if flashvars:
+ for key in api_vars.keys():
+ value = self._search_regex(
+ r'\b%s\s*:\s*(?P<q>["\'])(?P<value>.+?)(?P=q)' % key,
+ flashvars, 'type', default=None, group='value')
+ if value:
+ api_vars[key] = value
+
+ player = self._download_json(
+ 'https://shahid.mbc.net/arContent/getPlayerContent-param-.id-%s.type-%s.html'
+ % (video_id, api_vars['type']), video_id, 'Downloading player JSON')
+
+ formats = self._extract_m3u8_formats(player['url'], video_id, 'mp4')
+
+ video = self._download_json(
+ '%s/%s/%s?%s' % (
+ api_vars['url'], api_vars['playerType'], api_vars['id'],
+ compat_urllib_parse.urlencode({
+ 'apiKey': 'sh@hid0nlin3',
+ 'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
+ })),
+ video_id, 'Downloading video JSON')
+
+ video = video[api_vars['playerType']]
+
+ title = video['title']
+ description = video.get('description')
+ thumbnail = video.get('thumbnailUrl')
+ duration = int_or_none(video.get('duration'))
+ timestamp = parse_iso8601(video.get('referenceDate'))
+ categories = [
+ category['name']
+ for category in video.get('genres', []) if 'name' in category]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'categories': categories,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/shared.py b/youtube_dl/extractor/shared.py
index a07677686..c5636e8e9 100644
--- a/youtube_dl/extractor/shared.py
+++ b/youtube_dl/extractor/shared.py
@@ -14,17 +14,28 @@ from ..utils import (
class SharedIE(InfoExtractor):
- _VALID_URL = r'http://shared\.sx/(?P<id>[\da-z]{10})'
+ IE_DESC = 'shared.sx and vivo.sx'
+ _VALID_URL = r'http://(?:shared|vivo)\.sx/(?P<id>[\da-z]{10})'
- _TEST = {
+ _TESTS = [{
'url': 'http://shared.sx/0060718775',
'md5': '106fefed92a8a2adb8c98e6a0652f49b',
'info_dict': {
'id': '0060718775',
'ext': 'mp4',
'title': 'Bmp4',
+ 'filesize': 1720110,
},
- }
+ }, {
+ 'url': 'http://vivo.sx/d7ddda0e78',
+ 'md5': '15b3af41be0b4fe01f4df075c2678b2c',
+ 'info_dict': {
+ 'id': 'd7ddda0e78',
+ 'ext': 'mp4',
+ 'title': 'Chicken',
+ 'filesize': 528031,
+ },
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py
index 93a7cfe15..35a81ee87 100644
--- a/youtube_dl/extractor/smotri.py
+++ b/youtube_dl/extractor/smotri.py
@@ -330,10 +330,7 @@ class SmotriBroadcastIE(InfoExtractor):
(username, password) = self._get_login_info()
if username is None:
- raise ExtractorError(
- 'Erotic broadcasts allowed only for registered users, '
- 'use --username and --password options to provide account credentials.',
- expected=True)
+ self.raise_login_required('Erotic broadcasts allowed only for registered users')
login_form = {
'login-hint53': '1',
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index 118ca4832..2b60d354a 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -29,7 +29,7 @@ class SoundcloudIE(InfoExtractor):
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?P<uploader>[\w\d-]+)/
- (?!sets/|(?:likes|tracks)/?(?:$|[?#]))
+ (?!(?:tracks|sets(?:/[^/?#]+)?|reposts|likes|spotlight)/?(?:$|[?#]))
(?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
|(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
@@ -113,7 +113,7 @@ class SoundcloudIE(InfoExtractor):
},
]
- _CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'
+ _CLIENT_ID = '02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea'
_IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf'
def report_resolve(self, video_id):
@@ -282,69 +282,150 @@ class SoundcloudSetIE(SoundcloudIE):
msgs = (compat_str(err['error_message']) for err in info['errors'])
raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))
+ entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in info['tracks']]
+
return {
'_type': 'playlist',
- 'entries': [self._extract_info_dict(track, secret_token=token) for track in info['tracks']],
+ 'entries': entries,
'id': '%s' % info['id'],
'title': info['title'],
}
class SoundcloudUserIE(SoundcloudIE):
- _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<user>[^/]+)/?((?P<rsrc>tracks|likes)/?)?(\?.*)?$'
+ _VALID_URL = r'''(?x)
+ https?://
+ (?:(?:www|m)\.)?soundcloud\.com/
+ (?P<user>[^/]+)
+ (?:/
+ (?P<rsrc>tracks|sets|reposts|likes|spotlight)
+ )?
+ /?(?:[?#].*)?$
+ '''
IE_NAME = 'soundcloud:user'
_TESTS = [{
- 'url': 'https://soundcloud.com/the-concept-band',
+ 'url': 'https://soundcloud.com/the-akashic-chronicler',
'info_dict': {
- 'id': '9615865',
- 'title': 'The Royal Concept',
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (All)',
},
- 'playlist_mincount': 12
+ 'playlist_mincount': 111,
}, {
- 'url': 'https://soundcloud.com/the-concept-band/likes',
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/tracks',
'info_dict': {
- 'id': '9615865',
- 'title': 'The Royal Concept',
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Tracks)',
},
- 'playlist_mincount': 1,
+ 'playlist_mincount': 50,
}, {
- 'url': 'https://soundcloud.com/the-akashic-chronicler/tracks',
- 'only_matching': True,
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/sets',
+ 'info_dict': {
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Playlists)',
+ },
+ 'playlist_mincount': 3,
+ }, {
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/reposts',
+ 'info_dict': {
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Reposts)',
+ },
+ 'playlist_mincount': 7,
+ }, {
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/likes',
+ 'info_dict': {
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Likes)',
+ },
+ 'playlist_mincount': 321,
+ }, {
+ 'url': 'https://soundcloud.com/grynpyret/spotlight',
+ 'info_dict': {
+ 'id': '7098329',
+ 'title': 'Grynpyret (Spotlight)',
+ },
+ 'playlist_mincount': 1,
}]
+ _API_BASE = 'https://api.soundcloud.com'
+ _API_V2_BASE = 'https://api-v2.soundcloud.com'
+
+ _BASE_URL_MAP = {
+ 'all': '%s/profile/soundcloud:users:%%s' % _API_V2_BASE,
+ 'tracks': '%s/users/%%s/tracks' % _API_BASE,
+ 'sets': '%s/users/%%s/playlists' % _API_V2_BASE,
+ 'reposts': '%s/profile/soundcloud:users:%%s/reposts' % _API_V2_BASE,
+ 'likes': '%s/users/%%s/likes' % _API_V2_BASE,
+ 'spotlight': '%s/users/%%s/spotlight' % _API_V2_BASE,
+ }
+
+ _TITLE_MAP = {
+ 'all': 'All',
+ 'tracks': 'Tracks',
+ 'sets': 'Playlists',
+ 'reposts': 'Reposts',
+ 'likes': 'Likes',
+ 'spotlight': 'Spotlight',
+ }
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group('user')
- resource = mobj.group('rsrc')
- if resource is None:
- resource = 'tracks'
- elif resource == 'likes':
- resource = 'favorites'
url = 'http://soundcloud.com/%s/' % uploader
resolv_url = self._resolv_url(url)
user = self._download_json(
resolv_url, uploader, 'Downloading user info')
- base_url = 'http://api.soundcloud.com/users/%s/%s.json?' % (uploader, resource)
+
+ resource = mobj.group('rsrc') or 'all'
+ base_url = self._BASE_URL_MAP[resource] % user['id']
+
+ next_href = None
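+ # The first page is built client-side with offset/limit; with
+ # 'linked_partitioning' enabled the API returns a 'next_href' that is used
+ # for subsequent pages, while endpoints without it fall back to offset paging.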
entries = []
for i in itertools.count():
- data = compat_urllib_parse.urlencode({
- 'offset': i * 50,
- 'limit': 50,
- 'client_id': self._CLIENT_ID,
- })
- new_entries = self._download_json(
- base_url + data, uploader, 'Downloading track page %s' % (i + 1))
- if len(new_entries) == 0:
+ if not next_href:
+ data = compat_urllib_parse.urlencode({
+ 'offset': i * 50,
+ 'limit': 50,
+ 'client_id': self._CLIENT_ID,
+ 'linked_partitioning': '1',
+ 'representation': 'speedy',
+ })
+ next_href = base_url + '?' + data
+
+ response = self._download_json(
+ next_href, uploader, 'Downloading track page %s' % (i + 1))
+
+ collection = response['collection']
+
+ if not collection:
self.to_screen('%s: End page received' % uploader)
break
- entries.extend(self.url_result(e['permalink_url'], 'Soundcloud') for e in new_entries)
+
+ def resolve_permalink_url(candidates):
+ for cand in candidates:
+ if isinstance(cand, dict):
+ permalink_url = cand.get('permalink_url')
+ if permalink_url and permalink_url.startswith('http'):
+ return permalink_url
+
+ for e in collection:
+ permalink_url = resolve_permalink_url((e, e.get('track'), e.get('playlist')))
+ if permalink_url:
+ entries.append(self.url_result(permalink_url))
+
+ if 'next_href' in response:
+ next_href = response['next_href']
+ if not next_href:
+ break
+ else:
+ next_href = None
return {
'_type': 'playlist',
'id': compat_str(user['id']),
- 'title': user['username'],
+ 'title': '%s (%s)' % (user['username'], self._TITLE_MAP[resource]),
'entries': entries,
}
@@ -379,9 +460,7 @@ class SoundcloudPlaylistIE(SoundcloudIE):
data = self._download_json(
base_url + data, playlist_id, 'Downloading playlist')
- entries = [
- self._extract_info_dict(t, quiet=True, secret_token=token)
- for t in data['tracks']]
+ entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in data['tracks']]
return {
'_type': 'playlist',
diff --git a/youtube_dl/extractor/southpark.py b/youtube_dl/extractor/southpark.py
index 7fb165a87..87b650468 100644
--- a/youtube_dl/extractor/southpark.py
+++ b/youtube_dl/extractor/southpark.py
@@ -45,6 +45,14 @@ class SouthParkDeIE(SouthParkIE):
'title': 'The Government Won\'t Respect My Privacy',
'description': 'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.',
},
+ }, {
+ # non-ASCII characters in initial URL
+ 'url': 'http://www.southpark.de/alle-episoden/s18e09-hashtag-aufwärmen',
+ 'playlist_count': 4,
+ }, {
+ # non-ASCII characters in redirect URL
+ 'url': 'http://www.southpark.de/alle-episoden/s18e09',
+ 'playlist_count': 4,
}]
diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py
index 5fa6faf18..9e8fb35b2 100644
--- a/youtube_dl/extractor/spankwire.py
+++ b/youtube_dl/extractor/spankwire.py
@@ -16,8 +16,9 @@ from ..aes import aes_decrypt_text
class SpankwireIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<id>[0-9]+)/?)'
+ _TESTS = [{
+ # download URL pattern: */<height>P_<tbr>K_<video_id>.mp4
'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
'md5': '8bbfde12b101204b39e4b9fe7eb67095',
'info_dict': {
@@ -30,14 +31,27 @@ class SpankwireIE(InfoExtractor):
'upload_date': '20070507',
'age_limit': 18,
}
- }
+ }, {
+ # download URL pattern: */mp4_<format_id>_<video_id>.mp4
+ 'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/',
+ 'md5': '09b3c20833308b736ae8902db2f8d7e6',
+ 'info_dict': {
+ 'id': '1921551',
+ 'ext': 'mp4',
+ 'title': 'Titcums Compiloation I',
+ 'description': 'cum on tits',
+ 'uploader': 'dannyh78999',
+ 'uploader_id': '3056053',
+ 'upload_date': '20150822',
+ 'age_limit': 18,
+ },
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
- url = 'http://www.' + mobj.group('url')
+ video_id = mobj.group('id')
- req = compat_urllib_request.Request(url)
+ req = compat_urllib_request.Request('http://www.' + mobj.group('url'))
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
@@ -54,7 +68,7 @@ class SpankwireIE(InfoExtractor):
r'by:\s*<a [^>]*>(.+?)</a>',
webpage, 'uploader', fatal=False)
uploader_id = self._html_search_regex(
- r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"',
+ r'by:\s*<a href="/(?:user/viewProfile|Profile\.aspx)\?.*?UserId=(\d+).*?"',
webpage, 'uploader id', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r'</a> on (.+?) at \d+:\d+',
@@ -67,9 +81,10 @@ class SpankwireIE(InfoExtractor):
r'<span\s+id="spCommentCount"[^>]*>([\d,\.]+)</span>',
webpage, 'comment count', fatal=False))
- video_urls = list(map(
- compat_urllib_parse_unquote,
- re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage)))
+ videos = re.findall(
+ r'playerData\.cdnPath([0-9]{3,})\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage)
+ heights = [int(video[0]) for video in videos]
+ video_urls = list(map(compat_urllib_parse_unquote, [video[1] for video in videos]))
if webpage.find('flashvars\.encrypted = "true"') != -1:
password = self._search_regex(
r'flashvars\.video_title = "([^"]+)',
@@ -79,21 +94,22 @@ class SpankwireIE(InfoExtractor):
video_urls))
formats = []
- for video_url in video_urls:
+ for height, video_url in zip(heights, video_urls):
path = compat_urllib_parse_urlparse(video_url).path
- format = path.split('/')[4].split('_')[:2]
- resolution, bitrate_str = format
- format = "-".join(format)
- height = int(resolution.rstrip('Pp'))
- tbr = int(bitrate_str.rstrip('Kk'))
- formats.append({
+ _, quality = path.split('/')[4].split('_')[:2]
+ f = {
'url': video_url,
- 'resolution': resolution,
- 'format': format,
- 'tbr': tbr,
'height': height,
- 'format_id': format,
- })
+ }
+ tbr = self._search_regex(r'^(\d+)[Kk]$', quality, 'tbr', default=None)
+ if tbr:
+ f.update({
+ 'tbr': int(tbr),
+ 'format_id': '%dp' % height,
+ })
+ else:
+ f['format_id'] = quality
+ formats.append(f)
self._sort_formats(formats)
age_limit = self._rta_search(webpage)
diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
index b868241d5..5bd3c0087 100644
--- a/youtube_dl/extractor/spiegel.py
+++ b/youtube_dl/extractor/spiegel.py
@@ -9,7 +9,7 @@ from .spiegeltv import SpiegeltvIE
class SpiegelIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$'
+ _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$'
_TESTS = [{
'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
'md5': '2c2754212136f35fb4b19767d242f66e',
@@ -39,6 +39,9 @@ class SpiegelIE(InfoExtractor):
'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
}
+ }, {
+ 'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html',
+ 'only_matching': True,
}]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/sportdeutschland.py b/youtube_dl/extractor/sportdeutschland.py
index 1a57aebf1..7ec6c613f 100644
--- a/youtube_dl/extractor/sportdeutschland.py
+++ b/youtube_dl/extractor/sportdeutschland.py
@@ -38,10 +38,12 @@ class SportDeutschlandIE(InfoExtractor):
'upload_date': '20140825',
'description': 'md5:60a20536b57cee7d9a4ec005e8687504',
'timestamp': 1408976060,
+ 'duration': 2732,
'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. Keun Lee',
'thumbnail': 're:^https?://.*\.jpg$',
'view_count': int,
'categories': ['Li-Ning Badminton WM 2014'],
+
}
}]
@@ -50,7 +52,7 @@ class SportDeutschlandIE(InfoExtractor):
video_id = mobj.group('id')
sport_id = mobj.group('sport')
- api_url = 'http://splink.tv/api/permalinks/%s/%s' % (
+ api_url = 'http://proxy.vidibusdynamic.net/sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % (
sport_id, video_id)
req = compat_urllib_request.Request(api_url, headers={
'Accept': 'application/vnd.vidibus.v2.html+json',
@@ -58,12 +60,11 @@ class SportDeutschlandIE(InfoExtractor):
})
data = self._download_json(req, video_id)
- categories = list(data.get('section', {}).get('tags', {}).values())
asset = data['asset']
- assets_info = self._download_json(asset['url'], video_id)
+ categories = [data['section']['title']]
formats = []
- smil_url = assets_info['video']
+ smil_url = asset['video']
if '.smil' in smil_url:
m3u8_url = smil_url.replace('.smil', '.m3u8')
formats.extend(
@@ -91,6 +92,7 @@ class SportDeutschlandIE(InfoExtractor):
'title': asset['title'],
'thumbnail': asset.get('image'),
'description': asset.get('teaser'),
+ 'duration': asset.get('duration'),
'categories': categories,
'view_count': asset.get('views'),
'rtmp_live': asset.get('live'),
diff --git a/youtube_dl/extractor/tagesschau.py b/youtube_dl/extractor/tagesschau.py
index cf1b37a75..73e7657d4 100644
--- a/youtube_dl/extractor/tagesschau.py
+++ b/youtube_dl/extractor/tagesschau.py
@@ -8,7 +8,7 @@ from ..utils import parse_filesize
class TagesschauIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:sendung/ts|video/video)(?P<id>-?[0-9]+)\.html'
+ _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:[^/]+/)*?[^/#?]+?(?P<id>-?[0-9]+)(?:~_[^/#?]+?)?\.html'
_TESTS = [{
'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
@@ -18,7 +18,7 @@ class TagesschauIE(InfoExtractor):
'ext': 'mp4',
'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
'description': 'md5:171feccd9d9b3dd54d05d501568f6359',
- 'thumbnail': 're:^http:.*\.jpg$',
+ 'thumbnail': 're:^https?:.*\.jpg$',
},
}, {
'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
@@ -28,8 +28,39 @@ class TagesschauIE(InfoExtractor):
'ext': 'mp4',
'description': 'md5:695c01bfd98b7e313c501386327aea59',
'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
- 'thumbnail': 're:^http:.*\.jpg$',
- }
+ 'thumbnail': 're:^https?:.*\.jpg$',
+ },
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/politikimradio/audio-18407.html',
+ 'md5': 'aef45de271c4bf0a5db834aa40bf774c',
+ 'info_dict': {
+ 'id': '18407',
+ 'ext': 'mp3',
+ 'title': 'Flüchtlingsdebatte: Hitzig, aber wenig hilfreich',
+ 'description': 'Flüchtlingsdebatte: Hitzig, aber wenig hilfreich',
+ 'thumbnail': 're:^https?:.*\.jpg$',
+ },
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/tt-3827.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/nm-3475.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/weltspiegel-3167.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/tsvorzwanzig-959.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html',
+ 'only_matching': True,
}]
_FORMATS = {
@@ -49,19 +80,26 @@ class TagesschauIE(InfoExtractor):
playerpage = self._download_webpage(
player_url, display_id, 'Downloading player page')
- medias = re.findall(
- r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
- playerpage)
formats = []
- for url, ext, res in medias:
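+ # Media entries appear in the player page as JS literals of the form
+ # "<url>", type:"video/<ext>" or type:"audio/<ext>", optionally followed by
+ # quality:"<res>"; audio entries may lack the quality attribute.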
+ for media in re.finditer(
+ r'''(?x)
+ (?P<q_url>["\'])(?P<url>http://media.+?)(?P=q_url)
+ ,\s*type:(?P<q_type>["\'])(?P<type>video|audio)/(?P<ext>.+?)(?P=q_type)
+ (?:,\s*quality:(?P<q_quality>["\'])(?P<quality>.+?)(?P=q_quality))?
+ ''', playerpage):
+ url = media.group('url')
+ type_ = media.group('type')
+ ext = media.group('ext')
+ res = media.group('quality')
f = {
- 'format_id': res + '_' + ext,
+ 'format_id': '%s_%s' % (res, ext) if res else ext,
'url': url,
'ext': ext,
+ 'vcodec': 'none' if type_ == 'audio' else None,
}
f.update(self._FORMATS.get(res, {}))
formats.append(f)
- thumbnail_fn = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
+ thumbnail = self._og_search_thumbnail(playerpage)
title = self._og_search_title(webpage).strip()
description = self._og_search_description(webpage).strip()
else:
@@ -99,17 +137,14 @@ class TagesschauIE(InfoExtractor):
'filesize_approx': parse_filesize(m.group('filesize_approx')),
})
formats.append(format)
- thumbnail_fn = self._search_regex(
- r'(?s)<img alt="Sendungsbild".*?src="([^"]+)"',
- webpage, 'thumbnail', fatal=False)
+ thumbnail = self._og_search_thumbnail(webpage)
description = self._html_search_regex(
r'(?s)<p class="teasertext">(.*?)</p>',
- webpage, 'description', fatal=False)
+ webpage, 'description', default=None)
title = self._html_search_regex(
r'<span class="headline".*?>(.*?)</span>', webpage, 'title')
self._sort_formats(formats)
- thumbnail = 'http://www.tagesschau.de' + thumbnail_fn
return {
'id': display_id,
diff --git a/youtube_dl/extractor/tapely.py b/youtube_dl/extractor/tapely.py
index f1f43d0a7..744f9db38 100644
--- a/youtube_dl/extractor/tapely.py
+++ b/youtube_dl/extractor/tapely.py
@@ -16,7 +16,7 @@ from ..utils import (
class TapelyIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
+ _VALID_URL = r'https?://(?:www\.)?(?:tape\.ly|tapely\.com)/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
_API_URL = 'http://tape.ly/showtape?id={0:}'
_S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}'
_SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}'
@@ -42,6 +42,10 @@ class TapelyIE(InfoExtractor):
'ext': 'm4a',
},
},
+ {
+ 'url': 'https://tapely.com/my-grief-as-told-by-water',
+ 'only_matching': True,
+ },
]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py
index a0c744fd1..2c8e9b941 100644
--- a/youtube_dl/extractor/telecinco.py
+++ b/youtube_dl/extractor/telecinco.py
@@ -1,26 +1,94 @@
# coding: utf-8
from __future__ import unicode_literals
-from .mitele import MiTeleIE
+import json
+from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_parse_unquote,
+ compat_urlparse,
+)
+from ..utils import (
+ get_element_by_attribute,
+ parse_duration,
+ strip_jsonp,
+)
-class TelecincoIE(MiTeleIE):
- IE_NAME = 'telecinco.es'
- _VALID_URL = r'https?://www\.telecinco\.es/[^/]+/[^/]+/(?:[^/]+/)?(?P<id>.*?)\.html'
+
+class TelecincoIE(InfoExtractor):
+ IE_DESC = 'telecinco.es, cuatro.com and mediaset.es'
+ _VALID_URL = r'https?://www\.(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html'
_TESTS = [{
'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
+ 'md5': '5cbef3ad5ef17bf0d21570332d140729',
'info_dict': {
'id': 'MDSVID20141015_0058',
'ext': 'mp4',
'title': 'Con Martín Berasategui, hacer un bacalao al ...',
'duration': 662,
},
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ }, {
+ 'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html',
+ 'md5': '0a5b9f3cc8b074f50a0578f823a12694',
+ 'info_dict': {
+ 'id': 'MDSVID20150916_0128',
+ 'ext': 'mp4',
+ 'title': '¿Quién es este ex futbolista con el que hablan ...',
+ 'duration': 79,
+ },
+ }, {
+ 'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html',
+ 'md5': 'ad1bfaaba922dd4a295724b05b68f86a',
+ 'info_dict': {
+ 'id': 'MDSVID20150513_0220',
+ 'ext': 'mp4',
+ 'title': '#DOYLACARA. Con la trata no hay trato',
+ 'duration': 50,
},
}, {
'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html',
'only_matching': True,
+ }, {
+ 'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html',
+ 'only_matching': True,
}]
+
+ def _real_extract(self, url):
+ episode = self._match_id(url)
+ webpage = self._download_webpage(url, episode)
+ embed_data_json = self._search_regex(
+ r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
+ ).replace('\'', '"')
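+ # The embedData object uses single quotes; swapping them for double quotes is
+ # assumed to be enough to make it valid JSON for json.loads below.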
+ embed_data = json.loads(embed_data_json)
+
+ domain = embed_data['mediaUrl']
+ if not domain.startswith('http'):
+ # only happens in telecinco.es videos
+ domain = 'http://' + domain
+ info_url = compat_urlparse.urljoin(
+ domain,
+ compat_urllib_parse_unquote(embed_data['flashvars']['host'])
+ )
+ info_el = self._download_xml(info_url, episode).find('./video/info')
+
+ video_link = info_el.find('videoUrl/link').text
+ token_query = compat_urllib_parse.urlencode({'id': video_link})
+ token_info = self._download_json(
+ embed_data['flashvars']['ov_tk'] + '?' + token_query,
+ episode,
+ transform_source=strip_jsonp
+ )
+ formats = self._extract_m3u8_formats(
+ token_info['tokenizedUrl'], episode, ext='mp4', entry_protocol='m3u8_native')
+
+ return {
+ 'id': embed_data['videoId'],
+ 'display_id': episode,
+ 'title': info_el.find('title').text,
+ 'formats': formats,
+ 'description': get_element_by_attribute('class', 'text', webpage),
+ 'thumbnail': info_el.find('thumb').text,
+ 'duration': parse_duration(info_el.find('duration').text),
+ }
diff --git a/youtube_dl/extractor/telegraaf.py b/youtube_dl/extractor/telegraaf.py
new file mode 100644
index 000000000..6f8333cfc
--- /dev/null
+++ b/youtube_dl/extractor/telegraaf.py
@@ -0,0 +1,35 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import remove_end
+
+
+class TelegraafIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?telegraaf\.nl/tv/(?:[^/]+/)+(?P<id>\d+)/[^/]+\.html'
+ _TEST = {
+ 'url': 'http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html',
+ 'md5': '83245a9779bcc4a24454bfd53c65b6dc',
+ 'info_dict': {
+ 'id': '24353229',
+ 'ext': 'mp4',
+ 'title': 'Tikibad ontruimd wegens brand',
+ 'description': 'md5:05ca046ff47b931f9b04855015e163a4',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 33,
+ },
+ }
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, playlist_id)
+
+ playlist_url = self._search_regex(
+ r"iframe\.loadPlayer\('([^']+)'", webpage, 'player')
+
+ entries = self._extract_xspf_playlist(playlist_url, playlist_id)
+ title = remove_end(self._og_search_title(webpage), ' - VIDEO')
+ description = self._og_search_description(webpage)
+
+ return self.playlist_result(entries, playlist_id, title, description)
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index 83d833e30..25edc3100 100644
--- a/youtube_dl/extractor/theplatform.py
+++ b/youtube_dl/extractor/theplatform.py
@@ -1,7 +1,7 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
-import json
import time
import hmac
import binascii
@@ -10,7 +10,8 @@ import hashlib
from .common import InfoExtractor
from ..compat import (
- compat_str,
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
@@ -18,12 +19,69 @@ from ..utils import (
xpath_with_ns,
unsmuggle_url,
int_or_none,
+ url_basename,
+ float_or_none,
)
-_x = lambda p: xpath_with_ns(p, {'smil': 'http://www.w3.org/2005/SMIL21/Language'})
+default_ns = 'http://www.w3.org/2005/SMIL21/Language'
+_x = lambda p: xpath_with_ns(p, {'smil': default_ns})
-class ThePlatformIE(InfoExtractor):
+class ThePlatformBaseIE(InfoExtractor):
+ def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
+ meta = self._download_xml(smil_url, video_id, note=note)
+ try:
+ error_msg = next(
+ n.attrib['abstract']
+ for n in meta.findall(_x('.//smil:ref'))
+ if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired')
+ except StopIteration:
+ pass
+ else:
+ raise ExtractorError(error_msg, expected=True)
+
+ formats = self._parse_smil_formats(
+ meta, smil_url, video_id, namespace=default_ns,
+ # the parameters are taken from syfy.com; other sites may use different ones,
+ # but these also work for nbc.com
+ f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
+ transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))
+
+ for _format in formats:
+ ext = determine_ext(_format['url'])
+ if ext == 'once':
+ _format['ext'] = 'mp4'
+
+ self._sort_formats(formats)
+
+ subtitles = self._parse_smil_subtitles(meta, default_ns)
+
+ return formats, subtitles
+
+ def get_metadata(self, path, video_id):
+ info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
+ info = self._download_json(info_url, video_id)
+
+ subtitles = {}
+ captions = info.get('captions')
+ if isinstance(captions, list):
+ for caption in captions:
+ lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
+ subtitles[lang] = [{
+ 'ext': 'srt' if mime == 'text/srt' else 'ttml',
+ 'url': src,
+ }]
+
+ return {
+ 'title': info['title'],
+ 'subtitles': subtitles,
+ 'description': info['description'],
+ 'thumbnail': info['defaultThumbnailUrl'],
+ 'duration': int_or_none(info.get('duration'), 1000),
+ }
+
+
+class ThePlatformIE(ThePlatformBaseIE):
_VALID_URL = r'''(?x)
(?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
(?:(?P<media>(?:[^/]+/)+select/media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
@@ -67,6 +125,20 @@ class ThePlatformIE(InfoExtractor):
}, {
'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
'only_matching': True,
+ }, {
+ 'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
+ 'md5': '734f3790fb5fc4903da391beeebc4836',
+ 'info_dict': {
+ 'id': 'tdy_or_siri_150701',
+ 'ext': 'mp4',
+ 'title': 'iPhone Siri’s sassy response to a math question has people talking',
+ 'description': 'md5:a565d1deadd5086f3331d57298ec6333',
+ 'duration': 83.0,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'timestamp': 1435752600,
+ 'upload_date': '20150701',
+ 'categories': ['Today/Shows/Orange Room', 'Today/Sections/Money', 'Today/Topics/Tech', "Today/Topics/Editor's picks"],
+ },
}]
@staticmethod
@@ -101,6 +173,24 @@ class ThePlatformIE(InfoExtractor):
path += '/media'
path += '/' + video_id
+ qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+ if 'guid' in qs_dict:
+ webpage = self._download_webpage(url, video_id)
+ scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
+ feed_id = None
+ # The feed id is usually located in the last script.
+ # There seems to be no reliable pattern in the relevant script filenames,
+ # so try them one by one
+ for script in reversed(scripts):
+ feed_script = self._download_webpage(script, video_id, 'Downloading feed script')
+ feed_id = self._search_regex(r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None)
+ if feed_id is not None:
+ break
+ if feed_id is None:
+ raise ExtractorError('Unable to find feed id')
+ return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
+ provider_id, feed_id, qs_dict['guid'][0]))
+
if smuggled_data.get('force_smil_url', False):
smil_url = url
elif mobj.group('config'):
@@ -108,7 +198,11 @@ class ThePlatformIE(InfoExtractor):
config_url = config_url.replace('swf/', 'config/')
config_url = config_url.replace('onsite/', 'onsite/config/')
config = self._download_json(config_url, video_id, 'Downloading config')
- smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
+ if 'releaseUrl' in config:
+ release_url = config['releaseUrl']
+ else:
+ release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
+ smil_url = release_url + '&format=SMIL&formats=MPEG4&manifest=f4m'
else:
smil_url = 'http://link.theplatform.com/s/%s/meta.smil?format=smil&mbr=true' % path
@@ -116,95 +210,85 @@ class ThePlatformIE(InfoExtractor):
if sig:
smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])
- meta = self._download_xml(smil_url, video_id)
- try:
- error_msg = next(
- n.attrib['abstract']
- for n in meta.findall(_x('.//smil:ref'))
- if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired')
- except StopIteration:
- pass
- else:
- raise ExtractorError(error_msg, expected=True)
+ formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
- info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
- info_json = self._download_webpage(info_url, video_id)
- info = json.loads(info_json)
+ ret = self.get_metadata(path, video_id)
+ combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
+ ret.update({
+ 'id': video_id,
+ 'formats': formats,
+ 'subtitles': combined_subtitles,
+ })
+
+ return ret
+
+
+class ThePlatformFeedIE(ThePlatformBaseIE):
+ _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
+ _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
+ _TEST = {
+ # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
+ 'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
+ 'md5': '22d2b84f058d3586efcd99e57d59d314',
+ 'info_dict': {
+ 'id': 'n_hardball_5biden_140207',
+ 'ext': 'mp4',
+ 'title': 'The Biden factor: will Joe run in 2016?',
+ 'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'upload_date': '20140208',
+ 'timestamp': 1391824260,
+ 'duration': 467.0,
+ 'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+
+ video_id = mobj.group('id')
+ provider_id = mobj.group('provider_id')
+ feed_id = mobj.group('feed_id')
+
+ real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id)
+ feed = self._download_json(real_url, video_id)
+ entry = feed['entries'][0]
+ formats = []
subtitles = {}
- captions = info.get('captions')
- if isinstance(captions, list):
- for caption in captions:
- lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
- subtitles[lang] = [{
- 'ext': 'srt' if mime == 'text/srt' else 'ttml',
- 'url': src,
- }]
+ first_video_id = None
+ duration = None
+ for item in entry['media$content']:
+ smil_url = item['plfile$url'] + '&format=SMIL&Tracking=true&Embedded=true&formats=MPEG4,F4M'
+ cur_video_id = url_basename(smil_url)
+ if first_video_id is None:
+ first_video_id = cur_video_id
+ duration = float_or_none(item.get('plfile$duration'))
+ cur_formats, cur_subtitles = self._extract_theplatform_smil(smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id)
+ formats.extend(cur_formats)
+ subtitles = self._merge_subtitles(subtitles, cur_subtitles)
- head = meta.find(_x('smil:head'))
- body = meta.find(_x('smil:body'))
+ self._sort_formats(formats)
- f4m_node = body.find(_x('smil:seq//smil:video'))
- if f4m_node is None:
- f4m_node = body.find(_x('smil:seq/smil:video'))
- if f4m_node is not None and '.f4m' in f4m_node.attrib['src']:
- f4m_url = f4m_node.attrib['src']
- if 'manifest.f4m?' not in f4m_url:
- f4m_url += '?'
- # the parameters are from syfy.com, other sites may use others,
- # they also work for nbc.com
- f4m_url += '&g=UXWGVKRWHFSP&hdcore=3.0.3'
- formats = self._extract_f4m_formats(f4m_url, video_id)
- else:
- formats = []
- switch = body.find(_x('smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:par//smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:par/smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:par'))
- if switch is not None:
- base_url = head.find(_x('smil:meta')).attrib['base']
- for f in switch.findall(_x('smil:video')):
- attr = f.attrib
- width = int_or_none(attr.get('width'))
- height = int_or_none(attr.get('height'))
- vbr = int_or_none(attr.get('system-bitrate'), 1000)
- format_id = '%dx%d_%dk' % (width, height, vbr)
- formats.append({
- 'format_id': format_id,
- 'url': base_url,
- 'play_path': 'mp4:' + attr['src'],
- 'ext': 'flv',
- 'width': width,
- 'height': height,
- 'vbr': vbr,
- })
- else:
- switch = body.find(_x('smil:seq//smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:seq/smil:switch'))
- for f in switch.findall(_x('smil:video')):
- attr = f.attrib
- vbr = int_or_none(attr.get('system-bitrate'), 1000)
- ext = determine_ext(attr['src'])
- if ext == 'once':
- ext = 'mp4'
- formats.append({
- 'format_id': compat_str(vbr),
- 'url': attr['src'],
- 'vbr': vbr,
- 'ext': ext,
- })
- self._sort_formats(formats)
+ thumbnails = [{
+ 'url': thumbnail['plfile$url'],
+ 'width': int_or_none(thumbnail.get('plfile$width')),
+ 'height': int_or_none(thumbnail.get('plfile$height')),
+ } for thumbnail in entry.get('media$thumbnails', [])]
- return {
+ timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
+ categories = [item['media$name'] for item in entry.get('media$categories', [])]
+
+ ret = self.get_metadata('%s/%s' % (provider_id, first_video_id), video_id)
+ subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
+ ret.update({
'id': video_id,
- 'title': info['title'],
- 'subtitles': subtitles,
'formats': formats,
- 'description': info['description'],
- 'thumbnail': info['defaultThumbnailUrl'],
- 'duration': int_or_none(info.get('duration'), 1000),
- }
+ 'subtitles': subtitles,
+ 'thumbnails': thumbnails,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'categories': categories,
+ })
+
+ return ret
diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py
index 2c4b21807..4f86b3ee9 100644
--- a/youtube_dl/extractor/tubitv.py
+++ b/youtube_dl/extractor/tubitv.py
@@ -60,9 +60,7 @@ class TubiTvIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
if re.search(r"<(?:DIV|div) class='login-required-screen'>", webpage):
- raise ExtractorError(
- 'This video requires login, use --username and --password '
- 'options to provide account credentials.', expected=True)
+ self.raise_login_required('This video requires login')
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index c89de5ba4..5f7ac4b35 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -2,14 +2,12 @@
from __future__ import unicode_literals
-import re
-import json
-
from .common import InfoExtractor
+from ..compat import compat_str
class TudouIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/.*?/(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
+ _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/([^/]+/)*(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
_TESTS = [{
'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
'md5': '140a49ed444bd22f93330985d8475fcb',
@@ -27,35 +25,41 @@ class TudouIE(InfoExtractor):
'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
'thumbnail': 're:^https?://.*\.jpg$',
}
+ }, {
+ 'url': 'http://www.tudou.com/albumplay/cJAHGih4yYg.html',
+ 'only_matching': True,
}]
- def _url_for_id(self, id, quality=None):
- info_url = "http://v2.tudou.com/f?id=" + str(id)
+ _PLAYER_URL = 'http://js.tudouui.com/bin/lingtong/PortalPlayer_177.swf'
+
+ def _url_for_id(self, video_id, quality=None):
+ info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id)
if quality:
info_url += '&hd' + quality
- webpage = self._download_webpage(info_url, id, "Opening the info webpage")
- final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
+ xml_data = self._download_xml(info_url, video_id, "Opening the info XML page")
+ final_url = xml_data.text
return final_url
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage)
- if m and m.group(1):
- return {
- '_type': 'url',
- 'url': 'youku:' + m.group(1),
- 'ie_key': 'Youku'
- }
+ youku_vcode = self._search_regex(
+ r'vcode\s*:\s*[\'"]([^\'"]*)[\'"]', webpage, 'youku vcode', default=None)
+ if youku_vcode:
+ return self.url_result('youku:' + youku_vcode, ie='Youku')
title = self._search_regex(
- r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
+ r',kw\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'title')
thumbnail_url = self._search_regex(
- r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)
+ r',pic\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'thumbnail URL', fatal=False)
+
+ player_url = self._search_regex(
+ r'playerUrl\s*:\s*[\'"]([^\'"]+\.swf)[\'"]',
+ webpage, 'player URL', default=self._PLAYER_URL)
- segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
- segments = json.loads(segs_json)
+ segments = self._parse_json(self._search_regex(
+ r'segs: \'([^\']+)\'', webpage, 'segments'), video_id)
# It looks like the keys are the arguments that have to be passed as
# the hd field in the request url; we pick the highest.
# Also, filter non-number qualities (see issue #3643).
@@ -76,6 +80,9 @@ class TudouIE(InfoExtractor):
'ext': ext,
'title': title,
'thumbnail': thumbnail_url,
+ 'http_headers': {
+ 'Referer': player_url,
+ },
}
result.append(part_info)
diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 3d3b635e4..4f844706d 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..utils import int_or_none
class TumblrIE(InfoExtractor):
@@ -29,6 +30,19 @@ class TumblrIE(InfoExtractor):
'thumbnail': 're:http://.*\.jpg',
}
}, {
+ 'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video',
+ 'md5': '7ae503065ad150122dc3089f8cf1546c',
+ 'info_dict': {
+ 'id': '130323439814',
+ 'ext': 'mp4',
+ 'title': 'HD Video Testing \u2014 Test description for my HD video',
+ 'description': 'md5:97cc3ab5fcd27ee4af6356701541319c',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ 'params': {
+ 'format': 'hd',
+ },
+ }, {
'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching',
'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab',
'info_dict': {
@@ -37,6 +51,9 @@ class TumblrIE(InfoExtractor):
'title': 'naked smoking & stretching',
'upload_date': '20150506',
'timestamp': 1430931613,
+ 'age_limit': 18,
+ 'uploader_id': '1638622',
+ 'uploader': 'naked-yogi',
},
'add_ie': ['Vidme'],
}, {
@@ -66,10 +83,38 @@ class TumblrIE(InfoExtractor):
if iframe_url is None:
return self.url_result(urlh.geturl(), 'Generic')
- iframe = self._download_webpage(iframe_url, video_id,
- 'Downloading iframe page')
- video_url = self._search_regex(r'<source src="([^"]+)"',
- iframe, 'video url')
+ iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page')
+
+ duration = None
+ sources = []
+
+ sd_url = self._search_regex(
+ r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe,
+ 'sd video url', default=None, group='url')
+ if sd_url:
+ sources.append((sd_url, 'sd'))
+
+ options = self._parse_json(
+ self._search_regex(
+ r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe,
+ 'hd video url', default='', group='options'),
+ video_id, fatal=False)
+ if options:
+ duration = int_or_none(options.get('duration'))
+ hd_url = options.get('hdUrl')
+ if hd_url:
+ sources.append((hd_url, 'hd'))
+
+ formats = [{
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'format_id': format_id,
+ 'height': int_or_none(self._search_regex(
+ r'/(\d{3,4})$', video_url, 'height', default=None)),
+ 'quality': quality,
+ } for quality, (video_url, format_id) in enumerate(sources)]
+
+ self._sort_formats(formats)
# This is the only place where a title can be found; it's not complete,
# but searching in other places doesn't work for all videos
@@ -79,9 +124,9 @@ class TumblrIE(InfoExtractor):
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'mp4',
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
+ 'duration': duration,
+ 'formats': formats,
}
diff --git a/youtube_dl/extractor/tvplay.py b/youtube_dl/extractor/tvplay.py
index 79863e781..b4683de54 100644
--- a/youtube_dl/extractor/tvplay.py
+++ b/youtube_dl/extractor/tvplay.py
@@ -104,6 +104,7 @@ class TVPlayIE(InfoExtractor):
'duration': 1492,
'timestamp': 1330522854,
'upload_date': '20120229',
+ 'age_limit': 18,
},
'params': {
# rtmp download
diff --git a/youtube_dl/extractor/tweakers.py b/youtube_dl/extractor/tweakers.py
index c80ec15cf..f3198fb85 100644
--- a/youtube_dl/extractor/tweakers.py
+++ b/youtube_dl/extractor/tweakers.py
@@ -1,19 +1,13 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import (
- xpath_text,
- xpath_with_ns,
- int_or_none,
- float_or_none,
-)
class TweakersIE(InfoExtractor):
_VALID_URL = r'https?://tweakers\.net/video/(?P<id>\d+)'
_TEST = {
'url': 'https://tweakers.net/video/9926/new-nintendo-3ds-xl-op-alle-fronten-beter.html',
- 'md5': '1b5afa817403bb5baa08359dca31e6df',
+ 'md5': '3147e4ddad366f97476a93863e4557c8',
'info_dict': {
'id': '9926',
'ext': 'mp4',
@@ -25,41 +19,7 @@ class TweakersIE(InfoExtractor):
}
def _real_extract(self, url):
- video_id = self._match_id(url)
-
- playlist = self._download_xml(
- 'https://tweakers.net/video/s1playlist/%s/playlist.xspf' % video_id,
- video_id)
-
- NS_MAP = {
- 'xspf': 'http://xspf.org/ns/0/',
- 's1': 'http://static.streamone.nl/player/ns/0',
- }
-
- track = playlist.find(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP))
-
- title = xpath_text(
- track, xpath_with_ns('./xspf:title', NS_MAP), 'title')
- description = xpath_text(
- track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
- thumbnail = xpath_text(
- track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
- duration = float_or_none(
- xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'),
- 1000)
-
- formats = [{
- 'url': location.text,
- 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
- 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
- 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
- } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'formats': formats,
- }
+ playlist_id = self._match_id(url)
+ entries = self._extract_xspf_playlist(
+ 'https://tweakers.net/video/s1playlist/%s/playlist.xspf' % playlist_id, playlist_id)
+ return self.playlist_result(entries, playlist_id)
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 73ce335b7..3ec08b674 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -7,12 +7,18 @@ import random
from .common import InfoExtractor
from ..compat import (
+ compat_parse_qs,
compat_str,
compat_urllib_parse,
+ compat_urllib_parse_urlparse,
compat_urllib_request,
+ compat_urlparse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
+ int_or_none,
+ parse_duration,
parse_iso8601,
)
@@ -22,8 +28,7 @@ class TwitchBaseIE(InfoExtractor):
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'http://usher.twitch.tv'
- _LOGIN_URL = 'https://secure.twitch.tv/login'
- _LOGIN_POST_URL = 'https://passport.twitch.tv/authorize'
+ _LOGIN_URL = 'http://www.twitch.tv/login'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
@@ -56,19 +61,28 @@ class TwitchBaseIE(InfoExtractor):
if username is None:
return
- login_page = self._download_webpage(
+ login_page, handle = self._download_webpage_handle(
self._LOGIN_URL, None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
- 'login': username.encode('utf-8'),
- 'password': password.encode('utf-8'),
+ 'username': username,
+ 'password': password,
})
+ redirect_url = handle.geturl()
+
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
+ 'post url', default=redirect_url, group='url')
+
+ if not post_url.startswith('http'):
+ post_url = compat_urlparse.urljoin(redirect_url, post_url)
+
request = compat_urllib_request.Request(
- self._LOGIN_POST_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
- request.add_header('Referer', self._LOGIN_URL)
+ post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
+ request.add_header('Referer', redirect_url)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -129,14 +143,14 @@ class TwitchItemBaseIE(TwitchBaseIE):
def _extract_info(self, info):
return {
'id': info['_id'],
- 'title': info['title'],
- 'description': info['description'],
- 'duration': info['length'],
- 'thumbnail': info['preview'],
- 'uploader': info['channel']['display_name'],
- 'uploader_id': info['channel']['name'],
- 'timestamp': parse_iso8601(info['recorded_at']),
- 'view_count': info['views'],
+ 'title': info.get('title') or 'Untitled Broadcast',
+ 'description': info.get('description'),
+ 'duration': int_or_none(info.get('length')),
+ 'thumbnail': info.get('preview'),
+ 'uploader': info.get('channel', {}).get('display_name'),
+ 'uploader_id': info.get('channel', {}).get('name'),
+ 'timestamp': parse_iso8601(info.get('recorded_at')),
+ 'view_count': int_or_none(info.get('views')),
}
def _real_extract(self, url):
@@ -184,8 +198,8 @@ class TwitchVodIE(TwitchItemBaseIE):
_ITEM_TYPE = 'vod'
_ITEM_SHORTCUT = 'v'
- _TEST = {
- 'url': 'http://www.twitch.tv/riotgames/v/6528877',
+ _TESTS = [{
+ 'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
'info_dict': {
'id': 'v6528877',
'ext': 'mp4',
@@ -197,25 +211,61 @@ class TwitchVodIE(TwitchItemBaseIE):
'uploader': 'Riot Games',
'uploader_id': 'riotgames',
'view_count': int,
+ 'start_time': 310,
},
'params': {
# m3u8 download
'skip_download': True,
},
- }
+ }, {
+ # Untitled broadcast (title is None)
+ 'url': 'http://www.twitch.tv/belkao_o/v/11230755',
+ 'info_dict': {
+ 'id': 'v11230755',
+ 'ext': 'mp4',
+ 'title': 'Untitled Broadcast',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 1638,
+ 'timestamp': 1439746708,
+ 'upload_date': '20150816',
+ 'uploader': 'BelkAO_o',
+ 'uploader_id': 'belkao_o',
+ 'view_count': int,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }]
def _real_extract(self, url):
item_id = self._match_id(url)
+
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._download_json(
'%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
'Downloading %s access token' % self._ITEM_TYPE)
+
formats = self._extract_m3u8_formats(
- '%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true'
- % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
+ '%s/vod/%s?%s' % (
+ self._USHER_BASE, item_id,
+ compat_urllib_parse.urlencode({
+ 'allow_source': 'true',
+ 'allow_spectre': 'true',
+ 'player': 'twitchweb',
+ 'nauth': access_token['token'],
+ 'nauthsig': access_token['sig'],
+ })),
item_id, 'mp4')
+
self._prefer_source(formats)
info['formats'] = formats
+
+ parsed_url = compat_urllib_parse_urlparse(url)
+ query = compat_parse_qs(parsed_url.query)
+ if 't' in query:
+ info['start_time'] = parse_duration(query['t'][0])
+
return info
diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index e2bab52fe..365d8b4bf 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -70,14 +70,16 @@ class UdemyIE(InfoExtractor):
def _login(self):
(username, password) = self._get_login_info()
if username is None:
- raise ExtractorError(
- 'Udemy account is required, use --username and --password options to provide account credentials.',
- expected=True)
+ self.raise_login_required('Udemy account is required')
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
- if login_popup == '<div class="run-command close-popup redirect" data-url="https://www.udemy.com/"></div>':
+ def is_logged(webpage):
+ return any(p in webpage for p in ['href="https://www.udemy.com/user/logout/', '>Logout<'])
+
+ # already logged in
+ if is_logged(login_popup):
return
login_form = self._form_hidden_inputs('login-form', login_popup)
@@ -95,8 +97,7 @@ class UdemyIE(InfoExtractor):
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
- if all(logout_pattern not in response
- for logout_pattern in ['href="https://www.udemy.com/user/logout/', '>Logout<']):
+ if not is_logged(response):
error = self._html_search_regex(
r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
response, 'error message', default=None)
diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
index c39c278ab..73b05ecab 100644
--- a/youtube_dl/extractor/ustream.py
+++ b/youtube_dl/extractor/ustream.py
@@ -1,17 +1,20 @@
from __future__ import unicode_literals
-import json
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ float_or_none,
+)
class UstreamIE(InfoExtractor):
- _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
+ _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)'
IE_NAME = 'ustream'
_TESTS = [{
'url': 'http://www.ustream.tv/recorded/20274954',
@@ -19,8 +22,12 @@ class UstreamIE(InfoExtractor):
'info_dict': {
'id': '20274954',
'ext': 'flv',
- 'uploader': 'Young Americans for Liberty',
'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
+ 'description': 'Young Americans for Liberty February 7, 2012 2:28 AM',
+ 'timestamp': 1328577035,
+ 'upload_date': '20120207',
+ 'uploader': 'yaliberty',
+ 'uploader_id': '6780869',
},
}, {
# From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444
@@ -32,20 +39,21 @@ class UstreamIE(InfoExtractor):
'ext': 'flv',
'title': '-CG11- Canada Games Figure Skating',
'uploader': 'sportscanadatv',
- }
+ },
+ 'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.',
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
- video_id = m.group('videoID')
+ video_id = m.group('id')
# some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
if m.group('type') == 'embed/recorded':
- video_id = m.group('videoID')
+ video_id = m.group('id')
desktop_url = 'http://www.ustream.tv/recorded/' + video_id
return self.url_result(desktop_url, 'Ustream')
if m.group('type') == 'embed':
- video_id = m.group('videoID')
+ video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
desktop_video_id = self._html_search_regex(
r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
@@ -53,52 +61,50 @@ class UstreamIE(InfoExtractor):
return self.url_result(desktop_url, 'Ustream')
params = self._download_json(
- 'http://cdngw.ustream.tv/rgwjson/Viewer.getVideo/' + json.dumps({
- 'brandId': 1,
- 'videoId': int(video_id),
- 'autoplay': False,
- }), video_id)
-
- if 'error' in params:
- raise ExtractorError(params['error']['message'], expected=True)
-
- video_url = params['flv']
+ 'https://api.ustream.tv/videos/%s.json' % video_id, video_id)
- webpage = self._download_webpage(url, video_id)
+ error = params.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, error), expected=True)
- self.report_extraction(video_id)
+ video = params['video']
- video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
- webpage, 'title', default=None)
+ title = video['title']
+ filesize = float_or_none(video.get('file_size'))
- if not video_title:
- try:
- video_title = params['moduleConfig']['meta']['title']
- except KeyError:
- pass
-
- if not video_title:
- video_title = 'Ustream video ' + video_id
+ formats = [{
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': format_id,
+ 'filesize': filesize,
+ } for format_id, video_url in video['media_urls'].items()]
+ self._sort_formats(formats)
- uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
- webpage, 'uploader', fatal=False, flags=re.DOTALL, default=None)
+ description = video.get('description')
+ timestamp = int_or_none(video.get('created_at'))
+ duration = float_or_none(video.get('length'))
+ view_count = int_or_none(video.get('views'))
- if not uploader:
- try:
- uploader = params['moduleConfig']['meta']['userName']
- except KeyError:
- uploader = None
+ uploader = video.get('owner', {}).get('username')
+ uploader_id = video.get('owner', {}).get('id')
- thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
- webpage, 'thumbnail', fatal=False)
+ thumbnails = [{
+ 'id': thumbnail_id,
+ 'url': thumbnail_url,
+ } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()]
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'flv',
- 'title': video_title,
+ 'title': title,
+ 'description': description,
+ 'thumbnails': thumbnails,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'view_count': view_count,
'uploader': uploader,
- 'thumbnail': thumbnail,
+ 'uploader_id': uploader_id,
+ 'formats': formats,
}
diff --git a/youtube_dl/extractor/videobam.py b/youtube_dl/extractor/videobam.py
deleted file mode 100644
index 0eb3d9414..000000000
--- a/youtube_dl/extractor/videobam.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from __future__ import unicode_literals
-
-import re
-import json
-
-from .common import InfoExtractor
-from ..utils import int_or_none
-
-
-class VideoBamIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?videobam\.com/(?:videos/download/)?(?P<id>[a-zA-Z]+)'
-
- _TESTS = [
- {
- 'url': 'http://videobam.com/OiJQM',
- 'md5': 'db471f27763a531f10416a0c58b5a1e0',
- 'info_dict': {
- 'id': 'OiJQM',
- 'ext': 'mp4',
- 'title': 'Is Alcohol Worse Than Ecstasy?',
- 'description': 'md5:d25b96151515c91debc42bfbb3eb2683',
- 'uploader': 'frihetsvinge',
- },
- },
- {
- 'url': 'http://videobam.com/pqLvq',
- 'md5': 'd9a565b5379a99126ef94e1d7f9a383e',
- 'note': 'HD video',
- 'info_dict': {
- 'id': 'pqLvq',
- 'ext': 'mp4',
- 'title': '_',
- }
- },
- ]
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- page = self._download_webpage('http://videobam.com/%s' % video_id, video_id, 'Downloading page')
-
- formats = []
-
- for preference, format_id in enumerate(['low', 'high']):
- mobj = re.search(r"%s: '(?P<url>[^']+)'" % format_id, page)
- if not mobj:
- continue
- formats.append({
- 'url': mobj.group('url'),
- 'ext': 'mp4',
- 'format_id': format_id,
- 'preference': preference,
- })
-
- if not formats:
- player_config = json.loads(self._html_search_regex(r'var player_config = ({.+?});', page, 'player config'))
- formats = [{
- 'url': item['url'],
- 'ext': 'mp4',
- } for item in player_config['playlist'] if 'autoPlay' in item]
-
- self._sort_formats(formats)
-
- title = self._og_search_title(page, default='_', fatal=False)
- description = self._og_search_description(page, default=None)
- thumbnail = self._og_search_thumbnail(page)
- uploader = self._html_search_regex(r'Upload by ([^<]+)</a>', page, 'uploader', fatal=False, default=None)
- view_count = int_or_none(
- self._html_search_regex(r'<strong>Views:</strong> (\d+) ', page, 'view count', fatal=False))
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'uploader': uploader,
- 'view_count': view_count,
- 'formats': formats,
- 'age_limit': 18,
- }
diff --git a/youtube_dl/extractor/videolecturesnet.py b/youtube_dl/extractor/videolecturesnet.py
index d6a7eb203..649ac9433 100644
--- a/youtube_dl/extractor/videolecturesnet.py
+++ b/youtube_dl/extractor/videolecturesnet.py
@@ -3,19 +3,21 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_HTTPError,
+ compat_urlparse,
+)
from ..utils import (
- find_xpath_attr,
- int_or_none,
+ ExtractorError,
parse_duration,
- unified_strdate,
)
class VideoLecturesNetIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/'
+ _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/*(?:[#?].*)?$'
IE_NAME = 'videolectures.net'
- _TEST = {
+ _TESTS = [{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
'info_dict': {
'id': 'promogram_igor_mekjavic_eng',
@@ -26,61 +28,55 @@ class VideoLecturesNetIE(InfoExtractor):
'duration': 565,
'thumbnail': 're:http://.*\.jpg',
},
- }
+ }, {
+ # video with invalid direct format links (HTTP 403)
+ 'url': 'http://videolectures.net/russir2010_filippova_nlp/',
+ 'info_dict': {
+ 'id': 'russir2010_filippova_nlp',
+ 'ext': 'flv',
+ 'title': 'NLP at Google',
+ 'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3',
+ 'duration': 5352,
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://videolectures.net/deeplearning2015_montreal/',
+ 'info_dict': {
+ 'id': 'deeplearning2015_montreal',
+ 'title': 'Deep Learning Summer School, Montreal 2015',
+ 'description': 'md5:90121a40cc6926df1bf04dcd8563ed3b',
+ },
+ 'playlist_count': 30,
+ }]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
smil_url = 'http://videolectures.net/%s/video/1/smil.xml' % video_id
- smil = self._download_xml(smil_url, video_id)
- title = find_xpath_attr(smil, './/meta', 'name', 'title').attrib['content']
- description_el = find_xpath_attr(smil, './/meta', 'name', 'abstract')
- description = (
- None if description_el is None
- else description_el.attrib['content'])
- upload_date = unified_strdate(
- find_xpath_attr(smil, './/meta', 'name', 'date').attrib['content'])
+ try:
+ smil = self._download_smil(smil_url, video_id)
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
+ # Probably a playlist
+ webpage = self._download_webpage(url, video_id)
+ entries = [
+ self.url_result(compat_urlparse.urljoin(url, video_url), 'VideoLecturesNet')
+ for _, video_url in re.findall(r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', webpage)]
+ playlist_title = self._html_search_meta('title', webpage, 'title', fatal=True)
+ playlist_description = self._html_search_meta('description', webpage, 'description')
+ return self.playlist_result(entries, video_id, playlist_title, playlist_description)
- switch = smil.find('.//switch')
- duration = parse_duration(switch.attrib.get('dur'))
- thumbnail_el = find_xpath_attr(switch, './image', 'type', 'thumbnail')
- thumbnail = (
- None if thumbnail_el is None else thumbnail_el.attrib.get('src'))
+ info = self._parse_smil(smil, smil_url, video_id)
- formats = []
- for v in switch.findall('./video'):
- proto = v.attrib.get('proto')
- if proto not in ['http', 'rtmp']:
- continue
- f = {
- 'width': int_or_none(v.attrib.get('width')),
- 'height': int_or_none(v.attrib.get('height')),
- 'filesize': int_or_none(v.attrib.get('size')),
- 'tbr': int_or_none(v.attrib.get('systemBitrate')) / 1000.0,
- 'ext': v.attrib.get('ext'),
- }
- src = v.attrib['src']
- if proto == 'http':
- if self._is_valid_url(src, video_id):
- f['url'] = src
- formats.append(f)
- elif proto == 'rtmp':
- f.update({
- 'url': v.attrib['streamer'],
- 'play_path': src,
- 'rtmp_real_time': True,
- })
- formats.append(f)
- self._sort_formats(formats)
+ info['id'] = video_id
+
+ switch = smil.find('.//switch')
+ if switch is not None:
+ info['duration'] = parse_duration(switch.attrib.get('dur'))
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'upload_date': upload_date,
- 'duration': duration,
- 'thumbnail': thumbnail,
- 'formats': formats,
- }
+ return info
diff --git a/youtube_dl/extractor/vidme.py b/youtube_dl/extractor/vidme.py
index e0b55078b..382517a4a 100644
--- a/youtube_dl/extractor/vidme.py
+++ b/youtube_dl/extractor/vidme.py
@@ -1,10 +1,12 @@
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import compat_HTTPError
from ..utils import (
+ ExtractorError,
int_or_none,
float_or_none,
- str_to_int,
+ parse_iso8601,
)
@@ -12,55 +14,149 @@ class VidmeIE(InfoExtractor):
_VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'https://vid.me/QNB',
- 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
+ 'md5': 'c62f1156138dc3323902188c5b5a8bd6',
'info_dict': {
'id': 'QNB',
'ext': 'mp4',
'title': 'Fishing for piranha - the easy way',
'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
- 'duration': 119.92,
+ 'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1406313244,
'upload_date': '20140725',
+ 'age_limit': 0,
+ 'duration': 119.92,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ }, {
+ 'url': 'https://vid.me/Gc6M',
+ 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
+ 'info_dict': {
+ 'id': 'Gc6M',
+ 'ext': 'mp4',
+ 'title': 'O Mere Dil ke chain - Arnav and Khushi VM',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1441211642,
+ 'upload_date': '20150902',
+ 'uploader': 'SunshineM',
+ 'uploader_id': '3552827',
+ 'age_limit': 0,
+ 'duration': 223.72,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # tests uploader field
+ 'url': 'https://vid.me/4Iib',
+ 'info_dict': {
+ 'id': '4Iib',
+ 'ext': 'mp4',
+ 'title': 'The Carver',
+ 'description': 'md5:e9c24870018ae8113be936645b93ba3c',
'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1433203629,
+ 'upload_date': '20150602',
+ 'uploader': 'Thomas',
+ 'uploader_id': '109747',
+ 'age_limit': 0,
+ 'duration': 97.859999999999999,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
},
}, {
- # From http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
+ # nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
'url': 'https://vid.me/e/Wmur',
+ 'info_dict': {
+ 'id': 'Wmur',
+ 'ext': 'mp4',
+ 'title': 'naked smoking & stretching',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1430931613,
+ 'upload_date': '20150506',
+ 'uploader': 'naked-yogi',
+ 'uploader_id': '1638622',
+ 'age_limit': 18,
+ 'duration': 653.26999999999998,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # nsfw, user-disabled
+ 'url': 'https://vid.me/dzGJ',
'only_matching': True,
}]
def _real_extract(self, url):
- url = url.replace('vid.me/e/', 'vid.me/')
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
- video_url = self._html_search_regex(
- r'<source src="([^"]+)"', webpage, 'video URL')
+ try:
+ response = self._download_json(
+ 'https://api.vid.me/videoByUrl/%s' % video_id, video_id)
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+ response = self._parse_json(e.cause.read(), video_id)
+ else:
+ raise
+
+ error = response.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, error), expected=True)
+
+ video = response['video']
+
+ if video.get('state') == 'user-disabled':
+ raise ExtractorError(
+ 'Vidme said: This video has been suspended either due to a copyright claim, '
+ 'or for violating the terms of use.',
+ expected=True)
+
+ formats = [{
+ 'format_id': f.get('type'),
+ 'url': f['uri'],
+ 'width': int_or_none(f.get('width')),
+ 'height': int_or_none(f.get('height')),
+ 'preference': 0 if f.get('type', '').endswith('clip') else 1,
+ } for f in video.get('formats', []) if f.get('uri')]
+ self._sort_formats(formats)
- title = self._og_search_title(webpage)
- description = self._og_search_description(webpage, default='')
- thumbnail = self._og_search_thumbnail(webpage)
- timestamp = int_or_none(self._og_search_property('updated_time', webpage, fatal=False))
- width = int_or_none(self._og_search_property('video:width', webpage, fatal=False))
- height = int_or_none(self._og_search_property('video:height', webpage, fatal=False))
- duration = float_or_none(self._html_search_regex(
- r'data-duration="([^"]+)"', webpage, 'duration', fatal=False))
- view_count = str_to_int(self._html_search_regex(
- r'<(?:li|span) class="video_views">\s*([\d,\.]+)\s*plays?', webpage, 'view count', fatal=False))
- like_count = str_to_int(self._html_search_regex(
- r'class="score js-video-vote-score"[^>]+data-score="([\d,\.\s]+)">',
- webpage, 'like count', fatal=False))
+ title = video['title']
+ description = video.get('description')
+ thumbnail = video.get('thumbnail_url')
+ timestamp = parse_iso8601(video.get('date_created'), ' ')
+ uploader = video.get('user', {}).get('username')
+ uploader_id = video.get('user', {}).get('user_id')
+ age_limit = 18 if video.get('nsfw') is True else 0
+ duration = float_or_none(video.get('duration'))
+ view_count = int_or_none(video.get('view_count'))
+ like_count = int_or_none(video.get('likes_count'))
+ comment_count = int_or_none(video.get('comment_count'))
return {
'id': video_id,
- 'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'age_limit': age_limit,
'timestamp': timestamp,
- 'width': width,
- 'height': height,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
+ 'comment_count': comment_count,
+ 'formats': formats,
}
diff --git a/youtube_dl/extractor/vier.py b/youtube_dl/extractor/vier.py
index 15377097e..c76c20614 100644
--- a/youtube_dl/extractor/vier.py
+++ b/youtube_dl/extractor/vier.py
@@ -2,6 +2,7 @@
from __future__ import unicode_literals
import re
+import itertools
from .common import InfoExtractor
@@ -91,31 +92,27 @@ class VierVideosIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
program = mobj.group('program')
- webpage = self._download_webpage(url, program)
-
page_id = mobj.group('page')
if page_id:
page_id = int(page_id)
start_page = page_id
- last_page = start_page + 1
playlist_id = '%s-page%d' % (program, page_id)
else:
start_page = 0
- last_page = int(self._search_regex(
- r'videos\?page=(\d+)">laatste</a>',
- webpage, 'last page', default=0)) + 1
playlist_id = program
entries = []
- for current_page_id in range(start_page, last_page):
+ for current_page_id in itertools.count(start_page):
current_page = self._download_webpage(
'http://www.vier.be/%s/videos?page=%d' % (program, current_page_id),
program,
- 'Downloading page %d' % (current_page_id + 1)) if current_page_id != page_id else webpage
+ 'Downloading page %d' % (current_page_id + 1))
page_entries = [
self.url_result('http://www.vier.be' + video_url, 'Vier')
for video_url in re.findall(
r'<h3><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)]
entries.extend(page_entries)
+ if page_id or '>Meer<' not in current_page:
+ break
return self.playlist_result(entries, playlist_id)
diff --git a/youtube_dl/extractor/viewster.py b/youtube_dl/extractor/viewster.py
index 6ef36290b..632e57fb4 100644
--- a/youtube_dl/extractor/viewster.py
+++ b/youtube_dl/extractor/viewster.py
@@ -3,25 +3,29 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
+ compat_HTTPError,
compat_urllib_request,
compat_urllib_parse,
+ compat_urllib_parse_unquote,
)
from ..utils import (
determine_ext,
+ ExtractorError,
int_or_none,
parse_iso8601,
+ HEADRequest,
)
class ViewsterIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?viewster\.com/(?:serie|movie)/(?P<id>\d+-\d+-\d+)'
+ _VALID_URL = r'https?://(?:www\.)?viewster\.com/(?:serie|movie)/(?P<id>\d+-\d+-\d+)'
_TESTS = [{
# movie, Type=Movie
'url': 'http://www.viewster.com/movie/1140-11855-000/the-listening-project/',
- 'md5': '14d3cfffe66d57b41ae2d9c873416f01',
+ 'md5': 'e642d1b27fcf3a4ffa79f194f5adde36',
'info_dict': {
'id': '1140-11855-000',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'The listening Project',
'description': 'md5:bac720244afd1a8ea279864e67baa071',
'timestamp': 1214870400,
@@ -31,10 +35,10 @@ class ViewsterIE(InfoExtractor):
}, {
# series episode, Type=Episode
'url': 'http://www.viewster.com/serie/1284-19427-001/the-world-and-a-wall/',
- 'md5': 'd5434c80fcfdb61651cc2199a88d6ba3',
+ 'md5': '9243079a8531809efe1b089db102c069',
'info_dict': {
'id': '1284-19427-001',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'The World and a Wall',
'description': 'md5:24814cf74d3453fdf5bfef9716d073e3',
'timestamp': 1428192000,
@@ -59,10 +63,17 @@ class ViewsterIE(InfoExtractor):
'description': 'md5:e7097a8fc97151e25f085c9eb7a1cdb1',
},
'playlist_mincount': 16,
+ }, {
+ # geo restricted series
+ 'url': 'https://www.viewster.com/serie/1280-18794-002/',
+ 'only_matching': True,
+ }, {
+ # geo restricted video
+ 'url': 'https://www.viewster.com/serie/1280-18794-002/what-is-extraterritoriality-lawo/',
+ 'only_matching': True,
}]
_ACCEPT_HEADER = 'application/json, text/javascript, */*; q=0.01'
- _AUTH_TOKEN = '/YqhSYsx8EaU9Bsta3ojlA=='
def _download_json(self, url, video_id, note='Downloading JSON metadata', fatal=True):
request = compat_urllib_request.Request(url)
@@ -72,6 +83,10 @@ class ViewsterIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
+ # Get 'api_token' cookie
+ self._request_webpage(HEADRequest('http://www.viewster.com/'), video_id)
+ cookies = self._get_cookies('http://www.viewster.com/')
+ self._AUTH_TOKEN = compat_urllib_parse_unquote(cookies['api_token'].value)
info = self._download_json(
'https://public-api.viewster.com/search/%s' % video_id,
@@ -80,10 +95,16 @@ class ViewsterIE(InfoExtractor):
entry_id = info.get('Id') or info['id']
# unfinished serie has no Type
- if info.get('Type') in ['Serie', None]:
- episodes = self._download_json(
- 'https://public-api.viewster.com/series/%s/episodes' % entry_id,
- video_id, 'Downloading series JSON')
+ if info.get('Type') in ('Serie', None):
+ try:
+ episodes = self._download_json(
+ 'https://public-api.viewster.com/series/%s/episodes' % entry_id,
+ video_id, 'Downloading series JSON')
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
+ self.raise_geo_restricted()
+ else:
+ raise
entries = [
self.url_result(
'http://www.viewster.com/movie/%s' % episode['OriginId'], 'Viewster')
@@ -93,7 +114,7 @@ class ViewsterIE(InfoExtractor):
return self.playlist_result(entries, video_id, title, description)
formats = []
- for media_type in ('application/f4m+xml', 'application/x-mpegURL'):
+ for media_type in ('application/f4m+xml', 'application/x-mpegURL', 'video/mp4'):
media = self._download_json(
'https://public-api.viewster.com/movies/%s/video?mediaType=%s'
% (entry_id, compat_urllib_parse.quote(media_type)),
@@ -115,9 +136,22 @@ class ViewsterIE(InfoExtractor):
fatal=False # m3u8 sometimes fail
))
else:
- formats.append({
+ format_id = media.get('Bitrate')
+ f = {
'url': video_url,
- })
+ 'format_id': 'mp4-%s' % format_id,
+ 'height': int_or_none(media.get('Height')),
+ 'width': int_or_none(media.get('Width')),
+ 'preference': 1,
+ }
+ if format_id and not f['height']:
+ f['height'] = int_or_none(self._search_regex(
+ r'^(\d+)[pP]$', format_id, 'height', default=None))
+ formats.append(f)
+
+ if not formats and not info.get('LanguageSets') and not info.get('VODSettings'):
+ self.raise_geo_restricted()
+
self._sort_formats(formats)
synopsis = info.get('Synopsis', {})
diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index 51cdc6b65..ddbd395c8 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -88,6 +88,14 @@ class VikiBaseIE(InfoExtractor):
if not self._token:
self.report_warning('Unable to get session token, login has probably failed')
+ @staticmethod
+ def dict_selection(dict_obj, preferred_key):
+ if preferred_key in dict_obj:
+ return dict_obj.get(preferred_key)
+
+ filtered_dict = list(filter(None, [dict_obj.get(k) for k in dict_obj.keys()]))
+ return filtered_dict[0] if filtered_dict else None
+
class VikiIE(VikiBaseIE):
IE_NAME = 'viki'
@@ -173,6 +181,19 @@ class VikiIE(VikiBaseIE):
}, {
'url': 'http://www.viki.com/player/44699v',
'only_matching': True,
+ }, {
+ # non-English description
+ 'url': 'http://www.viki.com/videos/158036v-love-in-magic',
+ 'md5': '1713ae35df5a521b31f6dc40730e7c9c',
+ 'info_dict': {
+ 'id': '158036v',
+ 'ext': 'mp4',
+ 'uploader': 'I Planet Entertainment',
+ 'upload_date': '20111122',
+ 'timestamp': 1321985454,
+ 'description': 'md5:44b1e46619df3a072294645c770cef36',
+ 'title': 'Love In Magic',
+ },
}]
def _real_extract(self, url):
@@ -181,19 +202,14 @@ class VikiIE(VikiBaseIE):
video = self._call_api(
'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
- title = None
- titles = video.get('titles')
- if titles:
- title = titles.get('en') or titles[titles.keys()[0]]
+ title = self.dict_selection(video.get('titles', {}), 'en')
if not title:
title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
- container_titles = video.get('container', {}).get('titles')
- if container_titles:
- container_title = container_titles.get('en') or container_titles[container_titles.keys()[0]]
- title = '%s - %s' % (container_title, title)
+ container_titles = video.get('container', {}).get('titles', {})
+ container_title = self.dict_selection(container_titles, 'en')
+ title = '%s - %s' % (container_title, title)
- descriptions = video.get('descriptions')
- description = descriptions.get('en') or descriptions[titles.keys()[0]] if descriptions else None
+ description = self.dict_selection(video.get('descriptions', {}), 'en')
duration = int_or_none(video.get('duration'))
timestamp = parse_iso8601(video.get('created_at'))
@@ -242,8 +258,8 @@ class VikiIE(VikiBaseIE):
formats = []
for format_id, stream_dict in streams.items():
- height = self._search_regex(
- r'^(\d+)[pP]$', format_id, 'height', default=None)
+ height = int_or_none(self._search_regex(
+ r'^(\d+)[pP]$', format_id, 'height', default=None))
for protocol, format_dict in stream_dict.items():
if format_id == 'm3u8':
formats = self._extract_m3u8_formats(
@@ -299,11 +315,9 @@ class VikiChannelIE(VikiBaseIE):
'containers/%s.json' % channel_id, channel_id,
'Downloading channel JSON')
- titles = channel['titles']
- title = titles.get('en') or titles[titles.keys()[0]]
+ title = self.dict_selection(channel['titles'], 'en')
- descriptions = channel['descriptions']
- description = descriptions.get('en') or descriptions[descriptions.keys()[0]]
+ description = self.dict_selection(channel['descriptions'], 'en')
entries = []
for video_type in ('episodes', 'clips', 'movies'):
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 10d6745af..0f84656c0 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -29,6 +29,7 @@ from ..utils import (
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
+ _LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
(username, password) = self._get_login_info()
@@ -37,21 +38,30 @@ class VimeoBaseInfoExtractor(InfoExtractor):
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
self.report_login()
- login_url = 'https://vimeo.com/log_in'
- webpage = self._download_webpage(login_url, None, False)
- token = self._search_regex(r'xsrft":"(.*?)"', webpage, 'login token')
+ webpage = self._download_webpage(self._LOGIN_URL, None, False)
+ token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
+ 'action': 'login',
'email': username,
'password': password,
- 'action': 'login',
'service': 'vimeo',
'token': token,
})
- login_request = compat_urllib_request.Request(login_url, data)
+ login_request = compat_urllib_request.Request(self._LOGIN_URL, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- login_request.add_header('Cookie', 'xsrft=%s' % token)
+ login_request.add_header('Cookie', 'vuid=%s' % vuid)
+ login_request.add_header('Referer', self._LOGIN_URL)
self._download_webpage(login_request, None, False, 'Wrong login info')
+ def _extract_xsrft_and_vuid(self, webpage):
+ xsrft = self._search_regex(
+ r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
+ webpage, 'login token', group='xsrft')
+ vuid = self._search_regex(
+ r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
+ webpage, 'vuid', group='vuid')
+ return xsrft, vuid
+
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
@@ -75,12 +85,12 @@ class VimeoIE(VimeoBaseInfoExtractor):
'info_dict': {
'id': '56015672',
'ext': 'mp4',
- "upload_date": "20121220",
- "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
- "uploader_id": "user7108434",
- "uploader": "Filippo Valsorda",
- "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
- "duration": 10,
+ 'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
+ 'description': 'md5:2d3305bad981a06ff79f027f19865021',
+ 'upload_date': '20121220',
+ 'uploader_id': 'user7108434',
+ 'uploader': 'Filippo Valsorda',
+ 'duration': 10,
},
},
{
@@ -93,7 +103,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
- 'description': 'md5:380943ec71b89736ff4bf27183233d09',
+ 'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30',
'duration': 1595,
},
},
@@ -147,7 +157,6 @@ class VimeoIE(VimeoBaseInfoExtractor):
},
{
'url': 'http://vimeo.com/76979871',
- 'md5': '3363dd6ffebe3784d56f4132317fd446',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
@@ -193,7 +202,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
password = self._downloader.params.get('videopassword', None)
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
- token = self._search_regex(r'xsrft[\s=:"\']+([^"\']+)', webpage, 'login token')
+ token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata({
'password': password,
'token': token,
@@ -203,7 +212,8 @@ class VimeoIE(VimeoBaseInfoExtractor):
url = url.replace('http://', 'https://')
password_request = compat_urllib_request.Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- password_request.add_header('Cookie', 'xsrft=%s' % token)
+ password_request.add_header('Cookie', 'clip_test2=1; vuid=%s' % vuid)
+ password_request.add_header('Referer', url)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
@@ -276,7 +286,17 @@ class VimeoIE(VimeoBaseInfoExtractor):
try:
try:
config_url = self._html_search_regex(
- r' data-config-url="(.+?)"', webpage, 'config URL')
+ r' data-config-url="(.+?)"', webpage,
+ 'config URL', default=None)
+ if not config_url:
+ # Sometimes a new react-based page is served instead of the old one, which requires
+ # a different config URL extraction approach (see
+ # https://github.com/rg3/youtube-dl/pull/7209)
+ vimeo_clip_page_config = self._search_regex(
+ r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage,
+ 'vimeo clip page config')
+ config_url = self._parse_json(
+ vimeo_clip_page_config, video_id)['player']['config_url']
config_json = self._download_webpage(config_url, video_id)
config = json.loads(config_json)
except RegexNotFoundError:
@@ -386,14 +406,20 @@ class VimeoIE(VimeoBaseInfoExtractor):
'ext': codec_extension,
'url': video_url,
'format_id': format_id,
- 'width': file_info.get('width'),
- 'height': file_info.get('height'),
+ 'width': int_or_none(file_info.get('width')),
+ 'height': int_or_none(file_info.get('height')),
+ 'tbr': int_or_none(file_info.get('bitrate')),
})
formats = []
+ m3u8_url = config_files.get('hls', {}).get('all')
+ if m3u8_url:
+ m3u8_formats = self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', 'm3u8_native', 0, 'hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
for key in ('other', 'sd', 'hd'):
formats += files[key]
- if len(formats) == 0:
- raise ExtractorError('No known codec found')
+ self._sort_formats(formats)
subtitles = {}
text_tracks = config['request'].get('text_tracks')
@@ -422,10 +448,11 @@ class VimeoIE(VimeoBaseInfoExtractor):
}
-class VimeoChannelIE(InfoExtractor):
+class VimeoChannelIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:channel'
_VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
_MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
+ _TITLE = None
_TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
_TESTS = [{
'url': 'https://vimeo.com/channels/tributes',
@@ -440,7 +467,7 @@ class VimeoChannelIE(InfoExtractor):
return '%s/videos/page:%d/' % (base_url, pagenum)
def _extract_list_title(self, webpage):
- return self._html_search_regex(self._TITLE_RE, webpage, 'list title')
+ return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title')
def _login_list_password(self, page_url, list_id, webpage):
login_form = self._search_regex(
@@ -453,7 +480,7 @@ class VimeoChannelIE(InfoExtractor):
if password is None:
raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
fields = self._hidden_inputs(login_form)
- token = self._search_regex(r'xsrft[\s=:"\']+([^"\']+)', webpage, 'login token')
+ token, vuid = self._extract_xsrft_and_vuid(webpage)
fields['token'] = token
fields['password'] = password
post = urlencode_postdata(fields)
@@ -462,6 +489,7 @@ class VimeoChannelIE(InfoExtractor):
password_url = compat_urlparse.urljoin(page_url, password_path)
password_request = compat_urllib_request.Request(password_url, post)
password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
+ password_request.add_header('Cookie', 'vuid=%s' % vuid)
self._set_cookie('vimeo.com', 'xsrft', token)
return self._download_webpage(
@@ -499,7 +527,7 @@ class VimeoChannelIE(InfoExtractor):
class VimeoUserIE(VimeoChannelIE):
IE_NAME = 'vimeo:user'
- _VALID_URL = r'https://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
+ _VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
_TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
_TESTS = [{
'url': 'https://vimeo.com/nkistudio/videos',
@@ -603,14 +631,14 @@ class VimeoReviewIE(InfoExtractor):
return self.url_result(player_url, 'Vimeo', video_id)
-class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
+class VimeoWatchLaterIE(VimeoChannelIE):
IE_NAME = 'vimeo:watchlater'
IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
- _VALID_URL = r'https://vimeo\.com/home/watchlater|:vimeowatchlater'
+ _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
+ _TITLE = 'Watch Later'
_LOGIN_REQUIRED = True
- _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
_TESTS = [{
- 'url': 'https://vimeo.com/home/watchlater',
+ 'url': 'https://vimeo.com/watchlater',
'only_matching': True,
}]
@@ -626,7 +654,7 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
return request
def _real_extract(self, url):
- return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
+ return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(InfoExtractor):
diff --git a/youtube_dl/extractor/vine.py b/youtube_dl/extractor/vine.py
index c733a48fa..be72f3147 100644
--- a/youtube_dl/extractor/vine.py
+++ b/youtube_dl/extractor/vine.py
@@ -1,10 +1,14 @@
+# coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..utils import (
+ int_or_none,
+ unified_strdate,
+)
class VineIE(InfoExtractor):
@@ -17,10 +21,12 @@ class VineIE(InfoExtractor):
'ext': 'mp4',
'title': 'Chicken.',
'alt_title': 'Vine by Jack Dorsey',
- 'description': 'Chicken.',
'upload_date': '20130519',
'uploader': 'Jack Dorsey',
'uploader_id': '76',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
},
}, {
'url': 'https://vine.co/v/MYxVapFvz2z',
@@ -29,11 +35,13 @@ class VineIE(InfoExtractor):
'id': 'MYxVapFvz2z',
'ext': 'mp4',
'title': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
- 'alt_title': 'Vine by Luna',
- 'description': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
+ 'alt_title': 'Vine by Mars Ruiz',
'upload_date': '20140815',
- 'uploader': 'Luna',
+ 'uploader': 'Mars Ruiz',
'uploader_id': '1102363502380728320',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
},
}, {
'url': 'https://vine.co/v/bxVjBbZlPUH',
@@ -43,14 +51,33 @@ class VineIE(InfoExtractor):
'ext': 'mp4',
'title': '#mw3 #ac130 #killcam #angelofdeath',
'alt_title': 'Vine by Z3k3',
- 'description': '#mw3 #ac130 #killcam #angelofdeath',
'upload_date': '20130430',
'uploader': 'Z3k3',
'uploader_id': '936470460173008896',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
},
}, {
'url': 'https://vine.co/oembed/MYxVapFvz2z.json',
'only_matching': True,
+ }, {
+ 'url': 'https://vine.co/v/e192BnZnZ9V',
+ 'info_dict': {
+ 'id': 'e192BnZnZ9V',
+ 'ext': 'mp4',
+ 'title': 'ยิ้ม~ เขิน~ อาย~ น่าร้ากอ้ะ >//< @n_whitewo @orlameena #lovesicktheseries #lovesickseason2',
+ 'alt_title': 'Vine by Pimry_zaa',
+ 'upload_date': '20150705',
+ 'uploader': 'Pimry_zaa',
+ 'uploader_id': '1135760698325307392',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
@@ -65,25 +92,26 @@ class VineIE(InfoExtractor):
formats = [{
'format_id': '%(format)s-%(rate)s' % f,
- 'vcodec': f['format'],
- 'quality': f['rate'],
+ 'vcodec': f.get('format'),
+ 'quality': f.get('rate'),
'url': f['videoUrl'],
- } for f in data['videoUrls']]
+ } for f in data['videoUrls'] if f.get('videoUrl')]
self._sort_formats(formats)
+ username = data.get('username')
+
return {
'id': video_id,
- 'title': self._og_search_title(webpage),
- 'alt_title': self._og_search_description(webpage, default=None),
- 'description': data['description'],
- 'thumbnail': data['thumbnailUrl'],
- 'upload_date': unified_strdate(data['created']),
- 'uploader': data['username'],
- 'uploader_id': data['userIdStr'],
- 'like_count': data['likes']['count'],
- 'comment_count': data['comments']['count'],
- 'repost_count': data['reposts']['count'],
+ 'title': data.get('description') or self._og_search_title(webpage),
+ 'alt_title': 'Vine by %s' % username if username else self._og_search_description(webpage, default=None),
+ 'thumbnail': data.get('thumbnailUrl'),
+ 'upload_date': unified_strdate(data.get('created')),
+ 'uploader': username,
+ 'uploader_id': data.get('userIdStr'),
+ 'like_count': int_or_none(data.get('likes', {}).get('count')),
+ 'comment_count': int_or_none(data.get('comments', {}).get('count')),
+ 'repost_count': int_or_none(data.get('reposts', {}).get('count')),
'formats': formats,
}
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index c30c5a8e5..765e9e6fd 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -17,6 +17,7 @@ from ..utils import (
unescapeHTML,
unified_strdate,
)
+from .vimeo import VimeoIE
class VKIE(InfoExtractor):
@@ -249,6 +250,10 @@ class VKIE(InfoExtractor):
if youtube_url:
return self.url_result(youtube_url, 'Youtube')
+ vimeo_url = VimeoIE._extract_vimeo_url(url, info_page)
+ if vimeo_url is not None:
+ return self.url_result(vimeo_url)
+
m_rutube = re.search(
r'\ssrc="((?:https?:)?//rutube\.ru\\?/video\\?/embed(?:.*?))\\?"', info_page)
if m_rutube is not None:
diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py
new file mode 100644
index 000000000..86c1cb5ef
--- /dev/null
+++ b/youtube_dl/extractor/vlive.py
@@ -0,0 +1,86 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import hmac
+from hashlib import sha1
+from base64 import b64encode
+from time import time
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ determine_ext
+)
+from ..compat import compat_urllib_parse
+
+
+class VLiveIE(InfoExtractor):
+ IE_NAME = 'vlive'
+ # www.vlive.tv/video/ links redirect to m.vlive.tv/video/ for mobile devices
+ _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://m.vlive.tv/video/1326',
+ 'md5': 'cc7314812855ce56de70a06a27314983',
+ 'info_dict': {
+ 'id': '1326',
+ 'ext': 'mp4',
+ 'title': '[V] Girl\'s Day\'s Broadcast',
+ 'creator': 'Girl\'s Day',
+ },
+ }
+ _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH'
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(
+ 'http://m.vlive.tv/video/%s' % video_id,
+ video_id, note='Download video page')
+
+ title = self._og_search_title(webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+ creator = self._html_search_regex(
+ r'<span[^>]+class="name">([^<>]+)</span>', webpage, 'creator')
+
+ url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' % video_id
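+ # Sign the request: msgpad is the current time in milliseconds, md is a base64-encoded
+ # HMAC-SHA1 of the first 255 characters of the URL plus msgpad, keyed with _SECRET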
+ msgpad = '%.0f' % (time() * 1000)
+ md = b64encode(
+ hmac.new(self._SECRET.encode('ascii'),
+ (url[:255] + msgpad).encode('ascii'), sha1).digest()
+ )
+ url += '&' + compat_urllib_parse.urlencode({'msgpad': msgpad, 'md': md})
+ playinfo = self._download_json(url, video_id, 'Downloading video json')
+
+ if playinfo.get('message', '') != 'success':
+ raise ExtractorError(playinfo.get('message', 'JSON request unsuccessful'))
+
+ if not playinfo.get('result'):
+ raise ExtractorError('No videos found.')
+
+ formats = []
+ for vid in playinfo['result'].get('videos', {}).get('list', []):
+ formats.append({
+ 'url': vid['source'],
+ 'ext': 'mp4',
+ 'abr': vid.get('bitrate', {}).get('audio'),
+ 'vbr': vid.get('bitrate', {}).get('video'),
+ 'format_id': vid['encodingOption']['name'],
+ 'height': vid.get('height'),
+ 'width': vid.get('width'),
+ })
+ self._sort_formats(formats)
+
+ subtitles = {}
+ for caption in playinfo['result'].get('captions', {}).get('list', []):
+ subtitles[caption['language']] = [
+ {'ext': determine_ext(caption['source'], default_ext='vtt'),
+ 'url': caption['source']}]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'creator': creator,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
diff --git a/youtube_dl/extractor/washingtonpost.py b/youtube_dl/extractor/washingtonpost.py
index 72eb010f8..ec8b99998 100644
--- a/youtube_dl/extractor/washingtonpost.py
+++ b/youtube_dl/extractor/washingtonpost.py
@@ -19,25 +19,25 @@ class WashingtonPostIE(InfoExtractor):
'title': 'Sinkhole of bureaucracy',
},
'playlist': [{
- 'md5': '79132cc09ec5309fa590ae46e4cc31bc',
+ 'md5': 'b9be794ceb56c7267d410a13f99d801a',
'info_dict': {
'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'Breaking Points: The Paper Mine',
- 'duration': 1287,
+ 'duration': 1290,
'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.',
'uploader': 'The Washington Post',
'timestamp': 1395527908,
'upload_date': '20140322',
},
}, {
- 'md5': 'e1d5734c06865cc504ad99dc2de0d443',
+ 'md5': '1fff6a689d8770966df78c8cb6c8c17c',
'info_dict': {
'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'The town bureaucracy sustains',
'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.',
- 'duration': 2217,
+ 'duration': 2220,
'timestamp': 1395528005,
'upload_date': '20140322',
'uploader': 'The Washington Post',
diff --git a/youtube_dl/extractor/wimp.py b/youtube_dl/extractor/wimp.py
index f69d46a28..e4f50e64c 100644
--- a/youtube_dl/extractor/wimp.py
+++ b/youtube_dl/extractor/wimp.py
@@ -1,40 +1,33 @@
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from .youtube import YoutubeIE
class WimpIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?wimp\.com/([^/]+)/'
+ _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.wimp.com/maruexhausted/',
- 'md5': 'f1acced123ecb28d9bb79f2479f2b6a1',
+ 'md5': 'ee21217ffd66d058e8b16be340b74883',
'info_dict': {
'id': 'maruexhausted',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Maru is exhausted.',
'description': 'md5:57e099e857c0a4ea312542b684a869b8',
}
}, {
- # youtube video
'url': 'http://www.wimp.com/clowncar/',
+ 'md5': '4e2986c793694b55b37cf92521d12bb4',
'info_dict': {
- 'id': 'cG4CEr2aiSg',
+ 'id': 'clowncar',
'ext': 'mp4',
- 'title': 'Basset hound clown car...incredible!',
- 'description': 'md5:8d228485e0719898c017203f900b3a35',
- 'uploader': 'Gretchen Hoey',
- 'uploader_id': 'gretchenandjeff1',
- 'upload_date': '20140303',
+ 'title': 'It\'s like a clown car.',
+ 'description': 'md5:0e56db1370a6e49c5c1d19124c0d2fb2',
},
- 'add_ie': ['Youtube'],
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
[r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"],
diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index b4ad513a0..8938c0e45 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -4,7 +4,6 @@ import re
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
unified_strdate,
str_to_int,
int_or_none,
@@ -22,7 +21,7 @@ class XHamsterIE(InfoExtractor):
'ext': 'mp4',
'title': 'FemaleAgent Shy beauty takes the bait',
'upload_date': '20121014',
- 'uploader_id': 'Ruseful2011',
+ 'uploader': 'Ruseful2011',
'duration': 893,
'age_limit': 18,
}
@@ -34,7 +33,7 @@ class XHamsterIE(InfoExtractor):
'ext': 'mp4',
'title': 'Britney Spears Sexy Booty',
'upload_date': '20130914',
- 'uploader_id': 'jojo747400',
+ 'uploader': 'jojo747400',
'duration': 200,
'age_limit': 18,
}
@@ -46,12 +45,12 @@ class XHamsterIE(InfoExtractor):
]
def _real_extract(self, url):
- def extract_video_url(webpage):
- mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage)
- if mp4 is None:
- raise ExtractorError('Unable to extract media URL')
- else:
- return mp4.group(1)
+ def extract_video_url(webpage, name):
+ return self._search_regex(
+ [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
+ r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
+ r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
+ webpage, name, group='mp4')
def is_hd(webpage):
return '<div class=\'icon iconHD\'' in webpage
@@ -64,7 +63,9 @@ class XHamsterIE(InfoExtractor):
mrss_url = '%s://xhamster.com/movies/%s/%s.html' % (proto, video_id, seo)
webpage = self._download_webpage(mrss_url, video_id)
- title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage, 'title')
+ title = self._html_search_regex(
+ [r'<title>(?P<title>.+?)(?:, (?:[^,]+? )?Porn: xHamster| - xHamster\.com)</title>',
+ r'<h1>([^<]+)</h1>'], webpage, 'title')
# Only a few videos have a description
mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
@@ -75,10 +76,14 @@ class XHamsterIE(InfoExtractor):
if upload_date:
upload_date = unified_strdate(upload_date)
- uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
- webpage, 'uploader id', default='anonymous')
+ uploader = self._html_search_regex(
+ r"<a href='[^']+xhamster\.com/user/[^>]+>(?P<uploader>[^<]+)",
+ webpage, 'uploader', default='anonymous')
- thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False)
+ thumbnail = self._search_regex(
+ [r'''thumb\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
+ r'''<video[^>]+poster=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
+ webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
webpage, 'duration', fatal=False))
@@ -97,7 +102,9 @@ class XHamsterIE(InfoExtractor):
hd = is_hd(webpage)
- video_url = extract_video_url(webpage)
+ format_id = 'hd' if hd else 'sd'
+
+ video_url = extract_video_url(webpage, format_id)
formats = [{
'url': video_url,
'format_id': 'hd' if hd else 'sd',
@@ -108,7 +115,7 @@ class XHamsterIE(InfoExtractor):
mrss_url = self._search_regex(r'<link rel="canonical" href="([^"]+)', webpage, 'mrss_url')
webpage = self._download_webpage(mrss_url + '?hd', video_id, note='Downloading HD webpage')
if is_hd(webpage):
- video_url = extract_video_url(webpage)
+ video_url = extract_video_url(webpage, 'hd')
formats.append({
'url': video_url,
'format_id': 'hd',
@@ -122,7 +129,7 @@ class XHamsterIE(InfoExtractor):
'title': title,
'description': description,
'upload_date': upload_date,
- 'uploader_id': uploader_id,
+ 'uploader': uploader,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
diff --git a/youtube_dl/extractor/xuite.py b/youtube_dl/extractor/xuite.py
index 5aac8adb3..8bbac54e2 100644
--- a/youtube_dl/extractor/xuite.py
+++ b/youtube_dl/extractor/xuite.py
@@ -19,7 +19,7 @@ class XuiteIE(InfoExtractor):
_TESTS = [{
# Audio
'url': 'http://vlog.xuite.net/play/RGkzc1ZULTM4NjA5MTQuZmx2',
- 'md5': '63a42c705772aa53fd4c1a0027f86adf',
+ 'md5': 'e79284c87b371424885448d11f6398c8',
'info_dict': {
'id': '3860914',
'ext': 'mp3',
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index f9afbdbab..fca5ddc69 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -101,7 +101,7 @@ class YahooIE(InfoExtractor):
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
- 'md5': '67010fdf3a08d290e060a4dd96baa07b',
+ 'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
@@ -144,6 +144,17 @@ class YahooIE(InfoExtractor):
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
+ }, {
+ # Query result is embedded in the webpage, but an explicit request to the video API fails with geo restriction
+ 'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
+ 'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
+ 'info_dict': {
+ 'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
+ 'ext': 'mp4',
+ 'title': 'Communitary - Community Episode 1: Ladders',
+ 'description': 'md5:8fc39608213295748e1e289807838c97',
+ 'duration': 1646,
+ },
}
]
@@ -171,6 +182,19 @@ class YahooIE(InfoExtractor):
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
+ # The query result is often embedded in the webpage as JSON. An explicit request
+ # to the video API sometimes fails with a geo restriction, so using the
+ # embedded query result when present sounds reasonable.
+ config_json = self._search_regex(
+ r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
+ webpage, 'videoplayer applet', default=None)
+ if config_json:
+ config = self._parse_json(config_json, display_id, fatal=False)
+ if config:
+ sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
+ if sapi:
+ return self._extract_info(display_id, sapi, webpage)
+
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
@@ -190,22 +214,10 @@ class YahooIE(InfoExtractor):
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
- def _get_info(self, video_id, display_id, webpage):
- region = self._search_regex(
- r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
- webpage, 'region', fatal=False, default='US')
- data = compat_urllib_parse.urlencode({
- 'protocol': 'http',
- 'region': region,
- })
- query_url = (
- 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
- '{id}?{data}'.format(id=video_id, data=data))
- query_result = self._download_json(
- query_url, display_id, 'Downloading video info')
-
- info = query_result['query']['results']['mediaObj'][0]
+ def _extract_info(self, display_id, query, webpage):
+ info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
+ video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
@@ -231,6 +243,9 @@ class YahooIE(InfoExtractor):
'ext': 'flv',
})
else:
+ if s.get('format') == 'm3u8_playlist':
+ format_info['protocol'] = 'm3u8_native'
+ format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
@@ -264,6 +279,21 @@ class YahooIE(InfoExtractor):
'subtitles': subtitles,
}
+ def _get_info(self, video_id, display_id, webpage):
+ region = self._search_regex(
+ r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
+ webpage, 'region', fatal=False, default='US')
+ data = compat_urllib_parse.urlencode({
+ 'protocol': 'http',
+ 'region': region,
+ })
+ query_url = (
+ 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
+ '{id}?{data}'.format(id=video_id, data=data))
+ query_result = self._download_json(
+ query_url, display_id, 'Downloading video info')
+ return self._extract_info(display_id, query_result, webpage)
+
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index f4c0f5702..08dc81f3a 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -1,18 +1,38 @@
-# coding=utf-8
+# coding: utf-8
from __future__ import unicode_literals
import re
import hashlib
from .common import InfoExtractor
-from ..compat import compat_str
+from ..compat import (
+ compat_str,
+ compat_urllib_parse,
+ compat_urllib_request,
+)
from ..utils import (
int_or_none,
float_or_none,
)
-class YandexMusicBaseIE(InfoExtractor):
+class YandexMusicTrackIE(InfoExtractor):
+ IE_NAME = 'yandexmusic:track'
+ IE_DESC = 'Яндекс.Музыка - Трек'
+ _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)'
+
+ _TEST = {
+ 'url': 'http://music.yandex.ru/album/540508/track/4878838',
+ 'md5': 'f496818aa2f60b6c0062980d2e00dc20',
+ 'info_dict': {
+ 'id': '4878838',
+ 'ext': 'mp3',
+ 'title': 'Carlo Ambrosio - Gypsy Eyes 1',
+ 'filesize': 4628061,
+ 'duration': 193.04,
+ }
+ }
+
def _get_track_url(self, storage_dir, track_id):
data = self._download_json(
'http://music.yandex.ru/api/v1.5/handlers/api-jsonp.jsx?action=getTrackSrc&p=download-info/%s'
@@ -26,6 +46,12 @@ class YandexMusicBaseIE(InfoExtractor):
% (data['host'], key, data['ts'] + data['path'], storage[1]))
def _get_track_info(self, track):
+ thumbnail = None
+ cover_uri = track.get('albums', [{}])[0].get('coverUri')
+ if cover_uri:
+ thumbnail = cover_uri.replace('%%', 'orig')
+ if not thumbnail.startswith('http'):
+ thumbnail = 'http://' + thumbnail
return {
'id': track['id'],
'ext': 'mp3',
@@ -33,26 +59,9 @@ class YandexMusicBaseIE(InfoExtractor):
'title': '%s - %s' % (track['artists'][0]['name'], track['title']),
'filesize': int_or_none(track.get('fileSize')),
'duration': float_or_none(track.get('durationMs'), 1000),
+ 'thumbnail': thumbnail,
}
-
-class YandexMusicTrackIE(YandexMusicBaseIE):
- IE_NAME = 'yandexmusic:track'
- IE_DESC = 'Яндекс.Музыка - Трек'
- _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)'
-
- _TEST = {
- 'url': 'http://music.yandex.ru/album/540508/track/4878838',
- 'md5': 'f496818aa2f60b6c0062980d2e00dc20',
- 'info_dict': {
- 'id': '4878838',
- 'ext': 'mp3',
- 'title': 'Carlo Ambrosio - Gypsy Eyes 1',
- 'filesize': 4628061,
- 'duration': 193.04,
- }
- }
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
album_id, track_id = mobj.group('album_id'), mobj.group('id')
@@ -64,7 +73,15 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
return self._get_track_info(track)
-class YandexMusicAlbumIE(YandexMusicBaseIE):
+class YandexMusicPlaylistBaseIE(InfoExtractor):
+ def _build_playlist(self, tracks):
+ return [
+ self.url_result(
+ 'http://music.yandex.ru/album/%s/track/%s' % (track['albums'][0]['id'], track['id']))
+ for track in tracks if track.get('albums') and isinstance(track.get('albums'), list)]
+
+
+class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE):
IE_NAME = 'yandexmusic:album'
IE_DESC = 'Яндекс.Музыка - Альбом'
_VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<id>\d+)/?(\?|$)'
@@ -85,7 +102,7 @@ class YandexMusicAlbumIE(YandexMusicBaseIE):
'http://music.yandex.ru/handlers/album.jsx?album=%s' % album_id,
album_id, 'Downloading album JSON')
- entries = [self._get_track_info(track) for track in album['volumes'][0]]
+ entries = self._build_playlist(album['volumes'][0])
title = '%s - %s' % (album['artists'][0]['name'], album['title'])
year = album.get('year')
@@ -95,12 +112,12 @@ class YandexMusicAlbumIE(YandexMusicBaseIE):
return self.playlist_result(entries, compat_str(album['id']), title)
-class YandexMusicPlaylistIE(YandexMusicBaseIE):
+class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
IE_NAME = 'yandexmusic:playlist'
IE_DESC = 'Яндекс.Музыка - Плейлист'
_VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/users/[^/]+/playlists/(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://music.yandex.ru/users/music.partners/playlists/1245',
'info_dict': {
'id': '1245',
@@ -108,20 +125,54 @@ class YandexMusicPlaylistIE(YandexMusicBaseIE):
'description': 'md5:3b9f27b0efbe53f2ee1e844d07155cc9',
},
'playlist_count': 6,
- }
+ }, {
+ # playlist exceeding the limit of 150 tracks shipped with webpage (see
+ # https://github.com/rg3/youtube-dl/issues/6666)
+ 'url': 'https://music.yandex.ru/users/ya.playlist/playlists/1036',
+ 'info_dict': {
+ 'id': '1036',
+ 'title': 'Музыка 90-х',
+ },
+ 'playlist_count': 310,
+ }]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
- playlist = self._parse_json(
+ mu = self._parse_json(
self._search_regex(
r'var\s+Mu\s*=\s*({.+?});\s*</script>', webpage, 'player'),
- playlist_id)['pageData']['playlist']
-
- entries = [self._get_track_info(track) for track in playlist['tracks']]
+ playlist_id)
+
+ playlist = mu['pageData']['playlist']
+ tracks, track_ids = playlist['tracks'], playlist['trackIds']
+
+ # The tracks dictionary shipped with the webpage is limited to 150 tracks,
+ # so missing tracks have to be retrieved manually.
+ if len(tracks) < len(track_ids):
+ present_track_ids = set([compat_str(track['id']) for track in tracks if track.get('id')])
+ missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
+ request = compat_urllib_request.Request(
+ 'https://music.yandex.ru/handlers/track-entries.jsx',
+ compat_urllib_parse.urlencode({
+ 'entries': ','.join(missing_track_ids),
+ 'lang': mu.get('settings', {}).get('lang', 'en'),
+ 'external-domain': 'music.yandex.ru',
+ 'overembed': 'false',
+ 'sign': mu.get('authData', {}).get('user', {}).get('sign'),
+ 'strict': 'true',
+ }).encode('utf-8'))
+ request.add_header('Referer', url)
+ request.add_header('X-Requested-With', 'XMLHttpRequest')
+
+ missing_tracks = self._download_json(
+ request, playlist_id, 'Downloading missing tracks JSON', fatal=False)
+ if missing_tracks:
+ tracks.extend(missing_tracks)
return self.playlist_result(
- entries, compat_str(playlist_id),
+ self._build_playlist(tracks),
+ compat_str(playlist_id),
playlist['title'], playlist.get('description'))
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index 78caeb8b3..2e81d9223 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -49,6 +49,17 @@ class YoukuIE(InfoExtractor):
},
'playlist_count': 13,
'skip': 'Available in China only',
+ }, {
+ 'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
+ 'note': 'Video protected with password',
+ 'info_dict': {
+ 'id': 'XNjA1NzA2Njgw',
+ 'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起',
+ },
+ 'playlist_count': 19,
+ 'params': {
+ 'videopassword': '100600',
+ },
}]
def construct_video_urls(self, data1, data2):
@@ -185,9 +196,15 @@ class YoukuIE(InfoExtractor):
raw_data = self._download_json(req, video_id, note=note)
return raw_data['data'][0]
+ video_password = self._downloader.params.get('videopassword', None)
+
# request basic data
+ basic_data_url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id
+ if video_password:
+ basic_data_url += '?password=%s' % video_password
+
data1 = retrieve_data(
- 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id,
+ basic_data_url,
'Downloading JSON metadata 1')
data2 = retrieve_data(
'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id,
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 3d8b31f98..08e821362 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -19,21 +19,27 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
+ compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
+ encode_dict,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
orderedSet,
+ parse_duration,
+ remove_start,
+ smuggle_url,
str_to_int,
unescapeHTML,
unified_strdate,
+ unsmuggle_url,
uppercase_escape,
ISO3166Utils,
)
@@ -42,7 +48,7 @@ from ..utils import (
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
- _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
+ _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
@@ -106,10 +112,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'hl': 'en_US',
}
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
+ login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
@@ -124,42 +127,25 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
# Two-Factor
# TODO add SMS and phone call support - these require making a request and then prompting the user
- if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
- tfa_code = self._get_tfa_info()
+ if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
+ tfa_code = self._get_tfa_info('2-step verification code')
- if tfa_code is None:
- self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
- self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
+ if not tfa_code:
+ self._downloader.report_warning(
+ 'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
+ '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
- # Unlike the first login form, secTok and timeStmp are both required for the TFA form
-
- match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
- if match is None:
- self._downloader.report_warning('Failed to get secTok - did the page structure change?')
- secTok = match.group(1)
- match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
- if match is None:
- self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
- timeStmp = match.group(1)
-
- tfa_form_strs = {
- 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
- 'smsToken': '',
- 'smsUserPin': tfa_code,
- 'smsVerifyPin': 'Verify',
-
- 'PersistentCookie': 'yes',
- 'checkConnection': '',
- 'checkedDomains': 'youtube',
- 'pstMsg': '1',
- 'secTok': secTok,
- 'timeStmp': timeStmp,
- 'service': 'youtube',
- 'hl': 'en_US',
- }
- tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
- tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
+ tfa_code = remove_start(tfa_code, 'G-')
+
+ tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
+
+ tfa_form_strs.update({
+ 'Pin': tfa_code,
+ 'TrustDevice': 'on',
+ })
+
+ tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
@@ -169,8 +155,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
if tfa_results is False:
return False
- if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
- self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
+ if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
+ self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
self._downloader.report_warning('unable to log in - did the page structure change?')
@@ -192,6 +178,52 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
return
+class YoutubePlaylistBaseInfoExtractor(InfoExtractor):
+ # Extract the video ids from the playlist pages
+ def _entries(self, page, playlist_id):
+ more_widget_html = content_html = page
+ for page_num in itertools.count(1):
+ for video_id, video_title in self.extract_videos_from_page(content_html):
+ yield self.url_result(
+ video_id, 'Youtube', video_id=video_id,
+ video_title=video_title)
+
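+ # The "Load more" button, when present, carries the relative URL of the next results page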
+ mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+ if not mobj:
+ break
+
+ more = self._download_json(
+ 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
+ 'Downloading page #%s' % page_num,
+ transform_source=uppercase_escape)
+ content_html = more['content_html']
+ if not content_html.strip():
+ # Some webpages show a "Load more" button but they don't
+ # have more videos
+ break
+ more_widget_html = more['load_more_widget_html']
+
+ def extract_videos_from_page(self, page):
+ ids_in_page = []
+ titles_in_page = []
+ for mobj in re.finditer(self._VIDEO_RE, page):
+ # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
+ if 'index' in mobj.groupdict() and mobj.group('id') == '0':
+ continue
+ video_id = mobj.group('id')
+ video_title = unescapeHTML(mobj.group('title'))
+ if video_title:
+ video_title = video_title.strip()
+ try:
+ idx = ids_in_page.index(video_id)
+ if video_title and not titles_in_page[idx]:
+ titles_in_page[idx] = video_title
+ except ValueError:
+ ids_in_page.append(video_id)
+ titles_in_page.append(video_title)
+ return zip(ids_in_page, titles_in_page)
+
+
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
@@ -209,11 +241,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
- (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
+ (?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))
- |youtu\.be/ # just youtu.be/xxxx
+ |(?:
+ youtu\.be| # just youtu.be/xxxx
+ vid\.plus # or vid.plus/xxxx
+ )/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
@@ -279,13 +314,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
- '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
+ '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
@@ -295,11 +330,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
- '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
- '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
- '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'},
- '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+ '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
+ '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
+ '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
+ '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
+ '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
@@ -317,7 +352,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube'
_TESTS = [
{
- 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
+ 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
@@ -327,8 +362,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
+ 'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
+ 'start_time': 1,
+ 'end_time': 9,
}
},
{
@@ -339,7 +377,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
- 'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
+ 'description': 'md5:782e8651347686cba06e58f71ab51773',
+ 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
+ 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
+ 'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
}
@@ -355,6 +396,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
+ 'age_limit': 18,
}
},
{
@@ -371,6 +413,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
}
},
{
+ 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
+ 'note': 'Use the first video ID in the URL',
+ 'info_dict': {
+ 'id': 'BaW_jenozKc',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
+ 'uploader': 'Philipp Hagemeister',
+ 'uploader_id': 'phihag',
+ 'upload_date': '20121002',
+ 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
+ 'categories': ['Science & Technology'],
+ 'tags': ['youtube-dl'],
+ 'like_count': int,
+ 'dislike_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ {
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
@@ -411,7 +473,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
- 'description': 'md5:2acfda1b285bdd478ccec22f9918199d',
+ 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
@@ -445,6 +507,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'upload_date': '20140605',
+ 'age_limit': 18,
},
},
# Age-gate video with encrypted signature
@@ -458,6 +521,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'upload_date': '20110629',
+ 'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
@@ -482,7 +546,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
- 'upload_date': '20120731',
+ 'upload_date': '20120724',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
@@ -511,7 +575,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
- 'ext': 'mp4',
+ 'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
@@ -554,6 +618,63 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'format': '135', # bestvideo
}
},
+ {
+ # Multifeed videos (multiple cameras), URL is for Main Camera
+ 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
+ 'info_dict': {
+ 'id': 'jqWvoWXjCVs',
+ 'title': 'teamPGP: Rocket League Noob Stream',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ },
+ 'playlist': [{
+ 'info_dict': {
+ 'id': 'jqWvoWXjCVs',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }, {
+ 'info_dict': {
+ 'id': '6h8e8xoXJzg',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }, {
+ 'info_dict': {
+ 'id': 'PUOgX5z9xZw',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }, {
+ 'info_dict': {
+ 'id': 'teuwxikvS5k',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (zim)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }],
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://vid.plus/FlRa-iH7PGw',
+ 'only_matching': True,
+ }
]
def __init__(self, *args, **kwargs):
@@ -582,7 +703,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
- r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
+ r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
@@ -885,10 +1006,24 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return formats
def _real_extract(self, url):
+ url, smuggled_data = unsmuggle_url(url, {})
+
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
+ start_time = None
+ end_time = None
+ parsed_url = compat_urllib_parse_urlparse(url)
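+ # t/start/end may appear either in the URL fragment or in the query string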
+ for component in [parsed_url.fragment, parsed_url.query]:
+ query = compat_parse_qs(component)
+ if start_time is None and 't' in query:
+ start_time = parse_duration(query['t'][0])
+ if start_time is None and 'start' in query:
+ start_time = parse_duration(query['start'][0])
+ if end_time is None and 'end' in query:
+ end_time = parse_duration(query['end'][0])
+
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
@@ -967,7 +1102,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
video_id, note=False,
errnote='unable to download video info webpage')
get_video_info = compat_parse_qs(video_info_webpage)
- add_dash_mpd(get_video_info)
+ if get_video_info.get('use_cipher_signature') != ['True']:
+ add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
@@ -976,7 +1112,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
- if regions_allowed is not None:
+ if regions_allowed:
raise ExtractorError('YouTube said: This video is available in %s only' % (
', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
expected=True)
@@ -988,6 +1124,55 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'"token" parameter not in video info for unknown reason',
video_id=video_id)
+ # title
+ if 'title' in video_info:
+ video_title = video_info['title'][0]
+ else:
+ self._downloader.report_warning('Unable to extract video title')
+ video_title = '_'
+
+ # description
+ video_description = get_element_by_id("eow-description", video_webpage)
+ if video_description:
+ video_description = re.sub(r'''(?x)
+ <a\s+
+ (?:[a-zA-Z-]+="[^"]+"\s+)*?
+ title="([^"]+)"\s+
+ (?:[a-zA-Z-]+="[^"]+"\s+)*?
+ class="yt-uix-redirect-link"\s*>
+ [^<]+
+ </a>
+ ''', r'\1', video_description)
+ video_description = clean_html(video_description)
+ else:
+ fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
+ if fd_mobj:
+ video_description = unescapeHTML(fd_mobj.group(1))
+ else:
+ video_description = ''
+
+ if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
+ if not self._downloader.params.get('noplaylist'):
+ entries = []
+ feed_ids = []
+ multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
+ for feed in multifeed_metadata_list.split(','):
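+ # Each feed entry is a urlencoded string carrying the video id and title of one camera feed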
+ feed_data = compat_parse_qs(feed)
+ entries.append({
+ '_type': 'url_transparent',
+ 'ie_key': 'Youtube',
+ 'url': smuggle_url(
+ '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
+ {'force_singlefeed': True}),
+ 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
+ })
+ feed_ids.append(feed_data['id'][0])
+ self.to_screen(
+ 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
+ % (', '.join(feed_ids), video_id))
+ return self.playlist_result(entries, video_id, video_title, video_description)
+ self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
+
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
@@ -1013,13 +1198,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
else:
self._downloader.report_warning('unable to extract uploader nickname')
- # title
- if 'title' in video_info:
- video_title = video_info['title'][0]
- else:
- self._downloader.report_warning('Unable to extract video title')
- video_title = '_'
-
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
@@ -1055,25 +1233,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
else:
video_categories = None
- # description
- video_description = get_element_by_id("eow-description", video_webpage)
- if video_description:
- video_description = re.sub(r'''(?x)
- <a\s+
- (?:[a-zA-Z-]+="[^"]+"\s+)*?
- title="([^"]+)"\s+
- (?:[a-zA-Z-]+="[^"]+"\s+)*?
- class="yt-uix-redirect-link"\s*>
- [^<]+
- </a>
- ''', r'\1', video_description)
- video_description = clean_html(video_description)
- else:
- fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
- if fd_mobj:
- video_description = unescapeHTML(fd_mobj.group(1))
- else:
- video_description = ''
+ video_tags = [
+ unescapeHTML(m.group('content'))
+ for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
@@ -1124,7 +1286,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
- url_map = {}
+ formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
@@ -1170,7 +1332,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
- r'html5player-([^/]+?)(?:/html5player)?\.js',
+ r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
@@ -1184,8 +1346,50 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
- url_map[format_id] = url
- formats = _map_to_format_list(url_map)
+
+ # Some itags are not included in the DASH manifest, so the corresponding formats will
+ # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
+ # Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
+ mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
+ width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
+ dct = {
+ 'format_id': format_id,
+ 'url': url,
+ 'player_url': player_url,
+ 'filesize': int_or_none(url_data.get('clen', [None])[0]),
+ 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
+ 'width': width,
+ 'height': height,
+ 'fps': int_or_none(url_data.get('fps', [None])[0]),
+ 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
+ }
+ type_ = url_data.get('type', [None])[0]
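+ # 'type' typically looks like 'video/mp4; codecs="avc1.64001F, mp4a.40.2"';
+ # derive the container ext and the audio/video codecs from it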
+ if type_:
+ type_split = type_.split(';')
+ kind_ext = type_split[0].split('/')
+ if len(kind_ext) == 2:
+ kind, ext = kind_ext
+ dct['ext'] = ext
+ if kind in ('audio', 'video'):
+ codecs = None
+ for mobj in re.finditer(
+ r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
+ if mobj.group('key') == 'codecs':
+ codecs = mobj.group('val')
+ break
+ if codecs:
+ codecs = codecs.split(',')
+ if len(codecs) == 2:
+ acodec, vcodec = codecs[0], codecs[1]
+ else:
+ acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
+ dct.update({
+ 'acodec': acodec,
+ 'vcodec': vcodec,
+ })
+ if format_id in self._formats:
+ dct.update(self._formats[format_id])
+ formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
@@ -1243,6 +1447,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
+ 'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
@@ -1255,10 +1460,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
+ 'start_time': start_time,
+ 'end_time': end_time,
}
-class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
+class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
@@ -1279,7 +1486,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
- _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
+ _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
@@ -1396,37 +1603,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
else:
self.report_warning('Youtube gives an alert message: ' + match)
- # Extract the video ids from the playlist pages
- def _entries():
- more_widget_html = content_html = page
- for page_num in itertools.count(1):
- matches = re.finditer(self._VIDEO_RE, content_html)
- # We remove the duplicates and the link with index 0
- # (it's not the first video of the playlist)
- new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
- for vid_id in new_ids:
- yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
-
- mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
- if not mobj:
- break
-
- more = self._download_json(
- 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
- 'Downloading page #%s' % page_num,
- transform_source=uppercase_escape)
- content_html = more['content_html']
- if not content_html.strip():
- # Some webpages show a "Load more" button but they don't
- # have more videos
- break
- more_widget_html = more['load_more_widget_html']
-
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
page, 'title')
- return self.playlist_result(_entries(), playlist_id, playlist_title)
+ return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
def _real_extract(self, url):
# Extract playlist id
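The paging loop deleted above is not gone: it moves into the shared YoutubePlaylistBaseInfoExtractor added earlier in this patch, and both the playlist and channel extractors now call self._entries(page, id). Its implementation presumably mirrors the removed code, roughly as sketched below (assuming youtube.py's existing module-level imports: itertools, re, uppercase_escape):

    class YoutubePlaylistBaseInfoExtractor(InfoExtractor):
        # Subclasses provide _VIDEO_RE with an 'id' and an optional 'title' group
        def _entries(self, page, playlist_id):
            more_widget_html = content_html = page
            for page_num in itertools.count(1):
                for video_id, video_title in self.extract_videos_from_page(content_html):
                    yield self.url_result(
                        video_id, 'Youtube', video_id=video_id,
                        video_title=video_title)

                mobj = re.search(
                    r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
                if not mobj:
                    break

                more = self._download_json(
                    'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                    'Downloading page #%s' % page_num,
                    transform_source=uppercase_escape)
                content_html = more['content_html']
                if not content_html.strip():
                    # some pages show a "Load more" button without more videos
                    break
                more_widget_html = more['load_more_widget_html']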
@@ -1452,10 +1633,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
return self._extract_playlist(playlist_id)
-class YoutubeChannelIE(InfoExtractor):
+class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
+ _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
@@ -1466,22 +1648,6 @@ class YoutubeChannelIE(InfoExtractor):
}
}]
- @staticmethod
- def extract_videos_from_page(page):
- ids_in_page = []
- titles_in_page = []
- for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?', page):
- video_id = mobj.group('id')
- video_title = unescapeHTML(mobj.group('title'))
- try:
- idx = ids_in_page.index(video_id)
- if video_title and not titles_in_page[idx]:
- titles_in_page[idx] = video_title
- except ValueError:
- ids_in_page.append(video_id)
- titles_in_page.append(video_title)
- return zip(ids_in_page, titles_in_page)
-
def _real_extract(self, url):
channel_id = self._match_id(url)
@@ -1493,12 +1659,15 @@ class YoutubeChannelIE(InfoExtractor):
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
- channel_playlist_id = self._html_search_meta(
- 'channelId', channel_page, 'channel id', default=None)
- if not channel_playlist_id:
- channel_playlist_id = self._search_regex(
- r'data-channel-external-id="([^"]+)"',
- channel_page, 'channel id', default=None)
+ if channel_page is False:
+ channel_playlist_id = False
+ else:
+ channel_playlist_id = self._html_search_meta(
+ 'channelId', channel_page, 'channel id', default=None)
+ if not channel_playlist_id:
+ channel_playlist_id = self._search_regex(
+ r'data-channel-external-id="([^"]+)"',
+ channel_page, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
@@ -1521,29 +1690,7 @@ class YoutubeChannelIE(InfoExtractor):
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
- def _entries():
- more_widget_html = content_html = channel_page
- for pagenum in itertools.count(1):
-
- for video_id, video_title in self.extract_videos_from_page(content_html):
- yield self.url_result(
- video_id, 'Youtube', video_id=video_id,
- video_title=video_title)
-
- mobj = re.search(
- r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
- more_widget_html)
- if not mobj:
- break
-
- more = self._download_json(
- 'https://youtube.com/%s' % mobj.group('more'), channel_id,
- 'Downloading page #%s' % (pagenum + 1),
- transform_source=uppercase_escape)
- content_html = more['content_html']
- more_widget_html = more['load_more_widget_html']
-
- return self.playlist_result(_entries(), channel_id)
+ return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
@@ -1647,7 +1794,7 @@ class YoutubeSearchURLIE(InfoExtractor):
r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
- r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
+ r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
@@ -1674,8 +1821,8 @@ class YoutubeShowIE(InfoExtractor):
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
- 'url': 'http://www.youtube.com/show/airdisasters',
- 'playlist_mincount': 3,
+ 'url': 'https://www.youtube.com/show/airdisasters',
+ 'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
@@ -1686,7 +1833,7 @@ class YoutubeShowIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(
- url, playlist_id, 'Downloading show webpage')
+ 'https://www.youtube.com/show/%s/playlists' % playlist_id, playlist_id, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
@@ -1809,6 +1956,7 @@ class YoutubeTruncatedURLIE(InfoExtractor):
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
+ t=[0-9]+
)?
|
attribution_link\?a=[^&]+
@@ -1831,6 +1979,9 @@ class YoutubeTruncatedURLIE(InfoExtractor):
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
+ }, {
+ 'url': 'https://www.youtube.com/watch?t=2372',
+ 'only_matching': True,
}]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/zingmp3.py b/youtube_dl/extractor/zingmp3.py
index 7dc1e2f2b..437eecb67 100644
--- a/youtube_dl/extractor/zingmp3.py
+++ b/youtube_dl/extractor/zingmp3.py
@@ -9,9 +9,11 @@ from ..utils import ExtractorError
class ZingMp3BaseInfoExtractor(InfoExtractor):
- def _extract_item(self, item):
+ def _extract_item(self, item, fatal=True):
error_message = item.find('./errormessage').text
if error_message:
+ if not fatal:
+ return
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_message),
expected=True)
@@ -43,7 +45,9 @@ class ZingMp3BaseInfoExtractor(InfoExtractor):
entries = []
for i, item in enumerate(items, 1):
- entry = self._extract_item(item)
+ entry = self._extract_item(item, fatal=False)
+ if not entry:
+ continue
entry['id'] = '%s-%d' % (id, i)
entries.append(entry)
@@ -85,7 +89,7 @@ class ZingMp3SongIE(ZingMp3BaseInfoExtractor):
class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
- _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
+ _VALID_URL = r'https?://mp3\.zing\.vn/(?:album|playlist)/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
_TESTS = [{
'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
'info_dict': {
@@ -94,6 +98,9 @@ class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
'title': 'Lâu Đài Tình Ái - Bằng Kiều ft. Minh Tuyết | Album 320 lossless',
},
'playlist_count': 10,
+ }, {
+ 'url': 'http://mp3.zing.vn/playlist/Duong-Hong-Loan-apollobee/IWCAACCB.html',
+ 'only_matching': True,
}]
IE_NAME = 'zingmp3:album'
IE_DESC = 'mp3.zing.vn albums'
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 9016e3498..3dd6d290b 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -2,7 +2,6 @@ from __future__ import unicode_literals
import os.path
import optparse
-import shlex
import sys
from .downloader.external import list_external_downloaders
@@ -11,6 +10,7 @@ from .compat import (
compat_get_terminal_size,
compat_getenv,
compat_kwargs,
+ compat_shlex_split,
)
from .utils import (
preferredencoding,
@@ -28,7 +28,7 @@ def parseOpts(overrideArguments=None):
try:
res = []
for l in optionf:
- res += shlex.split(l, comments=True)
+ res += compat_shlex_split(l, comments=True)
finally:
optionf.close()
return res
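The switch to compat_shlex_split works around shlex.split failing on unicode input under Python 2. The shim added to compat.py in this patch is presumably along these lines:

    import shlex
    import sys

    if sys.version_info < (3, 0):
        def compat_shlex_split(s, comments=False, posix=True):
            # Python 2's shlex cannot handle unicode input directly
            if isinstance(s, unicode):  # noqa: F821 (Python 2 only)
                s = s.encode('utf-8')
            return shlex.split(s, comments, posix)
    else:
        compat_shlex_split = shlex.split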
@@ -276,7 +276,7 @@ def parseOpts(overrideArguments=None):
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
- 'also have a description, use --match-filter '
+ 'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
@@ -320,7 +320,7 @@ def parseOpts(overrideArguments=None):
authentication.add_option(
'--video-password',
dest='videopassword', metavar='PASSWORD',
- help='Video password (vimeo, smotri)')
+ help='Video password (vimeo, smotri, youku)')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
video_format.add_option(
@@ -602,7 +602,7 @@ def parseOpts(overrideArguments=None):
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
- help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
+ help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
diff --git a/youtube_dl/postprocessor/common.py b/youtube_dl/postprocessor/common.py
index 4191d040b..599dd1df2 100644
--- a/youtube_dl/postprocessor/common.py
+++ b/youtube_dl/postprocessor/common.py
@@ -4,6 +4,7 @@ import os
from ..utils import (
PostProcessingError,
+ cli_configuration_args,
encodeFilename,
)
@@ -61,11 +62,7 @@ class PostProcessor(object):
self._downloader.report_warning(errnote)
def _configuration_args(self, default=[]):
- pp_args = self._downloader.params.get('postprocessor_args')
- if pp_args is None:
- return default
- assert isinstance(pp_args, list)
- return pp_args
+ return cli_configuration_args(self._downloader.params, 'postprocessor_args', default)
class AudioConversionError(PostProcessingError):
diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py
index 1f723908b..4f320e124 100644
--- a/youtube_dl/postprocessor/ffmpeg.py
+++ b/youtube_dl/postprocessor/ffmpeg.py
@@ -135,7 +135,10 @@ class FFmpegPostProcessor(PostProcessor):
files_cmd = []
for path in input_paths:
- files_cmd.extend([encodeArgument('-i'), encodeFilename(path, True)])
+ files_cmd.extend([
+ encodeArgument('-i'),
+ encodeFilename(self._ffmpeg_filename_argument(path), True)
+ ])
cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] +
files_cmd +
[encodeArgument(o) for o in opts] +
@@ -155,10 +158,10 @@ class FFmpegPostProcessor(PostProcessor):
self.run_ffmpeg_multiple_files([path], out_path, opts)
def _ffmpeg_filename_argument(self, fn):
- # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
- if fn.startswith('-'):
- return './' + fn
- return fn
+ # Always use 'file:' because the filename may contain ':' (ffmpeg
+ # interprets that as a protocol) or can start with '-' (-- is broken in
+ # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
+ return 'file:' + fn
class FFmpegExtractAudioPP(FFmpegPostProcessor):
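A quick way to see why the unconditional 'file:' prefix is safer (the filenames below are made up):

    def _ffmpeg_filename_argument(fn):
        # same logic as the new helper above
        return 'file:' + fn

    _ffmpeg_filename_argument('concat:out.mp4')   # 'file:concat:out.mp4' - ':' no longer read as a protocol
    _ffmpeg_filename_argument('-weird name.mp4')  # 'file:-weird name.mp4' - leading '-' no longer read as an option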
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 942f76d24..7dbe25661 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -139,21 +139,24 @@ def write_json_file(obj, fn):
if sys.version_info >= (2, 7):
- def find_xpath_attr(node, xpath, key, val):
+ def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
- assert re.match(r'^[a-zA-Z-]+$', key)
- assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
- expr = xpath + "[@%s='%s']" % (key, val)
+ assert re.match(r'^[a-zA-Z_-]+$', key)
+ if val:
+ assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
+ expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
- def find_xpath_attr(node, xpath, key, val):
+ def find_xpath_attr(node, xpath, key, val=None):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
if isinstance(xpath, compat_str):
xpath = xpath.encode('ascii')
for f in node.findall(xpath):
- if f.attrib.get(key) == val:
+ if key not in f.attrib:
+ continue
+ if val is None or f.attrib.get(key) == val:
return f
return None
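With val now optional, find_xpath_attr can also return the first element that merely carries the attribute. A minimal sketch on a made-up document:

    import xml.etree.ElementTree as ET
    from youtube_dl.utils import find_xpath_attr

    doc = ET.fromstring('<root><media/><media quality="hd" url="http://a/v.mp4"/></root>')

    # new: match on attribute presence, whatever its value
    find_xpath_attr(doc, './/media', 'url').attrib['url']           # 'http://a/v.mp4'

    # old behaviour (attribute equal to a given value) still works
    find_xpath_attr(doc, './/media', 'quality', 'hd') is not None   # True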
@@ -173,12 +176,12 @@ def xpath_with_ns(path, ns_map):
return '/'.join(replaced)
-def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
+def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
if sys.version_info < (2, 7): # Crazy 2.6
xpath = xpath.encode('ascii')
n = node.find(xpath)
- if n is None or n.text is None:
+ if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
@@ -186,9 +189,37 @@ def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
+ return n
+
+
+def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
+ n = xpath_element(node, xpath, name, fatal=fatal, default=default)
+ if n is None or n == default:
+ return n
+ if n.text is None:
+ if default is not NO_DEFAULT:
+ return default
+ elif fatal:
+ name = xpath if name is None else name
+ raise ExtractorError('Could not find XML element\'s text %s' % name)
+ else:
+ return None
return n.text
+def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
+ n = find_xpath_attr(node, xpath, key)
+ if n is None:
+ if default is not NO_DEFAULT:
+ return default
+ elif fatal:
+ name = '%s[@%s]' % (xpath, key) if name is None else name
+ raise ExtractorError('Could not find XML attribute %s' % name)
+ else:
+ return None
+ return n.attrib[key]
+
+
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute("id", id, html)
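Together the three helpers now cover the usual lookups; roughly, on a made-up document:

    import xml.etree.ElementTree as ET
    from youtube_dl.utils import xpath_element, xpath_text, xpath_attr

    doc = ET.fromstring('<video><source src="http://a/v.mp4">primary</source></video>')

    xpath_element(doc, './source')              # the <source> Element itself
    xpath_text(doc, './source')                 # 'primary'
    xpath_attr(doc, './source', 'src')          # 'http://a/v.mp4'
    xpath_text(doc, './missing', default=None)  # None instead of raising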
@@ -576,16 +607,19 @@ class ContentTooShortError(Exception):
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
- # Both in bytes
- downloaded = None
- expected = None
def __init__(self, downloaded, expected):
+ # Both in bytes
self.downloaded = downloaded
self.expected = expected
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
+ # Work around a Python 2 bug (see http://bugs.python.org/issue17849) by requiring
+ # HTTP responses to be HTTP/1.0 or later (see also
+ # https://github.com/rg3/youtube-dl/issues/6727)
+ if sys.version_info < (3, 0):
+ kwargs[b'strict'] = True
hc = http_class(*args, **kwargs)
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
@@ -650,6 +684,26 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
return ret
def http_request(self, req):
+ # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is
+ # not always respected by websites: some hand out URLs with non-percent-encoded
+ # non-ASCII characters (see telemb.py, ard.py [#3412]).
+ # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991).
+ # To work around the aforementioned issue we replace the request's original URL with
+ # a percent-encoded one.
+ # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09),
+ # the code of this workaround has been moved here from YoutubeDL.urlopen().
+ url = req.get_full_url()
+ url_escaped = escape_url(url)
+
+ # Substitute the URL only if escaping actually changed it
+ if url != url_escaped:
+ req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
+ new_req = req_type(
+ url_escaped, data=req.data, headers=req.headers,
+ origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
+ new_req.timeout = req.timeout
+ req = new_req
+
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
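The effect of the new escaping step on a made-up URL containing raw non-ASCII characters:

    from youtube_dl.utils import escape_url

    url = 'http://www.example.com/aufführung?titel=kön'
    escape_url(url)
    # -> 'http://www.example.com/auff%C3%BChrung?titel=k%C3%B6n'
    # http_request() only rebuilds the request when the escaped URL differs from the original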
@@ -694,6 +748,18 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
gz = io.BytesIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
+ # Percent-encode the redirect URL in the Location HTTP header to satisfy RFC 3986 (see
+ # https://github.com/rg3/youtube-dl/issues/6457).
+ if 300 <= resp.code < 400:
+ location = resp.headers.get('Location')
+ if location:
+ # Per RFC 2616 the default charset is iso-8859-1, which Python 3 respects when decoding headers
+ if sys.version_info >= (3, 0):
+ location = location.encode('iso-8859-1').decode('utf-8')
+ location_escaped = escape_url(location)
+ if location != location_escaped:
+ del resp.headers['Location']
+ resp.headers['Location'] = location_escaped
return resp
https_request = http_request
@@ -717,6 +783,30 @@ class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
req, **kwargs)
+class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
+ def __init__(self, cookiejar=None):
+ compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
+
+ def http_response(self, request, response):
+ # Python 2 will choke on the next HTTP request in a row if the Set-Cookie
+ # header of the last response contains non-ASCII characters (see
+ # https://github.com/rg3/youtube-dl/issues/6769).
+ # To at least prevent a crash, the Set-Cookie header would be percent-encoded
+ # before HTTPCookieProcessor starts processing it.
+ # if sys.version_info < (3, 0) and response.headers:
+ # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
+ # set_cookie = response.headers.get(set_cookie_header)
+ # if set_cookie:
+ # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
+ # if set_cookie != set_cookie_escaped:
+ # del response.headers[set_cookie_header]
+ # response.headers[set_cookie_header] = set_cookie_escaped
+ return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
+
+ https_request = compat_urllib_request.HTTPCookieProcessor.http_request
+ https_response = http_response
+
+
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
@@ -1281,7 +1371,12 @@ def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
v = getattr(v, get_attr, None)
if v == '':
v = None
- return default if v is None else (int(v) * invscale // scale)
+ if v is None:
+ return default
+ try:
+ return int(v) * invscale // scale
+ except ValueError:
+ return default
def str_or_none(v, default=None):
@@ -1297,7 +1392,12 @@ def str_to_int(int_str):
def float_or_none(v, scale=1, invscale=1, default=None):
- return default if v is None else (float(v) * invscale / scale)
+ if v is None:
+ return default
+ try:
+ return float(v) * invscale / scale
+ except ValueError:
+ return default
def parse_duration(s):
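With the new guards, malformed numeric strings now fall back to the default instead of raising (illustrative inputs):

    from youtube_dl.utils import int_or_none, float_or_none

    int_or_none('1080')               # 1080
    int_or_none('1,080')              # None (previously raised ValueError)
    float_or_none('23.976')           # 23.976
    float_or_none('n/a', default=0)   # 0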
@@ -1309,10 +1409,10 @@ def parse_duration(s):
m = re.match(
r'''(?ix)(?:P?T)?
(?:
- (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*|
+ (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
(?P<only_hours>[0-9.]+)\s*(?:hours?)|
- \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*|
+ \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
(?:
(?:
(?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
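The widened minute pattern additionally accepts the abbreviated "min." spelling, e.g.:

    from youtube_dl.utils import parse_duration

    parse_duration('3 min.')     # 180.0 (previously not parsed)
    parse_duration('5 minutes')  # 300.0 (unchanged)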
@@ -1546,6 +1646,10 @@ def urlencode_postdata(*args, **kargs):
return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
+def encode_dict(d, encoding='utf-8'):
+ return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())
+
+
try:
etree_iter = xml.etree.ElementTree.Element.iter
except AttributeError: # Python <=2.6
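encode_dict is a small convenience for building POST bodies from unicode form data on Python 2, where urlencode cannot cope with unicode values. A sketch of the intended use, with made-up form fields:

    from youtube_dl.utils import encode_dict, urlencode_postdata

    login_form = {'username': 'üser', 'password': 'secret'}
    post_data = urlencode_postdata(encode_dict(login_form))
    # -> b'username=%C3%BCser&password=secret' (field order may vary)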
@@ -1886,6 +1990,32 @@ def dfxp2srt(dfxp_data):
return ''.join(out)
+def cli_option(params, command_option, param):
+ param = params.get(param)
+ return [command_option, param] if param is not None else []
+
+
+def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
+ param = params.get(param)
+ assert isinstance(param, bool)
+ if separator:
+ return [command_option + separator + (true_value if param else false_value)]
+ return [command_option, true_value if param else false_value]
+
+
+def cli_valueless_option(params, command_option, param, expected_value=True):
+ param = params.get(param)
+ return [command_option] if param == expected_value else []
+
+
+def cli_configuration_args(params, param, default=[]):
+ ex_args = params.get(param)
+ if ex_args is None:
+ return default
+ assert isinstance(ex_args, list)
+ return ex_args
+
+
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
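The new cli_* helpers centralize how command lines for external downloaders and postprocessors are assembled. A rough composition example with made-up params (the option names are aria2c-style and purely illustrative):

    from youtube_dl.utils import (
        cli_bool_option, cli_configuration_args, cli_option, cli_valueless_option)

    params = {
        'proxy': 'http://127.0.0.1:3128',
        'nocheckcertificate': True,
        'continuedl': True,
        'external_downloader_args': ['--max-connection-per-server', '4'],
    }

    args = []
    args += cli_option(params, '--all-proxy', 'proxy')                   # ['--all-proxy', 'http://127.0.0.1:3128']
    args += cli_bool_option(params, '--check-certificate',
                            'nocheckcertificate', 'false', 'true')       # ['--check-certificate', 'false']
    args += cli_valueless_option(params, '--continue', 'continuedl')     # ['--continue']
    args += cli_configuration_args(params, 'external_downloader_args')   # user-supplied extras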
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 280afdd7f..31d2a9dc0 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
from __future__ import unicode_literals
-__version__ = '2015.07.21'
+__version__ = '2015.10.16'