-rw-r--r-- .travis.yml | 1
-rw-r--r-- AUTHORS | 9
-rw-r--r-- CONTRIBUTING.md | 35
-rw-r--r-- Makefile | 14
-rw-r--r-- README.md | 497
-rwxr-xr-x devscripts/bash-completion.py | 2
-rwxr-xr-x devscripts/fish-completion.py | 2
-rwxr-xr-x devscripts/gh-pages/update-sites.py | 2
-rw-r--r-- devscripts/make_supportedsites.py | 2
-rw-r--r-- devscripts/prepare_manpage.py | 31
-rwxr-xr-x devscripts/zsh-completion.py | 2
-rw-r--r-- docs/supportedsites.md | 66
-rw-r--r-- setup.py | 2
-rw-r--r-- test/helper.py | 129
-rw-r--r-- test/test_InfoExtractor.py | 8
-rw-r--r-- test/test_all_urls.py | 4
-rw-r--r-- test/test_compat.py | 17
-rw-r--r-- test/test_download.py | 2
-rw-r--r-- test/test_jsinterp.py | 3
-rw-r--r-- test/test_subtitles.py | 21
-rw-r--r-- test/test_utils.py | 59
-rw-r--r-- test/test_youtube_lists.py | 9
-rw-r--r-- tox.ini | 2
-rwxr-xr-x youtube_dl/YoutubeDL.py | 52
-rw-r--r-- youtube_dl/__init__.py | 2
-rwxr-xr-x youtube_dl/__main__.py | 2
-rw-r--r-- youtube_dl/compat.py | 121
-rw-r--r-- youtube_dl/downloader/common.py | 8
-rw-r--r-- youtube_dl/downloader/dash.py | 4
-rw-r--r-- youtube_dl/downloader/f4m.py | 28
-rw-r--r-- youtube_dl/downloader/hls.py | 20
-rw-r--r-- youtube_dl/downloader/http.py | 10
-rw-r--r-- youtube_dl/downloader/rtmp.py | 4
-rw-r--r-- youtube_dl/extractor/__init__.py | 88
-rw-r--r-- youtube_dl/extractor/abc.py | 18
-rw-r--r-- youtube_dl/extractor/acast.py | 70
-rw-r--r-- youtube_dl/extractor/adobetv.py | 165
-rw-r--r-- youtube_dl/extractor/adultswim.py | 46
-rw-r--r-- youtube_dl/extractor/aljazeera.py | 4
-rw-r--r-- youtube_dl/extractor/anitube.py | 4
-rw-r--r-- youtube_dl/extractor/appletrailers.py | 68
-rw-r--r-- youtube_dl/extractor/ard.py | 4
-rw-r--r-- youtube_dl/extractor/arte.py | 12
-rw-r--r-- youtube_dl/extractor/atresplayer.py | 6
-rw-r--r-- youtube_dl/extractor/audimedia.py | 80
-rw-r--r-- youtube_dl/extractor/bambuser.py | 6
-rw-r--r-- youtube_dl/extractor/bandcamp.py | 12
-rw-r--r-- youtube_dl/extractor/bbc.py | 296
-rw-r--r-- youtube_dl/extractor/beeg.py | 102
-rw-r--r-- youtube_dl/extractor/bild.py | 22
-rw-r--r-- youtube_dl/extractor/bilibili.py | 160
-rw-r--r-- youtube_dl/extractor/bliptv.py | 8
-rw-r--r-- youtube_dl/extractor/bloomberg.py | 31
-rw-r--r-- youtube_dl/extractor/brightcove.py | 195
-rw-r--r-- youtube_dl/extractor/byutv.py | 5
-rw-r--r-- youtube_dl/extractor/canalc2.py | 43
-rw-r--r-- youtube_dl/extractor/canalplus.py | 3
-rw-r--r-- youtube_dl/extractor/cbs.py | 14
-rw-r--r-- youtube_dl/extractor/cbsnews.py | 5
-rw-r--r-- youtube_dl/extractor/ceskatelevize.py | 6
-rw-r--r-- youtube_dl/extractor/channel9.py | 66
-rw-r--r-- youtube_dl/extractor/chaturbate.py | 50
-rw-r--r-- youtube_dl/extractor/clipfish.py | 64
-rw-r--r-- youtube_dl/extractor/cliphunter.py | 43
-rw-r--r-- youtube_dl/extractor/clubic.py | 9
-rw-r--r-- youtube_dl/extractor/clyp.py | 57
-rw-r--r-- youtube_dl/extractor/cmt.py | 5
-rw-r--r-- youtube_dl/extractor/cnet.py | 69
-rw-r--r-- youtube_dl/extractor/collegerama.py | 4
-rw-r--r-- youtube_dl/extractor/comedycentral.py | 7
-rw-r--r-- youtube_dl/extractor/common.py | 98
-rw-r--r-- youtube_dl/extractor/condenast.py | 50
-rw-r--r-- youtube_dl/extractor/criterion.py | 4
-rw-r--r-- youtube_dl/extractor/crunchyroll.py | 130
-rw-r--r-- youtube_dl/extractor/cspan.py | 114
-rw-r--r-- youtube_dl/extractor/dailymotion.py | 64
-rw-r--r-- youtube_dl/extractor/dbtv.py | 14
-rw-r--r-- youtube_dl/extractor/dcn.py | 8
-rw-r--r-- youtube_dl/extractor/democracynow.py | 88
-rw-r--r-- youtube_dl/extractor/divxstage.py | 27
-rw-r--r-- youtube_dl/extractor/dplay.py | 51
-rw-r--r-- youtube_dl/extractor/dramafever.py | 4
-rw-r--r-- youtube_dl/extractor/dumpert.py | 17
-rw-r--r-- youtube_dl/extractor/eagleplatform.py | 29
-rw-r--r-- youtube_dl/extractor/eitb.py | 95
-rw-r--r-- youtube_dl/extractor/engadget.py | 2
-rw-r--r-- youtube_dl/extractor/escapist.py | 5
-rw-r--r-- youtube_dl/extractor/europa.py | 93
-rw-r--r-- youtube_dl/extractor/everyonesmixtape.py | 8
-rw-r--r-- youtube_dl/extractor/expotv.py | 31
-rw-r--r-- youtube_dl/extractor/extremetube.py | 61
-rw-r--r-- youtube_dl/extractor/facebook.py | 37
-rw-r--r-- youtube_dl/extractor/faz.py | 2
-rw-r--r-- youtube_dl/extractor/fc2.py | 9
-rw-r--r-- youtube_dl/extractor/fczenit.py | 41
-rw-r--r-- youtube_dl/extractor/fivemin.py | 84
-rw-r--r-- youtube_dl/extractor/fktv.py | 87
-rw-r--r-- youtube_dl/extractor/flickr.py | 4
-rw-r--r-- youtube_dl/extractor/footyroom.py | 1
-rw-r--r-- youtube_dl/extractor/fourtube.py | 35
-rw-r--r-- youtube_dl/extractor/francetv.py | 28
-rw-r--r-- youtube_dl/extractor/funimation.py | 193
-rw-r--r-- youtube_dl/extractor/funnyordie.py | 15
-rw-r--r-- youtube_dl/extractor/gameinformer.py | 43
-rw-r--r-- youtube_dl/extractor/gametrailers.py | 61
-rw-r--r-- youtube_dl/extractor/gdcvault.py | 8
-rw-r--r-- youtube_dl/extractor/generic.py | 158
-rw-r--r-- youtube_dl/extractor/globo.py | 161
-rw-r--r-- youtube_dl/extractor/googleplus.py | 2
-rw-r--r-- youtube_dl/extractor/gputechconf.py | 55
-rw-r--r-- youtube_dl/extractor/groupon.py | 2
-rw-r--r-- youtube_dl/extractor/hearthisat.py | 8
-rw-r--r-- youtube_dl/extractor/hostingbulk.py | 80
-rw-r--r-- youtube_dl/extractor/hotnewhiphop.py | 8
-rw-r--r-- youtube_dl/extractor/howcast.py | 1
-rw-r--r-- youtube_dl/extractor/hypem.py | 24
-rw-r--r-- youtube_dl/extractor/iconosquare.py | 24
-rw-r--r-- youtube_dl/extractor/imdb.py | 29
-rw-r--r-- youtube_dl/extractor/infoq.py | 85
-rw-r--r-- youtube_dl/extractor/instagram.py | 9
-rw-r--r-- youtube_dl/extractor/iprima.py | 6
-rw-r--r-- youtube_dl/extractor/iqiyi.py | 19
-rw-r--r-- youtube_dl/extractor/ivi.py | 6
-rw-r--r-- youtube_dl/extractor/jeuxvideo.py | 2
-rw-r--r-- youtube_dl/extractor/kaltura.py | 52
-rw-r--r-- youtube_dl/extractor/keek.py | 39
-rw-r--r-- youtube_dl/extractor/keezmovies.py | 40
-rw-r--r-- youtube_dl/extractor/kuwo.py | 5
-rw-r--r-- youtube_dl/extractor/letv.py | 74
-rw-r--r-- youtube_dl/extractor/limelight.py | 229
-rw-r--r-- youtube_dl/extractor/lynda.py | 86
-rw-r--r-- youtube_dl/extractor/mdr.py | 189
-rw-r--r-- youtube_dl/extractor/megavideoz.py | 56
-rw-r--r-- youtube_dl/extractor/metacafe.py | 12
-rw-r--r-- youtube_dl/extractor/minhateca.py | 8
-rw-r--r-- youtube_dl/extractor/miomio.py | 13
-rw-r--r-- youtube_dl/extractor/mit.py | 2
-rw-r--r-- youtube_dl/extractor/mitele.py | 103
-rw-r--r-- youtube_dl/extractor/mixcloud.py | 3
-rw-r--r-- youtube_dl/extractor/moevideo.py | 8
-rw-r--r-- youtube_dl/extractor/mofosex.py | 4
-rw-r--r-- youtube_dl/extractor/moniker.py | 53
-rw-r--r-- youtube_dl/extractor/mooshare.py | 8
-rw-r--r-- youtube_dl/extractor/movieclips.py | 80
-rw-r--r-- youtube_dl/extractor/movshare.py | 27
-rw-r--r-- youtube_dl/extractor/mtv.py | 19
-rw-r--r-- youtube_dl/extractor/musicvault.py | 63
-rw-r--r-- youtube_dl/extractor/myvideo.py | 4
-rw-r--r-- youtube_dl/extractor/naver.py | 11
-rw-r--r-- youtube_dl/extractor/nba.py | 93
-rw-r--r-- youtube_dl/extractor/nbc.py | 11
-rw-r--r-- youtube_dl/extractor/ndr.py | 445
-rw-r--r-- youtube_dl/extractor/neteasemusic.py | 6
-rw-r--r-- youtube_dl/extractor/nextmedia.py | 16
-rw-r--r-- youtube_dl/extractor/nfb.py | 11
-rw-r--r-- youtube_dl/extractor/nfl.py | 164
-rw-r--r-- youtube_dl/extractor/nhl.py | 26
-rw-r--r-- youtube_dl/extractor/niconico.py | 6
-rw-r--r-- youtube_dl/extractor/ninegag.py | 95
-rw-r--r-- youtube_dl/extractor/noco.py | 38
-rw-r--r-- youtube_dl/extractor/nosvideo.py | 6
-rw-r--r-- youtube_dl/extractor/novamov.py | 153
-rw-r--r-- youtube_dl/extractor/nowness.py | 176
-rw-r--r-- youtube_dl/extractor/nowtv.py | 167
-rw-r--r-- youtube_dl/extractor/nowvideo.py | 28
-rw-r--r-- youtube_dl/extractor/nrk.py | 52
-rw-r--r-- youtube_dl/extractor/nuvid.py | 6
-rw-r--r-- youtube_dl/extractor/odnoklassniki.py | 13
-rw-r--r-- youtube_dl/extractor/ooyala.py | 175
-rw-r--r-- youtube_dl/extractor/openfilm.py | 70
-rw-r--r-- youtube_dl/extractor/patreon.py | 6
-rw-r--r-- youtube_dl/extractor/pbs.py | 269
-rw-r--r-- youtube_dl/extractor/periscope.py | 43
-rw-r--r-- youtube_dl/extractor/pladform.py | 9
-rw-r--r-- youtube_dl/extractor/played.py | 8
-rw-r--r-- youtube_dl/extractor/playwire.py | 2
-rw-r--r-- youtube_dl/extractor/pluralsight.py | 148
-rw-r--r-- youtube_dl/extractor/pornhd.py | 3
-rw-r--r-- youtube_dl/extractor/pornhub.py | 12
-rw-r--r-- youtube_dl/extractor/pornotube.py | 10
-rw-r--r-- youtube_dl/extractor/primesharetv.py | 10
-rw-r--r-- youtube_dl/extractor/promptfile.py | 8
-rw-r--r-- youtube_dl/extractor/prosiebensat1.py | 2
-rw-r--r-- youtube_dl/extractor/qqmusic.py | 41
-rw-r--r-- youtube_dl/extractor/rai.py | 22
-rw-r--r-- youtube_dl/extractor/rtbf.py | 15
-rw-r--r-- youtube_dl/extractor/rte.py | 12
-rw-r--r-- youtube_dl/extractor/rtve.py | 12
-rw-r--r-- youtube_dl/extractor/rutube.py | 43
-rw-r--r-- youtube_dl/extractor/ruutu.py | 17
-rw-r--r-- youtube_dl/extractor/safari.py | 16
-rw-r--r-- youtube_dl/extractor/sandia.py | 8
-rw-r--r-- youtube_dl/extractor/senateisvp.py | 4
-rw-r--r-- youtube_dl/extractor/shahid.py | 4
-rw-r--r-- youtube_dl/extractor/shared.py | 8
-rw-r--r-- youtube_dl/extractor/sharesix.py | 8
-rw-r--r-- youtube_dl/extractor/sina.py | 8
-rw-r--r-- youtube_dl/extractor/skynewsarabia.py | 117
-rw-r--r-- youtube_dl/extractor/smotri.py | 10
-rw-r--r-- youtube_dl/extractor/sohu.py | 5
-rw-r--r-- youtube_dl/extractor/soundcloud.py | 65
-rw-r--r-- youtube_dl/extractor/space.py | 8
-rw-r--r-- youtube_dl/extractor/spankwire.py | 4
-rw-r--r-- youtube_dl/extractor/spiegel.py | 3
-rw-r--r-- youtube_dl/extractor/spiegeltv.py | 18
-rw-r--r-- youtube_dl/extractor/sportdeutschland.py | 6
-rw-r--r-- youtube_dl/extractor/srf.py | 26
-rw-r--r-- youtube_dl/extractor/stitcher.py | 81
-rw-r--r-- youtube_dl/extractor/streamcloud.py | 8
-rw-r--r-- youtube_dl/extractor/streamcz.py | 6
-rw-r--r-- youtube_dl/extractor/tapely.py | 12
-rw-r--r-- youtube_dl/extractor/teachingchannel.py | 1
-rw-r--r-- youtube_dl/extractor/telecinco.py | 79
-rw-r--r-- youtube_dl/extractor/tf1.py | 16
-rw-r--r-- youtube_dl/extractor/theplatform.py | 34
-rw-r--r-- youtube_dl/extractor/tlc.py | 6
-rw-r--r-- youtube_dl/extractor/toggle.py | 194
-rw-r--r-- youtube_dl/extractor/trilulilu.py | 127
-rw-r--r-- youtube_dl/extractor/tube8.py | 8
-rw-r--r-- youtube_dl/extractor/tubitv.py | 8
-rw-r--r-- youtube_dl/extractor/tudou.py | 38
-rw-r--r-- youtube_dl/extractor/tumblr.py | 57
-rw-r--r-- youtube_dl/extractor/tutv.py | 4
-rw-r--r-- youtube_dl/extractor/twitch.py | 40
-rw-r--r-- youtube_dl/extractor/twitter.py | 188
-rw-r--r-- youtube_dl/extractor/udemy.py | 150
-rw-r--r-- youtube_dl/extractor/udn.py | 3
-rw-r--r-- youtube_dl/extractor/ustream.py | 92
-rw-r--r-- youtube_dl/extractor/vbox7.py | 4
-rw-r--r-- youtube_dl/extractor/veoh.py | 6
-rw-r--r-- youtube_dl/extractor/vessel.py | 4
-rw-r--r-- youtube_dl/extractor/vevo.py | 80
-rw-r--r-- youtube_dl/extractor/vice.py | 1
-rw-r--r-- youtube_dl/extractor/viddler.py | 6
-rw-r--r-- youtube_dl/extractor/videofyme.py | 40
-rw-r--r-- youtube_dl/extractor/videolecturesnet.py | 86
-rw-r--r-- youtube_dl/extractor/videomega.py | 4
-rw-r--r-- youtube_dl/extractor/videoweed.py | 26
-rw-r--r-- youtube_dl/extractor/vidme.py | 177
-rw-r--r-- youtube_dl/extractor/vidzi.py | 10
-rw-r--r-- youtube_dl/extractor/vier.py | 13
-rw-r--r-- youtube_dl/extractor/viewster.py | 68
-rw-r--r-- youtube_dl/extractor/viidea.py | 188
-rw-r--r-- youtube_dl/extractor/viki.py | 6
-rw-r--r-- youtube_dl/extractor/vimeo.py | 199
-rw-r--r-- youtube_dl/extractor/vine.py | 70
-rw-r--r-- youtube_dl/extractor/vk.py | 30
-rw-r--r-- youtube_dl/extractor/vodlocker.py | 17
-rw-r--r-- youtube_dl/extractor/voicerepublic.py | 8
-rw-r--r-- youtube_dl/extractor/washingtonpost.py | 8
-rw-r--r-- youtube_dl/extractor/wdr.py | 58
-rw-r--r-- youtube_dl/extractor/wimp.py | 40
-rw-r--r-- youtube_dl/extractor/wistia.py | 8
-rw-r--r-- youtube_dl/extractor/wsj.py | 1
-rw-r--r-- youtube_dl/extractor/xfileshare.py (renamed from youtube_dl/extractor/gorillavid.py) | 38
-rw-r--r-- youtube_dl/extractor/xhamster.py | 4
-rw-r--r-- youtube_dl/extractor/xtube.py | 8
-rw-r--r-- youtube_dl/extractor/xuite.py | 2
-rw-r--r-- youtube_dl/extractor/xvideos.py | 8
-rw-r--r-- youtube_dl/extractor/yahoo.py | 60
-rw-r--r-- youtube_dl/extractor/yandexmusic.py | 11
-rw-r--r-- youtube_dl/extractor/youku.py | 129
-rw-r--r-- youtube_dl/extractor/youporn.py | 218
-rw-r--r-- youtube_dl/extractor/youtube.py | 370
-rw-r--r-- youtube_dl/extractor/zdf.py | 50
-rw-r--r-- youtube_dl/extractor/zingmp3.py | 13
-rw-r--r-- youtube_dl/jsinterp.py | 6
-rw-r--r-- youtube_dl/options.py | 8
-rw-r--r-- youtube_dl/postprocessor/ffmpeg.py | 18
-rw-r--r-- youtube_dl/update.py | 26
-rw-r--r-- youtube_dl/utils.py | 169
-rw-r--r-- youtube_dl/version.py | 2
272 files changed, 8898 insertions, 3844 deletions
diff --git a/.travis.yml b/.travis.yml
index e78a2fa76..cc21fae8f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,6 +5,7 @@ python:
- "3.2"
- "3.3"
- "3.4"
+ - "3.5"
sudo: false
script: nosetests test --verbose
notifications:
diff --git a/AUTHORS b/AUTHORS
index d1693224e..ce350e96c 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -140,3 +140,12 @@ Behrouz Abbasi
ngld
nyuszika7h
Shaun Walbridge
+Lee Jenkins
+Anssi Hannula
+Lukáš Lalinský
+Qijiang Fan
+Rémy Léone
+Marco Ferragina
+reiv
+Muratcan Simsek
+Evan Lu
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f8ab29631..f3fe0d432 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,20 @@
-**Please include the full output of youtube-dl when run with `-v`**.
-
-The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+**Please include the full output of youtube-dl when run with `-v`**, i.e. add the `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
+```
+$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj
+[debug] System config: []
+[debug] User config: []
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
+[debug] youtube-dl version 2015.12.06
+[debug] Git HEAD: 135392e
+[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
+[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
+[debug] Proxy map: {}
+...
+```
+**Do not post screenshots of the verbose log; only plain text is acceptable.**
+
+The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
Please re-read your issue once again to avoid a couple of common mistakes (you can and should use this as a checklist):
@@ -16,19 +30,19 @@ So please elaborate on what feature you are requesting, or what bug you want to
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
-For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
-If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
+If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
-**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL.
### Are you using the latest version?
-Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
### Is the issue already documented?
-Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/rg3/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
### Why are existing options not enough?
@@ -114,17 +128,18 @@ If you want to add support for a new site, you can follow this quick list (assum
webpage = self._download_webpage(url, video_id)
# TODO more code goes here, for example ...
- title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+ title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
return {
'id': video_id,
'title': title,
'description': self._og_search_description(webpage),
+ 'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
# TODO more properties (see youtube_dl/extractor/common.py)
}
```
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
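
To see the checklist end to end, here is a minimal sketch of what the complete extractor module could look like, built around the hypothetical site `yourextractor.com` used in the template; everything outside the lines shown in the diff above is illustrative, not part of this change:

```python
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://yourextractor.com/watch/42',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The non-greedy group keeps the match inside a single <h1> element
        title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
        return {
            'id': video_id,
            'title': title,
            'description': self._og_search_description(webpage),
            # fatal=False makes the uploader optional instead of aborting
            'uploader': self._search_regex(
                r'<div[^>]+id="uploader"[^>]*>([^<]+)<',
                webpage, 'uploader', fatal=False),
        }
```
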
diff --git a/Makefile b/Makefile
index fdb1abb60..f826c1685 100644
--- a/Makefile
+++ b/Makefile
@@ -61,34 +61,34 @@ youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
chmod a+x youtube-dl
README.md: youtube_dl/*.py youtube_dl/*/*.py
- COLUMNS=80 python youtube_dl/__main__.py --help | python devscripts/make_readme.py
+ COLUMNS=80 $(PYTHON) youtube_dl/__main__.py --help | $(PYTHON) devscripts/make_readme.py
CONTRIBUTING.md: README.md
- python devscripts/make_contributing.py README.md CONTRIBUTING.md
+ $(PYTHON) devscripts/make_contributing.py README.md CONTRIBUTING.md
supportedsites:
- python devscripts/make_supportedsites.py docs/supportedsites.md
+ $(PYTHON) devscripts/make_supportedsites.py docs/supportedsites.md
README.txt: README.md
pandoc -f markdown -t plain README.md -o README.txt
youtube-dl.1: README.md
- python devscripts/prepare_manpage.py >youtube-dl.1.temp.md
+ $(PYTHON) devscripts/prepare_manpage.py >youtube-dl.1.temp.md
pandoc -s -f markdown -t man youtube-dl.1.temp.md -o youtube-dl.1
rm -f youtube-dl.1.temp.md
youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
- python devscripts/bash-completion.py
+ $(PYTHON) devscripts/bash-completion.py
bash-completion: youtube-dl.bash-completion
youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
- python devscripts/zsh-completion.py
+ $(PYTHON) devscripts/zsh-completion.py
zsh-completion: youtube-dl.zsh
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
- python devscripts/fish-completion.py
+ $(PYTHON) devscripts/fish-completion.py
fish-completion: youtube-dl.fish
diff --git a/README.md b/README.md
index 24bfe38a2..7002f45e0 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,7 @@ youtube-dl - download videos from youtube.com or other video platforms
- [VIDEO SELECTION](#video-selection)
- [FAQ](#faq)
- [DEVELOPER INSTRUCTIONS](#developer-instructions)
+- [EMBEDDING YOUTUBE-DL](#embedding-youtube-dl)
- [BUGS](#bugs)
- [COPYRIGHT](#copyright)
@@ -34,7 +35,7 @@ You can also use pip:
sudo pip install youtube-dl
-Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see https://rg3.github.io/youtube-dl/download.html .
+Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see the [youtube-dl Download Page](https://rg3.github.io/youtube-dl/download.html).
# DESCRIPTION
**youtube-dl** is a small command-line program to download videos from
@@ -48,110 +49,220 @@ which means you can modify it, redistribute it or use it however you like.
# OPTIONS
-h, --help Print this help text and exit
--version Print program version and exit
- -U, --update Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)
- -i, --ignore-errors Continue on download errors, for example to skip unavailable videos in a playlist
- --abort-on-error Abort downloading of further videos (in the playlist or the command line) if an error occurs
+ -U, --update Update this program to latest version. Make
+ sure that you have sufficient permissions
+ (run with sudo if needed)
+ -i, --ignore-errors Continue on download errors, for example to
+ skip unavailable videos in a playlist
+ --abort-on-error Abort downloading of further videos (in the
+ playlist or the command line) if an error
+ occurs
--dump-user-agent Display the current browser identification
--list-extractors List all supported extractors
- --extractor-descriptions Output descriptions of all supported extractors
- --force-generic-extractor Force extraction to use the generic extractor
- --default-search PREFIX Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple".
- Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The
- default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.
- --ignore-config Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: Do not read the user configuration
- in ~/.config/youtube-dl/config (%APPDATA%/youtube-dl/config.txt on Windows)
- --flat-playlist Do not extract the videos of a playlist, only list them.
+ --extractor-descriptions Output descriptions of all supported
+ extractors
+ --force-generic-extractor Force extraction to use the generic
+ extractor
+ --default-search PREFIX Use this prefix for unqualified URLs. For
+ example "gvsearch2:" downloads two videos
+ from google videos for youtube-dl "large
+ apple". Use the value "auto" to let
+ youtube-dl guess ("auto_warning" to emit a
+ warning when guessing). "error" just throws
+ an error. The default value "fixup_error"
+ repairs broken URLs, but emits an error if
+ this is not possible instead of searching.
+ --ignore-config Do not read configuration files. When given
+ in the global configuration file /etc
+ /youtube-dl.conf: Do not read the user
+ configuration in ~/.config/youtube-
+ dl/config (%APPDATA%/youtube-dl/config.txt
+ on Windows)
+ --flat-playlist Do not extract the videos of a playlist,
+ only list them.
--no-color Do not emit color codes in output
## Network Options:
- --proxy URL Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection
+ --proxy URL Use the specified HTTP/HTTPS proxy. Pass in
+ an empty string (--proxy "") for direct
+ connection
--socket-timeout SECONDS Time to wait before giving up, in seconds
- --source-address IP Client-side IP address to bind to (experimental)
- -4, --force-ipv4 Make all connections via IPv4 (experimental)
- -6, --force-ipv6 Make all connections via IPv6 (experimental)
- --cn-verification-proxy URL Use this proxy to verify the IP address for some Chinese sites. The default proxy specified by --proxy (or none, if the options is
- not present) is used for the actual downloading. (experimental)
+ --source-address IP Client-side IP address to bind to
+ (experimental)
+ -4, --force-ipv4 Make all connections via IPv4
+ (experimental)
+ -6, --force-ipv6 Make all connections via IPv6
+ (experimental)
+ --cn-verification-proxy URL Use this proxy to verify the IP address for
+ some Chinese sites. The default proxy
+ specified by --proxy (or none, if the
+ option is not present) is used for the
+ actual downloading. (experimental)
## Video Selection:
--playlist-start NUMBER Playlist video to start at (default is 1)
--playlist-end NUMBER Playlist video to end at (default is last)
- --playlist-items ITEM_SPEC Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8"
- if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will
- download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.
- --match-title REGEX Download only matching titles (regex or caseless sub-string)
- --reject-title REGEX Skip download for matching titles (regex or caseless sub-string)
+ --playlist-items ITEM_SPEC Playlist video items to download. Specify
+ indices of the videos in the playlist
+ separated by commas like: "--playlist-items
+ 1,2,5,8" if you want to download videos
+ indexed 1, 2, 5, 8 in the playlist. You can
+ specify range: "--playlist-items
+ 1-3,7,10-13", it will download the videos
+ at index 1, 2, 3, 7, 10, 11, 12 and 13.
+ --match-title REGEX Download only matching titles (regex or
+ caseless sub-string)
+ --reject-title REGEX Skip download for matching titles (regex or
+ caseless sub-string)
--max-downloads NUMBER Abort after downloading NUMBER files
- --min-filesize SIZE Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)
- --max-filesize SIZE Do not download any videos larger than SIZE (e.g. 50k or 44.6m)
+ --min-filesize SIZE Do not download any videos smaller than
+ SIZE (e.g. 50k or 44.6m)
+ --max-filesize SIZE Do not download any videos larger than SIZE
+ (e.g. 50k or 44.6m)
--date DATE Download only videos uploaded in this date
- --datebefore DATE Download only videos uploaded on or before this date (i.e. inclusive)
- --dateafter DATE Download only videos uploaded on or after this date (i.e. inclusive)
- --min-views COUNT Do not download any videos with less than COUNT views
- --max-views COUNT Do not download any videos with more than COUNT views
- --match-filter FILTER Generic video filter (experimental). Specify any key (see help for -o for a list of available keys) to match if the key is present,
- !key to check if the key is not present,key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare against
- a number, and & to require multiple matches. Values which are not known are excluded unless you put a question mark (?) after the
- operator.For example, to only match videos that have been liked more than 100 times and disliked less than 50 times (or the dislike
- functionality is not available at the given service), but who also have a description, use --match-filter "like_count > 100 &
+ --datebefore DATE Download only videos uploaded on or before
+ this date (i.e. inclusive)
+ --dateafter DATE Download only videos uploaded on or after
+ this date (i.e. inclusive)
+ --min-views COUNT Do not download any videos with less than
+ COUNT views
+ --max-views COUNT Do not download any videos with more than
+ COUNT views
+ --match-filter FILTER Generic video filter (experimental).
+ Specify any key (see help for -o for a list
+ of available keys) to match if the key is
+ present, !key to check if the key is not
+ present, key > NUMBER (like "comment_count >
+ 12", also works with >=, <, <=, !=, =) to
+ compare against a number, and & to require
+ multiple matches. Values which are not
+ known are excluded unless you put a
+ question mark (?) after the operator. For
+ example, to only match videos that have
+ been liked more than 100 times and disliked
+ less than 50 times (or the dislike
+ functionality is not available at the given
+ service), but who also have a description,
+ use --match-filter "like_count > 100 &
dislike_count <? 50 & description" .
- --no-playlist Download only the video, if the URL refers to a video and a playlist.
- --yes-playlist Download the playlist, if the URL refers to a video and a playlist.
- --age-limit YEARS Download only videos suitable for the given age
- --download-archive FILE Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.
- --include-ads Download advertisements as well (experimental)
+ --no-playlist Download only the video, if the URL refers
+ to a video and a playlist.
+ --yes-playlist Download the playlist, if the URL refers to
+ a video and a playlist.
+ --age-limit YEARS Download only videos suitable for the given
+ age
+ --download-archive FILE Download only videos not listed in the
+ archive file. Record the IDs of all
+ downloaded videos in it.
+ --include-ads Download advertisements as well
+ (experimental)
## Download Options:
- -r, --rate-limit LIMIT Maximum download rate in bytes per second (e.g. 50K or 4.2M)
- -R, --retries RETRIES Number of retries (default is 10), or "infinite".
- --buffer-size SIZE Size of download buffer (e.g. 1024 or 16K) (default is 1024)
- --no-resize-buffer Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.
+ -r, --rate-limit LIMIT Maximum download rate in bytes per second
+ (e.g. 50K or 4.2M)
+ -R, --retries RETRIES Number of retries (default is 10), or
+ "infinite".
+ --buffer-size SIZE Size of download buffer (e.g. 1024 or 16K)
+ (default is 1024)
+ --no-resize-buffer Do not automatically adjust the buffer
+ size. By default, the buffer size is
+ automatically resized from an initial value
+ of SIZE.
--playlist-reverse Download playlist videos in reverse order
- --xattr-set-filesize Set file xattribute ytdl.filesize with expected filesize (experimental)
- --hls-prefer-native Use the native HLS downloader instead of ffmpeg (experimental)
- --external-downloader COMMAND Use the specified external downloader. Currently supports aria2c,axel,curl,httpie,wget
- --external-downloader-args ARGS Give these arguments to the external downloader
+ --xattr-set-filesize Set file xattribute ytdl.filesize with
+ expected filesize (experimental)
+ --hls-prefer-native Use the native HLS downloader instead of
+ ffmpeg (experimental)
+ --external-downloader COMMAND Use the specified external downloader.
+ Currently supports
+ aria2c,axel,curl,httpie,wget
+ --external-downloader-args ARGS Give these arguments to the external
+ downloader
## Filesystem Options:
- -a, --batch-file FILE File containing URLs to download ('-' for stdin)
+ -a, --batch-file FILE File containing URLs to download ('-' for
+ stdin)
--id Use only video ID in file name
- -o, --output TEMPLATE Output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader
- nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(format)s for
- the format description (like "22 - 1280x720" or "HD"), %(format_id)s for the unique id of the format (like YouTube's itags: "137"),
- %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id,
- %(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in,
- %(playlist_index)s for the position in the playlist. %(height)s and %(width)s for the width and height of the video format.
- %(resolution)s for a textual description of the resolution of the video format. %% for a literal percent. Use - to output to stdout.
- Can also be used to download to a different directory, for example with -o '/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
- --autonumber-size NUMBER Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given
- --restrict-filenames Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames
- -A, --auto-number [deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000
- -t, --title [deprecated] Use title in file name (default)
+ -o, --output TEMPLATE Output filename template. Use %(title)s to
+ get the title, %(uploader)s for the
+ uploader name, %(uploader_id)s for the
+ uploader nickname if different,
+ %(autonumber)s to get an automatically
+ incremented number, %(ext)s for the
+ filename extension, %(format)s for the
+ format description (like "22 - 1280x720" or
+ "HD"), %(format_id)s for the unique id of
+ the format (like YouTube's itags: "137"),
+ %(upload_date)s for the upload date
+ (YYYYMMDD), %(extractor)s for the provider
+ (youtube, metacafe, etc), %(id)s for the
+ video id, %(playlist_title)s,
+ %(playlist_id)s, or %(playlist)s (=title if
+ present, ID otherwise) for the playlist the
+ video is in, %(playlist_index)s for the
+ position in the playlist. %(height)s and
+ %(width)s for the width and height of the
+ video format. %(resolution)s for a textual
+ description of the resolution of the video
+ format. %% for a literal percent. Use - to
+ output to stdout. Can also be used to
+ download to a different directory, for
+ example with -o '/my/downloads/%(uploader)s
+ /%(title)s-%(id)s.%(ext)s' .
+ --autonumber-size NUMBER Specify the number of digits in
+ %(autonumber)s when it is present in output
+ filename template or --auto-number option
+ is given
+ --restrict-filenames Restrict filenames to only ASCII
+ characters, and avoid "&" and spaces in
+ filenames
+ -A, --auto-number [deprecated; use -o
+ "%(autonumber)s-%(title)s.%(ext)s" ] Number
+ downloaded files starting from 00000
+ -t, --title [deprecated] Use title in file name
+ (default)
-l, --literal [deprecated] Alias of --title
-w, --no-overwrites Do not overwrite files
- -c, --continue Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.
- --no-continue Do not resume partially downloaded files (restart from beginning)
- --no-part Do not use .part files - write directly into output file
- --no-mtime Do not use the Last-modified header to set the file modification time
- --write-description Write video description to a .description file
+ -c, --continue Force resume of partially downloaded files.
+ By default, youtube-dl will resume
+ downloads if possible.
+ --no-continue Do not resume partially downloaded files
+ (restart from beginning)
+ --no-part Do not use .part files - write directly
+ into output file
+ --no-mtime Do not use the Last-modified header to set
+ the file modification time
+ --write-description Write video description to a .description
+ file
--write-info-json Write video metadata to a .info.json file
- --write-annotations Write video annotations to a .annotations.xml file
- --load-info FILE JSON file containing the video information (created with the "--write-info-json" option)
- --cookies FILE File to read cookies from and dump cookie jar in
- --cache-dir DIR Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl
- or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may
- change.
+ --write-annotations Write video annotations to a
+ .annotations.xml file
+ --load-info FILE JSON file containing the video information
+ (created with the "--write-info-json"
+ option)
+ --cookies FILE File to read cookies from and dump cookie
+ jar in
+ --cache-dir DIR Location in the filesystem where youtube-dl
+ can store some downloaded information
+ permanently. By default $XDG_CACHE_HOME
+ /youtube-dl or ~/.cache/youtube-dl . At the
+ moment, only YouTube player files (for
+ videos with obfuscated signatures) are
+ cached, but that may change.
--no-cache-dir Disable filesystem caching
--rm-cache-dir Delete all filesystem cache files
## Thumbnail images:
--write-thumbnail Write thumbnail image to disk
--write-all-thumbnails Write all thumbnail image formats to disk
- --list-thumbnails Simulate and list all available thumbnail formats
+ --list-thumbnails Simulate and list all available thumbnail
+ formats
## Verbosity / Simulation Options:
-q, --quiet Activate quiet mode
--no-warnings Ignore warnings
- -s, --simulate Do not download the video and do not write anything to disk
+ -s, --simulate Do not download the video and do not write
+ anything to disk
--skip-download Do not download the video
-g, --get-url Simulate, quiet but print URL
-e, --get-title Simulate, quiet but print title
@@ -161,93 +272,151 @@ which means you can modify it, redistribute it or use it however you like.
--get-duration Simulate, quiet but print video length
--get-filename Simulate, quiet but print output filename
--get-format Simulate, quiet but print output format
- -j, --dump-json Simulate, quiet but print JSON information. See --output for a description of available keys.
- -J, --dump-single-json Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist
- information in a single line.
- --print-json Be quiet and print the video information as JSON (video is still being downloaded).
+ -j, --dump-json Simulate, quiet but print JSON information.
+ See --output for a description of available
+ keys.
+ -J, --dump-single-json Simulate, quiet but print JSON information
+ for each command-line argument. If the URL
+ refers to a playlist, dump the whole
+ playlist information in a single line.
+ --print-json Be quiet and print the video information as
+ JSON (video is still being downloaded).
--newline Output progress bar as new lines
--no-progress Do not print progress bar
--console-title Display progress in console titlebar
-v, --verbose Print various debugging information
- --dump-pages Print downloaded pages encoded using base64 to debug problems (very verbose)
- --write-pages Write downloaded intermediary pages to files in the current directory to debug problems
+ --dump-pages Print downloaded pages encoded using base64
+ to debug problems (very verbose)
+ --write-pages Write downloaded intermediary pages to
+ files in the current directory to debug
+ problems
--print-traffic Display sent and read HTTP traffic
-C, --call-home Contact the youtube-dl server for debugging
- --no-call-home Do NOT contact the youtube-dl server for debugging
+ --no-call-home Do NOT contact the youtube-dl server for
+ debugging
## Workarounds:
--encoding ENCODING Force the specified encoding (experimental)
--no-check-certificate Suppress HTTPS certificate validation
- --prefer-insecure Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)
+ --prefer-insecure Use an unencrypted connection to retrieve
+ information about the video. (Currently
+ supported only for YouTube)
--user-agent UA Specify a custom user agent
- --referer URL Specify a custom referer, use if the video access is restricted to one domain
- --add-header FIELD:VALUE Specify a custom HTTP header and its value, separated by a colon ':'. You can use this option multiple times
- --bidi-workaround Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH
- --sleep-interval SECONDS Number of seconds to sleep before each download.
+ --referer URL Specify a custom referer, use if the video
+ access is restricted to one domain
+ --add-header FIELD:VALUE Specify a custom HTTP header and its value,
+ separated by a colon ':'. You can use this
+ option multiple times
+ --bidi-workaround Work around terminals that lack
+ bidirectional text support. Requires bidiv
+ or fribidi executable in PATH
+ --sleep-interval SECONDS Number of seconds to sleep before each
+ download.
## Video Format Options:
- -f, --format FORMAT Video format code, see the "FORMAT SELECTION" for all the info
+ -f, --format FORMAT Video format code, see the "FORMAT
+ SELECTION" for all the info
--all-formats Download all available video formats
- --prefer-free-formats Prefer free video formats unless a specific one is requested
- -F, --list-formats List all available formats
- --youtube-skip-dash-manifest Do not download the DASH manifests and related data on YouTube videos
- --merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv. Ignored if no
- merge is required
+ --prefer-free-formats Prefer free video formats unless a specific
+ one is requested
+ -F, --list-formats List all available formats of requested
+ videos
+ --youtube-skip-dash-manifest Do not download the DASH manifests and
+ related data on YouTube videos
+ --merge-output-format FORMAT If a merge is required (e.g.
+ bestvideo+bestaudio), output to given
+ container format. One of mkv, mp4, ogg,
+ webm, flv. Ignored if no merge is required
## Subtitle Options:
--write-sub Write subtitle file
- --write-auto-sub Write automatic subtitle file (YouTube only)
- --all-subs Download all the available subtitles of the video
+ --write-auto-sub Write automatically generated subtitle file
+ (YouTube only)
+ --all-subs Download all the available subtitles of the
+ video
--list-subs List all available subtitles for the video
- --sub-format FORMAT Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"
- --sub-lang LANGS Languages of the subtitles to download (optional) separated by commas, use IETF language tags like 'en,pt'
+ --sub-format FORMAT Subtitle format, accepts formats
+ preference, for example: "srt" or
+ "ass/srt/best"
+ --sub-lang LANGS Languages of the subtitles to download
+ (optional) separated by commas, use IETF
+ language tags like 'en,pt'
## Authentication Options:
-u, --username USERNAME Login with this account ID
- -p, --password PASSWORD Account password. If this option is left out, youtube-dl will ask interactively.
+ -p, --password PASSWORD Account password. If this option is left
+ out, youtube-dl will ask interactively.
-2, --twofactor TWOFACTOR Two-factor auth code
-n, --netrc Use .netrc authentication data
--video-password PASSWORD Video password (vimeo, smotri, youku)
## Post-processing Options:
- -x, --extract-audio Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)
- --audio-format FORMAT Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "best" by default
- --audio-quality QUALITY Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default
- 5)
- --recode-video FORMAT Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)
+ -x, --extract-audio Convert video files to audio-only files
+ (requires ffmpeg or avconv and ffprobe or
+ avprobe)
+ --audio-format FORMAT Specify audio format: "best", "aac",
+ "vorbis", "mp3", "m4a", "opus", or "wav";
+ "best" by default
+ --audio-quality QUALITY Specify ffmpeg/avconv audio quality, insert
+ a value between 0 (better) and 9 (worse)
+ for VBR or a specific bitrate like 128K
+ (default 5)
+ --recode-video FORMAT Encode the video to another format if
+ necessary (currently supported:
+ mp4|flv|ogg|webm|mkv|avi)
--postprocessor-args ARGS Give these arguments to the postprocessor
- -k, --keep-video Keep the video file on disk after the post-processing; the video is erased by default
- --no-post-overwrites Do not overwrite post-processed files; the post-processed files are overwritten by default
- --embed-subs Embed subtitles in the video (only for mkv and mp4 videos)
+ -k, --keep-video Keep the video file on disk after the post-
+ processing; the video is erased by default
+ --no-post-overwrites Do not overwrite post-processed files; the
+ post-processed files are overwritten by
+ default
+ --embed-subs Embed subtitles in the video (only for mkv
+ and mp4 videos)
--embed-thumbnail Embed thumbnail in the audio as cover art
--add-metadata Write metadata to the video file
- --metadata-from-title FORMAT Parse additional metadata like song title / artist from the video title. The format syntax is the same as --output, the parsed
- parameters replace existing values. Additional templates: %(album)s, %(artist)s. Example: --metadata-from-title "%(artist)s -
- %(title)s" matches a title like "Coldplay - Paradise"
- --xattrs Write metadata to the video file's xattrs (using dublin core and xdg standards)
- --fixup POLICY Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn (the default;
- fix file if we can, warn otherwise)
- --prefer-avconv Prefer avconv over ffmpeg for running the postprocessors (default)
- --prefer-ffmpeg Prefer ffmpeg over avconv for running the postprocessors
- --ffmpeg-location PATH Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.
- --exec CMD Execute a command on the file after downloading, similar to find's -exec syntax. Example: --exec 'adb push {} /sdcard/Music/ && rm
- {}'
- --convert-subtitles FORMAT Convert the subtitles to other format (currently supported: srt|ass|vtt)
+ --metadata-from-title FORMAT Parse additional metadata like song title /
+ artist from the video title. The format
+ syntax is the same as --output, the parsed
+ parameters replace existing values.
+ Additional templates: %(album)s,
+ %(artist)s. Example: --metadata-from-title
+ "%(artist)s - %(title)s" matches a title
+ like "Coldplay - Paradise"
+ --xattrs Write metadata to the video file's xattrs
+ (using dublin core and xdg standards)
+ --fixup POLICY Automatically correct known faults of the
+ file. One of never (do nothing), warn (only
+ emit a warning), detect_or_warn (the
+ default; fix file if we can, warn
+ otherwise)
+ --prefer-avconv Prefer avconv over ffmpeg for running the
+ postprocessors (default)
+ --prefer-ffmpeg Prefer ffmpeg over avconv for running the
+ postprocessors
+ --ffmpeg-location PATH Location of the ffmpeg/avconv binary;
+ either the path to the binary or its
+ containing directory.
+ --exec CMD Execute a command on the file after
+ downloading, similar to find's -exec
+ syntax. Example: --exec 'adb push {}
+ /sdcard/Music/ && rm {}'
+ --convert-subtitles FORMAT Convert the subtitles to other format
+ (currently supported: srt|ass|vtt)
# CONFIGURATION
-You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, system wide configuration file is located at `/etc/youtube-dl.conf` and user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`. For example, with the following configration file youtube-dl will always extract the audio, not copy the mtime and use proxy:
+You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, the system wide configuration file is located at `/etc/youtube-dl.conf` and the user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`. For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime and use a proxy:
```
--extract-audio
--no-mtime
--proxy 127.0.0.1:3128
```
-You can use `--ignore-config` if you want to disable configuration file for a particular youtube-dl run.
+You can use `--ignore-config` if you want to disable the configuration file for a particular youtube-dl run.
-### Authentication with `.netrc` file ###
+### Authentication with `.netrc` file
-You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in shell command history. You can achieve this using [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on per extractor basis. For that you will need to create `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
+You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on a per extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
```
touch $HOME/.netrc
chmod a-rwx,u+rw $HOME/.netrc
@@ -261,13 +430,13 @@ For example:
machine youtube login myaccount@gmail.com password my_youtube_password
machine twitch login my_twitch_account_name password my_twitch_password
```
-To activate authentication with `.netrc` file you should pass `--netrc` to youtube-dl or to place it in [configuration file](#configuration).
+To activate authentication with the `.netrc` file you should pass `--netrc` to youtube-dl or place it in the [configuration file](#configuration).
-On Windows you may also need to setup `%HOME%` environment variable manually.
+On Windows you may also need to set up the `%HOME%` environment variable manually.
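
The `.netrc` credentials are also honored when youtube-dl is embedded in a Python program; a minimal sketch, reusing the example video from above, is to enable the `usenetrc` option, the API counterpart of `--netrc`:

```python
from __future__ import unicode_literals
import youtube_dl

# Read login/password for supported extractors from ~/.netrc
# instead of passing --username/--password on every run.
ydl_opts = {'usenetrc': True}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```
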
# OUTPUT TEMPLATE
-The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parenthesis, followed by a lowercase S. Allowed names are:
+The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
- `id`: The sequence will be replaced by the video identifier.
- `url`: The sequence will be replaced by the video URL.
@@ -277,9 +446,10 @@ The `-o` option allows users to indicate a template for the output file names. T
- `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
- `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
- `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
- - `playlist`: The name or the id of the playlist that contains the video.
- - `playlist_index`: The index of the video in the playlist, a five-digit number.
+ - `playlist`: The sequence will be replaced by the name or the id of the playlist that contains the video.
+ - `playlist_index`: The sequence will be replaced by the index of the video in the playlist padded with leading zeros according to the total length of the playlist.
- `format_id`: The sequence will be replaced by the format code specified by `--format`.
+ - `duration`: The sequence will be replaced by the length of the video in seconds.
The current default template is `%(title)s-%(id)s.%(ext)s`.
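
When embedding youtube-dl (see the EMBEDDING YOUTUBE-DL section added in this change), the same template is passed through the `outtmpl` option; a minimal sketch reproducing the default template:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # Same special sequences as -o on the command line.
    'outtmpl': '%(title)s-%(id)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```
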
@@ -294,18 +464,18 @@ youtube-dl_test_video_.mp4 # A simple file name
# FORMAT SELECTION
-By default youtube-dl tries to download the best quality, but sometimes you may want to download other format.
+By default youtube-dl tries to download the best quality, but sometimes you may want to download in a different format.
The simplest case is requesting a specific format, for example `-f 22`. You can get the list of available formats using `--list-formats`, you can also use a file extension (currently it supports aac, m4a, mp3, mp4, ogg, wav, webm) or the special names `best`, `bestvideo`, `bestaudio` and `worst`.
If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes, as in `-f 22/17/18`. You can also filter the video results by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`). This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec, vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a question mark (?) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. Use commas to download multiple formats, such as `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv), for example `-f bestvideo+bestaudio`. Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
-Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some dash formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
+Since the end of April 2015 and version 2015.04.26, youtube-dl uses `-f bestvideo+bestaudio/best` as the default format selection (see #5447, #5456). If ffmpeg or avconv are installed, this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file, giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to download only some DASH formats (for example, if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely pipe it to your media player), i.e. you explicitly specify the output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery to your player immediately instead of waiting until `bestvideo` and `bestaudio` are downloaded and muxed.
-If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
+If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
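As a minimal, hedged sketch of the same selectors used programmatically (the `format` option mirrors `-f`; the video URL is the README's example video):

```python
import youtube_dl

# Up-to-720p video (or unknown height) at more than 500 KBit/s, merged with
# the best audio; falls back to the best single file otherwise.
ydl_opts = {
    'format': 'bestvideo[height<=?720][tbr>500]+bestaudio/best',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```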
# VIDEO SELECTION
-Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`, they accept dates in two formats:
+Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`. They accept dates in two formats:
- Absolute dates: Dates in the format `YYYYMMDD`.
- Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
@@ -319,7 +489,7 @@ $ youtube-dl --dateafter now-6months
# Download only the videos uploaded on January 1, 1970
$ youtube-dl --date 19700101
-$ # will only download the videos uploaded in the 200x decade
+$ # Download only the videos uploaded in the 200x decade
$ youtube-dl --dateafter 20000101 --datebefore 20091231
```
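The same date filtering is available when embedding youtube-dl; a minimal sketch, assuming the `daterange` option and the `DateRange` helper exercised in this patch's test_utils changes:

```python
import youtube_dl
from youtube_dl.utils import DateRange

# Download only videos uploaded in the 200x decade, mirroring the last shell
# example above (the playlist URL is the patch's test playlist).
ydl_opts = {'daterange': DateRange('20000101', '20091231')}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re'])
```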
@@ -331,7 +501,7 @@ If you've followed [our manual installation instructions](http://rg3.github.io/y
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
-If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distributions serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
+If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
@@ -357,7 +527,7 @@ If you have installed youtube-dl with a package manager, pip, setup.py or a tarb
By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, the only option out of `-citw` that is regularly useful is `-i`.
-### Can you please put the -b option back?
+### Can you please put the `-b` option back?
Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, YouTube may not report them as available in the particular high-quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
@@ -365,17 +535,23 @@ Most people asking this question are not aware that youtube-dl now defaults to d
Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We're [considering providing a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a web browser to the YouTube URL, solving the CAPTCHA, and restarting youtube-dl.
+### Do I need any other programs?
+
+youtube-dl works fine on its own on most sites. However, if you want to convert video/audio, you'll need [avconv](https://libav.org/) or [ffmpeg](https://www.ffmpeg.org/). On some sites - most notably YouTube - videos can be retrieved in a higher quality format without sound. youtube-dl will detect whether avconv/ffmpeg is present and automatically pick the best option.
+
+Videos or video formats streamed via the RTMP protocol can only be downloaded when [rtmpdump](https://rtmpdump.mplayerhq.hu/) is installed. Downloading MMS and RTSP videos requires either [mplayer](http://mplayerhq.hu/) or [mpv](https://mpv.io/) to be installed.
+
### I have downloaded a video but how can I play it?
Once the video is fully downloaded, use any video player, such as [vlc](http://www.videolan.org) or [mplayer](http://www.mplayerhq.hu/).
-### I extracted a video URL with -g, but it does not play on another machine / in my webbrowser.
+### I extracted a video URL with `-g`, but it does not play on another machine / in my web browser.
It depends a lot on the service. In many cases, requests for the video (to download/play it) must come from the same IP address and with the same cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used; use `--dump-user-agent` to see the one in use by youtube-dl.
It may be beneficial to use IPv6; in some cases, the restrictions are only applied to IPv4. Some services (sometimes only for a subset of videos) do not restrict the video URL by IP address, cookie, or user-agent, but these are the exception rather than the rule.
-Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using -g, your own downloader must support these as well.
+Please bear in mind that some URL protocols are **not** supported by browsers out of the box, including RTMP. If you are using `-g`, your own downloader must support these as well.
If you want to play the video on a machine that is not running youtube-dl, you can relay the video content from the machine that runs youtube-dl. You can use `-o -` to let youtube-dl stream a video to stdout, or simply allow the player to download the files written by youtube-dl in turn.
@@ -383,13 +559,13 @@ If you want to play the video on a machine that is not running youtube-dl, you c
YouTube switched to a new video info format in July 2011, which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
-### ERROR: unable to download video ###
+### ERROR: unable to download video
YouTube requires an additional signature since September 2012, which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
-### Video URL contains an ampersand and I'm getting some strange output `[1] 2839` or `'v' is not recognized as an internal or external command` ###
+### Video URL contains an ampersand and I'm getting some strange output `[1] 2839` or `'v' is not recognized as an internal or external command`
-That's actually the output from your shell. Since ampersand is one of the special shell characters it's interpreted by shell preventing you from passing the whole URL to youtube-dl. To disable your shell from interpreting the ampersands (or any other special characters) you have to either put the whole URL in quotes or escape them with a backslash (which approach will work depends on your shell).
+That's actually the output from your shell. Since the ampersand is one of the special shell characters, it's interpreted by the shell, preventing you from passing the whole URL to youtube-dl. To keep your shell from interpreting the ampersands (or any other special characters), you have to either put the whole URL in quotes or escape them with a backslash (which approach works depends on your shell).
For example, if your URL is https://www.youtube.com/watch?t=4&v=BaW_jenozKc, you should end up with the following command:
@@ -411,7 +587,7 @@ In February 2015, the new YouTube player contained a character sequence in a str
These two error codes indicate that the service is blocking your IP address because of overuse. Contact the service and ask them to unblock your IP address, or - if you have acquired a whitelisted IP address already - use the [`--proxy` or `--source-address` options](#network-options) to select another IP address.
-### SyntaxError: Non-ASCII character ###
+### SyntaxError: Non-ASCII character
The error
@@ -440,7 +616,7 @@ From then on, after restarting your shell, you will be able to access both youtu
Use the `-o` option to specify an [output template](#output-template), for example `-o "/home/user/videos/%(title)s-%(id)s.%(ext)s"`. If you want this for all of your downloads, put the option into your [configuration file](#configuration).
-### How do I download a video starting with a `-` ?
+### How do I download a video starting with a `-`?
Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the options with `--`:
@@ -449,9 +625,9 @@ Either prepend `http://www.youtube.com/watch?v=` or separate the ID from the opt
### How do I pass cookies to youtube-dl?
-Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`. Note that cookies file must be in Mozilla/Netscape format and the first line of cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have correct [newline format](https://en.wikipedia.org/wiki/Newline) in cookies file and convert newlines if necessary to correspond your OS, namely `CRLF` (`\r\n`) for Windows, `LF` (`\n`) for Linux and `CR` (`\r`) for Mac OS. `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of invalid newline format.
+Use the `--cookies` option, for example `--cookies /path/to/cookies/file.txt`. Note that the cookies file must be in Mozilla/Netscape format and the first line of the cookies file must be either `# HTTP Cookie File` or `# Netscape HTTP Cookie File`. Make sure you have the correct [newline format](https://en.wikipedia.org/wiki/Newline) in the cookies file and convert newlines if necessary to correspond with your OS, namely `CRLF` (`\r\n`) for Windows, `LF` (`\n`) for Linux and `CR` (`\r`) for Mac OS. `HTTP Error 400: Bad Request` when using `--cookies` is a good sign of an invalid newline format.
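If in doubt, a minimal sketch (with a hypothetical path) that rewrites a cookies file using the OS-native newline format:

```python
import io
import os

COOKIES = '/path/to/cookies/file.txt'  # hypothetical path

# Reading in text mode normalizes CRLF/CR/LF to '\n'; writing with newline=''
# disables translation so the platform's own line ending can be emitted.
with io.open(COOKIES, encoding='utf-8') as f:
    content = f.read()
with io.open(COOKIES, 'w', encoding='utf-8', newline='') as f:
    f.write(content.replace('\n', os.linesep))
```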
-Passing cookies to youtube-dl is a good way to workaround login when particular extractor does not implement it explicitly.
+Passing cookies to youtube-dl is a good way to work around login when a particular extractor does not implement it explicitly.
### Can you add support for this anime video site, or another site which shows current movies for free?
@@ -541,17 +717,18 @@ If you want to add support for a new site, you can follow this quick list (assum
webpage = self._download_webpage(url, video_id)
# TODO more code goes here, for example ...
- title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+ title = self._html_search_regex(r'<h1>(.+?)</h1>', webpage, 'title')
return {
'id': video_id,
'title': title,
'description': self._og_search_description(webpage),
+ 'uploader': self._search_regex(r'<div[^>]+id="uploader"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False),
# TODO more properties (see youtube_dl/extractor/common.py)
}
```
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
-6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
@@ -580,7 +757,7 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```
-Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L117-L265). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [`youtube_dl/YoutubeDL.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L121-L269). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
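As a minimal sketch of such a `logger` object (youtube-dl calls its `debug`, `warning` and `error` methods):

```python
import youtube_dl

class MyLogger(object):
    def debug(self, msg):
        pass  # swallow verbose messages

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)

with youtube_dl.YoutubeDL({'logger': MyLogger()}) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```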
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
@@ -621,11 +798,25 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
# BUGS
-Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the irc channel #youtube-dl on freenode.
+Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted to do so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
-**Please include the full output of youtube-dl when run with `-v`**.
+**Please include the full output of youtube-dl when run with `-v`**, i.e. add the `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
+```
+$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj
+[debug] System config: []
+[debug] User config: []
+[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
+[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
+[debug] youtube-dl version 2015.12.06
+[debug] Git HEAD: 135392e
+[debug] Python version 2.6.6 - Windows-2003Server-5.2.3790-SP2
+[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
+[debug] Proxy map: {}
+...
+```
+**Do not post screenshots of the verbose log; only plain text is acceptable.**
-The output (including the first lines) contain important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
+The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
Please re-read your issue to avoid a couple of common mistakes (you can and should use this as a checklist):
@@ -641,19 +832,19 @@ So please elaborate on what feature you are requesting, or what bug you want to
If your report is shorter than two lines, it is almost certainly missing some of these, which makes it hard for us to respond to it. We're often too polite to close the issue outright, but the missing info makes misinterpretation likely. As a committer myself, I often get frustrated by these issues, since the only possible way for me to move forward on them is to ask for clarification over and over.
-For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
+For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the `-v` flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.
-If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
+If your server has multiple IPs or you suspect censorship, adding `--call-home` may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
-**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like `http://www.youtube.com/watch?v=BaW_jenozKc`. There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. `http://www.youtube.com/`) is *not* an example URL.
### Are you using the latest version?
-Before reporting any issue, type youtube-dl -U. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
+Before reporting any issue, type `youtube-dl -U`. This should report that you're up-to-date. About 20% of the reports we receive are already fixed, but people are using outdated versions. This goes for feature requests as well.
### Is the issue already documented?
-Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or at https://github.com/rg3/youtube-dl/search?type=Issues . If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/rg3/youtube-dl/search?type=Issues) of this repository. If there is an issue, feel free to write something along the lines of "This affects me as well, with version 2015.01.01. Here is some more information on the issue: ...". While some issues may be old, a new post into them often spurs rapid activity.
### Why are existing options not enough?
@@ -683,4 +874,4 @@ It may sound strange, but some bug reports we receive are completely unrelated t
youtube-dl is released into the public domain by the copyright holders.
-This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain.
+This README file was originally written by [Daniel Bolton](https://github.com/dbbolton) and is likewise released into the public domain.
diff --git a/devscripts/bash-completion.py b/devscripts/bash-completion.py
index cd26cc089..ce68f26f9 100755
--- a/devscripts/bash-completion.py
+++ b/devscripts/bash-completion.py
@@ -5,7 +5,7 @@ import os
from os.path import dirname as dirn
import sys
-sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
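(Context for the change above and the identical ones below: `sys.path` is searched front to back, so prepending the repository root presumably guarantees that the in-tree `youtube_dl` shadows any system-wide installation. A hypothetical illustration:)

```python
import sys

# '/path/to/checkout' is a hypothetical repository root.
sys.path.insert(0, '/path/to/checkout')  # resolved first on import
sys.path.append('/path/to/checkout')     # resolved only if nothing earlier matches
```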
diff --git a/devscripts/fish-completion.py b/devscripts/fish-completion.py
index c2f238798..41629d87d 100755
--- a/devscripts/fish-completion.py
+++ b/devscripts/fish-completion.py
@@ -6,7 +6,7 @@ import os
from os.path import dirname as dirn
import sys
-sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
from youtube_dl.utils import shell_quote
diff --git a/devscripts/gh-pages/update-sites.py b/devscripts/gh-pages/update-sites.py
index d3ef5f0b5..503c1372f 100755
--- a/devscripts/gh-pages/update-sites.py
+++ b/devscripts/gh-pages/update-sites.py
@@ -6,7 +6,7 @@ import os
import textwrap
# We must be able to import youtube_dl
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import youtube_dl
diff --git a/devscripts/make_supportedsites.py b/devscripts/make_supportedsites.py
index 3df4385a6..8cb4a4638 100644
--- a/devscripts/make_supportedsites.py
+++ b/devscripts/make_supportedsites.py
@@ -9,7 +9,7 @@ import sys
# Import youtube_dl
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
-sys.path.append(ROOT_DIR)
+sys.path.insert(0, ROOT_DIR)
import youtube_dl
diff --git a/devscripts/prepare_manpage.py b/devscripts/prepare_manpage.py
index 7ece37754..776e6556e 100644
--- a/devscripts/prepare_manpage.py
+++ b/devscripts/prepare_manpage.py
@@ -8,6 +8,35 @@ import re
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
README_FILE = os.path.join(ROOT_DIR, 'README.md')
+
+def filter_options(readme):
+ ret = ''
+ in_options = False
+ for line in readme.split('\n'):
+ if line.startswith('# '):
+ if line[2:].startswith('OPTIONS'):
+ in_options = True
+ else:
+ in_options = False
+
+ if in_options:
+ if line.lstrip().startswith('-'):
+ option, description = re.split(r'\s{2,}', line.lstrip())
+ split_option = option.split(' ')
+
+ if not split_option[-1].startswith('-'): # metavar
+ option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])
+
+ # Pandoc's definition_lists. See http://pandoc.org/README.html
+ # for more information.
+ ret += '\n%s\n: %s\n' % (option, description)
+ else:
+ ret += line.lstrip() + '\n'
+ else:
+ ret += line + '\n'
+
+ return ret
+
with io.open(README_FILE, encoding='utf-8') as f:
readme = f.read()
@@ -26,6 +55,8 @@ readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
readme = PREFIX + readme
+readme = filter_options(readme)
+
if sys.version_info < (3, 0):
print(readme.encode('utf-8'))
else:
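For orientation, a sketch of the rewrite `filter_options` performs, run in the context of the script above (the sample line is hypothetical):

```python
sample = '# OPTIONS\n-h, --help                       Print this help text and exit\n'
print(filter_options(sample))
# Expected output (a Pandoc definition list entry):
# # OPTIONS
#
# -h, --help
# : Print this help text and exit
```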
diff --git a/devscripts/zsh-completion.py b/devscripts/zsh-completion.py
index f200f2c80..04728e8e2 100755
--- a/devscripts/zsh-completion.py
+++ b/devscripts/zsh-completion.py
@@ -5,7 +5,7 @@ import os
from os.path import dirname as dirn
import sys
-sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 04b9959ac..8253335e3 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -15,8 +15,12 @@
- **abc.net.au**
- **Abc7News**
- **AcademicEarth:Course**
+ - **acast**
+ - **acast:channel**
- **AddAnime**
- **AdobeTV**
+ - **AdobeTVChannel**
+ - **AdobeTVShow**
- **AdobeTVVideo**
- **AdultSwim**
- **Aftenposten**
@@ -43,6 +47,7 @@
- **arte.tv:future**
- **AtresPlayer**
- **ATTTechChannel**
+ - **AudiMedia**
- **audiomack**
- **audiomack:album**
- **Azubu**
@@ -53,6 +58,7 @@
- **Bandcamp:album**
- **bbc**: BBC
- **bbc.co.uk**: BBC iPlayer
+ - **bbc.co.uk:article**: BBC articles
- **BeatportPro**
- **Beeg**
- **BehindKink**
@@ -66,7 +72,8 @@
- **Bpb**: Bundeszentrale für politische Bildung
- **BR**: Bayerischer Rundfunk Mediathek
- **Break**
- - **Brightcove**
+ - **brightcove:legacy**
+ - **brightcove:new**
- **bt:article**: Bergens Tidende Articles
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
- **BuzzFeed**
@@ -81,6 +88,7 @@
- **CBSSports**
- **CeskaTelevize**
- **channel9**: Channel 9
+ - **Chaturbate**
- **Chilloutzone**
- **chirbit**
- **chirbit:profile**
@@ -89,8 +97,10 @@
- **Clipfish**
- **cliphunter**
- **Clipsyndicate**
+ - **cloudtime**: CloudTime
- **Cloudy**
- **Clubic**
+ - **Clyp**
- **cmt.com**
- **CNET**
- **CNN**
@@ -101,7 +111,7 @@
- **ComCarCoff**
- **ComedyCentral**
- **ComedyCentralShows**: The Daily Show / The Colbert Report
- - **CondeNast**: Condé Nast media group: Condé Nast, GQ, Glamour, Vanity Fair, Vogue, W Magazine, WIRED
+ - **CondeNast**: Condé Nast media group: Allure, Architectural Digest, Ars Technica, Bon Appétit, Brides, Condé Nast, Condé Nast Traveler, Details, Epicurious, GQ, Glamour, Golf Digest, SELF, Teen Vogue, The New Yorker, Vanity Fair, Vogue, W Magazine, WIRED
- **Cracked**
- **Criterion**
- **CrooksAndLiars**
@@ -120,11 +130,12 @@
- **DctpTv**
- **DeezerPlaylist**
- **defense.gouv.fr**
+ - **democracynow**
- **DHM**: Filmarchiv - Deutsches Historisches Museum
- **Discovery**
- - **divxstage**: DivxStage
- **Dotsub**
- **DouyuTV**: 斗鱼
+ - **DPlay**
- **dramafever**
- **dramafever:series**
- **DRBonanza**
@@ -151,6 +162,7 @@
- **Escapist**
- **ESPN** (Currently broken)
- **EsriVideo**
+ - **Europa**
- **EveryonesMixtape**
- **exfm**: ex.fm
- **ExpoTV**
@@ -158,8 +170,8 @@
- **facebook**
- **faz.net**
- **fc2**
+ - **Fczenit**
- **fernsehkritik.tv**
- - **fernsehkritik.tv:postecke**
- **Firstpost**
- **FiveTV**
- **Flickr**
@@ -176,7 +188,9 @@
- **Freesound**
- **freespeech.org**
- **FreeVideo**
+ - **Funimation**
- **FunnyOrDie**
+ - **GameInformer**
- **Gamekings**
- **GameOne**
- **gameone:playlist**
@@ -192,10 +206,10 @@
- **Giga**
- **Glide**: Glide mobile video messages (glide.me)
- **Globo**
+ - **GloboArticle**
- **GodTube**
- **GoldenMoustache**
- **Golem**
- - **GorillaVid**: GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net
- **Goshgay**
- **Groupon**
- **Hark**
@@ -209,7 +223,6 @@
- **hitbox**
- **hitbox:live**
- **HornBunny**
- - **HostingBulk**
- **HotNewHipHop**
- **Howcast**
- **HowStuffWorks**
@@ -266,6 +279,9 @@
- **Libsyn**
- **life:embed**
- **lifenews**: LIFE | NEWS
+ - **limelight**
+ - **limelight:channel**
+ - **limelight:channel_list**
- **LiveLeak**
- **livestream**
- **livestream:original**
@@ -277,16 +293,15 @@
- **macgamestore**: MacGameStore trailers
- **mailru**: Видео@Mail.Ru
- **Malemotion**
- - **MDR**
+ - **MDR**: MDR.DE and KiKA
- **media.ccc.de**
- - **MegaVideoz**
- **metacafe**
- **Metacritic**
- **Mgoon**
- **Minhateca**
- **MinistryGrid**
- **miomio.tv**
- - **mitele.es**
+ - **MiTele**: mitele.es
- **mixcloud**
- **MLB**
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
@@ -300,7 +315,6 @@
- **MovieClips**
- **MovieFap**
- **Moviezine**
- - **movshare**: MovShare
- **MPORA**
- **MSNBC**
- **MTV**
@@ -309,7 +323,6 @@
- **mtvservices:embedded**
- **MuenchenTV**: münchen.tv
- **MusicPlayOn**
- - **MusicVault**
- **muzu.tv**
- **Mwave**
- **MySpace**
@@ -318,7 +331,6 @@
- **Myvi**
- **myvideo**
- **MyVidster**
- - **N-JOY**
- **n-tv.de**
- **NationalGeographic**
- **Naver**
@@ -327,7 +339,9 @@
- **NBCNews**
- **NBCSports**
- **NBCSportsVPlayer**
- - **ndr**: NDR.de - Mediathek
+ - **ndr**: NDR.de - Norddeutscher Rundfunk
+ - **ndr:embed**
+ - **ndr:embed:base**
- **NDTV**
- **NerdCubedFeed**
- **Nerdist**
@@ -350,13 +364,18 @@
- **nhl.com:videocenter**: NHL videocenter category
- **niconico**: ニコニコ動画
- **NiconicoPlaylist**
+ - **njoy**: N-JOY
+ - **njoy:embed**
- **Noco**
- **Normalboots**
- **NosVideo**
- **Nova**: TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz
- **novamov**: NovaMov
- - **Nowness**
+ - **nowness**
+ - **nowness:playlist**
+ - **nowness:series**
- **NowTV**
+ - **NowTVList**
- **nowvideo**: NowVideo
- **npo**: npo.nl and ntr.nl
- **npo.nl:live**
@@ -376,14 +395,13 @@
- **OnionStudios**
- **Ooyala**
- **OoyalaExternal**
- - **OpenFilm**
- **orf:fm4**: radio FM4
- **orf:iptv**: iptv.ORF.at
- **orf:oe1**: Radio Österreich 1
- **orf:tvthek**: ORF TVthek
- **parliamentlive.tv**: UK parliament videos
- **Patreon**
- - **PBS**
+ - **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
- **Periscope**: Periscope
- **PhilharmonieDeParis**: Philharmonie de Paris
- **Phoenix**
@@ -417,7 +435,6 @@
- **qqmusic:playlist**: QQ音乐 - 歌单
- **qqmusic:singer**: QQ音乐 - 歌手
- **qqmusic:toplist**: QQ音乐 - 排行榜
- - **Quickscope**: Quick Scope
- **QuickVid**
- **R7**
- **radio.de**
@@ -470,6 +487,8 @@
- **Shared**: shared.sx and vivo.sx
- **ShareSix**
- **Sina**
+ - **skynewsarabia:article**
+ - **skynewsarabia:video**
- **Slideshare**
- **Slutload**
- **smotri**: Smotri.com
@@ -484,6 +503,7 @@
- **soompi:show**
- **soundcloud**
- **soundcloud:playlist**
+ - **soundcloud:search**: Soundcloud search
- **soundcloud:set**
- **soundcloud:user**
- **soundgasm**
@@ -510,6 +530,7 @@
- **SSA**
- **stanfordoc**: Stanford Open ClassRoom
- **Steam**
+ - **Stitcher**
- **streamcloud.eu**
- **StreamCZ**
- **StreetVoice**
@@ -531,7 +552,7 @@
- **techtv.mit.edu**
- **ted**
- **TeleBruxelles**
- - **telecinco.es**
+ - **Telecinco**: telecinco.es, cuatro.com and mediaset.es
- **Telegraaf**
- **TeleMB**
- **TeleTask**
@@ -583,7 +604,8 @@
- **twitch:stream**
- **twitch:video**
- **twitch:vod**
- - **TwitterCard**
+ - **twitter**
+ - **twitter:card**
- **Ubu**
- **udemy**
- **udemy:course**
@@ -608,7 +630,6 @@
- **video.mit.edu**
- **VideoDetective**
- **videofy.me**
- - **videolectures.net**
- **VideoMega**
- **VideoPremium**
- **VideoTt**: video.tt - Your True Tube
@@ -618,6 +639,7 @@
- **vier**
- **vier:videos**
- **Viewster**
+ - **Viidea**
- **viki**
- **viki:channel**
- **vimeo**
@@ -633,6 +655,7 @@
- **vine:user**
- **vk**: VK
- **vk:uservideos**: VK - User's Videos
+ - **vlive**
- **Vodlocker**
- **VoiceRepublic**
- **Vporn**
@@ -651,6 +674,7 @@
- **WebOfStories**
- **WebOfStoriesPlaylist**
- **Weibo**
+ - **wholecloud**: WholeCloud
- **Wimp**
- **Wistia**
- **WNL**
@@ -659,6 +683,7 @@
- **WSJ**: Wall Street Journal
- **XBef**
- **XboxClips**
+ - **XFileShare**: XFileShare based sites: GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net, filehoot.com and vidto.me
- **XHamster**
- **XHamsterEmbed**
- **XMinus**
@@ -693,6 +718,7 @@
- **youtube:show**: YouTube.com (multi-season) shows
- **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
- **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword)
+ - **youtube:user:playlists**: YouTube.com user playlists
- **youtube:watchlater**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
- **Zapiks**
- **ZDF**
diff --git a/setup.py b/setup.py
index 4686260e0..bfe931f5b 100644
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@ py2exe_options = {
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
- "dll_excludes": ['w9xpopen.exe'],
+ "dll_excludes": ['w9xpopen.exe', 'crypt32.dll'],
}
py2exe_console = [{
diff --git a/test/helper.py b/test/helper.py
index cb6eec8d9..bdd7acca4 100644
--- a/test/helper.py
+++ b/test/helper.py
@@ -89,66 +89,81 @@ def gettestcases(include_onlymatching=False):
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
-def expect_info_dict(self, got_dict, expected_dict):
+def expect_value(self, got, expected, field):
+ if isinstance(expected, compat_str) and expected.startswith('re:'):
+ match_str = expected[len('re:'):]
+ match_rex = re.compile(match_str)
+
+ self.assertTrue(
+ isinstance(got, compat_str),
+ 'Expected a %s object, but got %s for field %s' % (
+ compat_str.__name__, type(got).__name__, field))
+ self.assertTrue(
+ match_rex.match(got),
+ 'field %s (value: %r) should match %r' % (field, got, match_str))
+ elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
+ start_str = expected[len('startswith:'):]
+ self.assertTrue(
+ isinstance(got, compat_str),
+ 'Expected a %s object, but got %s for field %s' % (
+ compat_str.__name__, type(got).__name__, field))
+ self.assertTrue(
+ got.startswith(start_str),
+ 'field %s (value: %r) should start with %r' % (field, got, start_str))
+ elif isinstance(expected, compat_str) and expected.startswith('contains:'):
+ contains_str = expected[len('contains:'):]
+ self.assertTrue(
+ isinstance(got, compat_str),
+ 'Expected a %s object, but got %s for field %s' % (
+ compat_str.__name__, type(got).__name__, field))
+ self.assertTrue(
+ contains_str in got,
+ 'field %s (value: %r) should contain %r' % (field, got, contains_str))
+ elif isinstance(expected, type):
+ self.assertTrue(
+ isinstance(got, expected),
+ 'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
+ elif isinstance(expected, dict) and isinstance(got, dict):
+ expect_dict(self, got, expected)
+ elif isinstance(expected, list) and isinstance(got, list):
+ self.assertEqual(
+ len(expected), len(got),
+ 'Expect a list of length %d, but got a list of length %d for field %s' % (
+ len(expected), len(got), field))
+ for index, (item_got, item_expected) in enumerate(zip(got, expected)):
+ type_got = type(item_got)
+ type_expected = type(item_expected)
+ self.assertEqual(
+ type_expected, type_got,
+ 'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
+ index, field, type_expected, type_got))
+ expect_value(self, item_got, item_expected, field)
+ else:
+ if isinstance(expected, compat_str) and expected.startswith('md5:'):
+ got = 'md5:' + md5(got)
+ elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
+ self.assertTrue(
+ isinstance(got, (list, dict)),
+ 'Expected field %s to be a list or a dict, but it is of type %s' % (
+ field, type(got).__name__))
+ expected_num = int(expected.partition(':')[2])
+ assertGreaterEqual(
+ self, len(got), expected_num,
+ 'Expected %d items in field %s, but only got %d' % (expected_num, field, len(got)))
+ return
+ self.assertEqual(
+ expected, got,
+ 'Invalid value for field %s, expected %r, got %r' % (field, expected, got))
+
+
+def expect_dict(self, got_dict, expected_dict):
for info_field, expected in expected_dict.items():
- if isinstance(expected, compat_str) and expected.startswith('re:'):
- got = got_dict.get(info_field)
- match_str = expected[len('re:'):]
- match_rex = re.compile(match_str)
+ got = got_dict.get(info_field)
+ expect_value(self, got, expected, info_field)
- self.assertTrue(
- isinstance(got, compat_str),
- 'Expected a %s object, but got %s for field %s' % (
- compat_str.__name__, type(got).__name__, info_field))
- self.assertTrue(
- match_rex.match(got),
- 'field %s (value: %r) should match %r' % (info_field, got, match_str))
- elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
- got = got_dict.get(info_field)
- start_str = expected[len('startswith:'):]
- self.assertTrue(
- isinstance(got, compat_str),
- 'Expected a %s object, but got %s for field %s' % (
- compat_str.__name__, type(got).__name__, info_field))
- self.assertTrue(
- got.startswith(start_str),
- 'field %s (value: %r) should start with %r' % (info_field, got, start_str))
- elif isinstance(expected, compat_str) and expected.startswith('contains:'):
- got = got_dict.get(info_field)
- contains_str = expected[len('contains:'):]
- self.assertTrue(
- isinstance(got, compat_str),
- 'Expected a %s object, but got %s for field %s' % (
- compat_str.__name__, type(got).__name__, info_field))
- self.assertTrue(
- contains_str in got,
- 'field %s (value: %r) should contain %r' % (info_field, got, contains_str))
- elif isinstance(expected, type):
- got = got_dict.get(info_field)
- self.assertTrue(isinstance(got, expected),
- 'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
- else:
- if isinstance(expected, compat_str) and expected.startswith('md5:'):
- got = 'md5:' + md5(got_dict.get(info_field))
- elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
- got = got_dict.get(info_field)
- self.assertTrue(
- isinstance(got, (list, dict)),
- 'Expected field %s to be a list or a dict, but it is of type %s' % (
- info_field, type(got).__name__))
- expected_num = int(expected.partition(':')[2])
- assertGreaterEqual(
- self, len(got), expected_num,
- 'Expected %d items in field %s, but only got %d' % (
- expected_num, info_field, len(got)
- )
- )
- continue
- else:
- got = got_dict.get(info_field)
- self.assertEqual(expected, got,
- 'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+def expect_info_dict(self, got_dict, expected_dict):
+ expect_dict(self, got_dict, expected_dict)
# Check for the presence of mandatory fields
if got_dict.get('_type') not in ('playlist', 'multi_video'):
for key in ('id', 'url', 'title', 'ext'):
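For orientation, a hypothetical use of the new `expect_value` helper from inside a `unittest.TestCase` method:

```python
# 'self' is the test case instance; field names are illustrative only.
expect_value(self, 'youtube-dl test video', 're:^youtube-dl', 'title')
expect_value(self, ['f1', 'f2', 'f3'], 'mincount:2', 'formats')
expect_value(self, {'id': '42', 'ext': 'mp4'}, {'id': '42'}, 'info_dict')
```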
diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py
index be8d12997..938466a80 100644
--- a/test/test_InfoExtractor.py
+++ b/test/test_InfoExtractor.py
@@ -35,10 +35,18 @@ class TestInfoExtractor(unittest.TestCase):
<meta name="og:title" content='Foo'/>
<meta content="Some video's description " name="og:description"/>
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&amp;key2=val2'/>
+ <meta content='application/x-shockwave-flash' property='og:video:type'>
+ <meta content='Foo' property=og:foobar>
+ <meta name="og:test1" content='foo > < bar'/>
+ <meta name="og:test2" content="foo >//< bar"/>
'''
self.assertEqual(ie._og_search_title(html), 'Foo')
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
+ self.assertEqual(ie._og_search_video_url(html, default=None), None)
+ self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
+ self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
+ self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
def test_html_search_meta(self):
ie = self.ie
diff --git a/test/test_all_urls.py b/test/test_all_urls.py
index a9db42b30..a0c11e6c1 100644
--- a/test/test_all_urls.py
+++ b/test/test_all_urls.py
@@ -121,8 +121,8 @@ class TestAllURLsMatching(unittest.TestCase):
def test_pbs(self):
# https://github.com/rg3/youtube-dl/issues/2350
- self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
- self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])
+ self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['pbs'])
+ self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['pbs'])
def test_yahoo_https(self):
# https://github.com/rg3/youtube-dl/issues/2701
diff --git a/test/test_compat.py b/test/test_compat.py
index 4ee0dc99d..b6bfad05e 100644
--- a/test/test_compat.py
+++ b/test/test_compat.py
@@ -13,8 +13,10 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.utils import get_filesystem_encoding
from youtube_dl.compat import (
compat_getenv,
+ compat_etree_fromstring,
compat_expanduser,
compat_shlex_split,
+ compat_str,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
@@ -71,5 +73,20 @@ class TestCompat(unittest.TestCase):
def test_compat_shlex_split(self):
self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
+ def test_compat_etree_fromstring(self):
+ xml = '''
+ <root foo="bar" spam="中文">
+ <normal>foo</normal>
+ <chinese>中文</chinese>
+ <foo><bar>spam</bar></foo>
+ </root>
+ '''
+ doc = compat_etree_fromstring(xml.encode('utf-8'))
+ self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
+ self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
+ self.assertTrue(isinstance(doc.find('normal').text, compat_str))
+ self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
+ self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/test_download.py b/test/test_download.py
index 284418834..a3f1c0644 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -102,7 +102,7 @@ def generator(test_case):
params = get_params(test_case.get('params', {}))
if is_playlist and 'playlist' not in test_case:
- params.setdefault('extract_flat', True)
+ params.setdefault('extract_flat', 'in_playlist')
params.setdefault('skip_download', True)
ydl = YoutubeDL(params, auto_init=False)
diff --git a/test/test_jsinterp.py b/test/test_jsinterp.py
index fc73e5dc2..63c350b8f 100644
--- a/test/test_jsinterp.py
+++ b/test/test_jsinterp.py
@@ -19,6 +19,9 @@ class TestJSInterpreter(unittest.TestCase):
jsi = JSInterpreter('function x3(){return 42;}')
self.assertEqual(jsi.call_function('x3'), 42)
+ jsi = JSInterpreter('var x5 = function(){return 42;}')
+ self.assertEqual(jsi.call_function('x5'), 42)
+
def test_calc(self):
jsi = JSInterpreter('function x4(a){return 2*a+1;}')
self.assertEqual(jsi.call_function('x4', 3), 7)
diff --git a/test/test_subtitles.py b/test/test_subtitles.py
index 0343967d9..75f0ea75f 100644
--- a/test/test_subtitles.py
+++ b/test/test_subtitles.py
@@ -28,6 +28,7 @@ from youtube_dl.extractor import (
ThePlatformFeedIE,
RTVEALaCartaIE,
FunnyOrDieIE,
+ DemocracynowIE,
)
@@ -346,5 +347,25 @@ class TestFunnyOrDieSubtitles(BaseTestSubtitles):
self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4')
+class TestDemocracynowSubtitles(BaseTestSubtitles):
+ url = 'http://www.democracynow.org/shows/2015/7/3'
+ IE = DemocracynowIE
+
+ def test_allsubtitles(self):
+ self.DL.params['writesubtitles'] = True
+ self.DL.params['allsubtitles'] = True
+ subtitles = self.getSubtitles()
+ self.assertEqual(set(subtitles.keys()), set(['en']))
+ self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+
+ def test_subtitles_in_page(self):
+ self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
+ self.DL.params['writesubtitles'] = True
+ self.DL.params['allsubtitles'] = True
+ subtitles = self.getSubtitles()
+ self.assertEqual(set(subtitles.keys()), set(['en']))
+ self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/test_utils.py b/test/test_utils.py
index a5f164c49..1c3290d9b 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -21,6 +21,8 @@ from youtube_dl.utils import (
clean_html,
DateRange,
detect_exe_version,
+ determine_ext,
+ encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
@@ -42,6 +44,7 @@ from youtube_dl.utils import (
sanitize_path,
prepend_extension,
replace_extension,
+ remove_quotes,
shell_quote,
smuggle_url,
str_to_int,
@@ -68,6 +71,9 @@ from youtube_dl.utils import (
cli_valueless_option,
cli_bool_option,
)
+from youtube_dl.compat import (
+ compat_etree_fromstring,
+)
class TestUtil(unittest.TestCase):
@@ -196,6 +202,15 @@ class TestUtil(unittest.TestCase):
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
+ def test_remove_quotes(self):
+ self.assertEqual(remove_quotes(None), None)
+ self.assertEqual(remove_quotes('"'), '"')
+ self.assertEqual(remove_quotes("'"), "'")
+ self.assertEqual(remove_quotes(';'), ';')
+ self.assertEqual(remove_quotes('";'), '";')
+ self.assertEqual(remove_quotes('""'), '')
+ self.assertEqual(remove_quotes('";"'), ';')
+
def test_ordered_set(self):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
@@ -207,8 +222,8 @@ class TestUtil(unittest.TestCase):
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(unescapeHTML('&#x2F;'), '/')
self.assertEqual(unescapeHTML('&#47;'), '/')
- self.assertEqual(
- unescapeHTML('&eacute;'), 'é')
+ self.assertEqual(unescapeHTML('&eacute;'), 'é')
+ self.assertEqual(unescapeHTML('&#2013266066;'), '&#2013266066;')
def test_daterange(self):
_20century = DateRange("19000101", "20000101")
@@ -233,6 +248,14 @@ class TestUtil(unittest.TestCase):
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
'20150202')
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
+ self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
+
+ def test_determine_ext(self):
+ self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
+ self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
+ self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
+ self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
+ self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
def test_find_xpath_attr(self):
testxml = '''<root>
@@ -242,7 +265,7 @@ class TestUtil(unittest.TestCase):
<node x="b" y="d" />
<node x="" />
</root>'''
- doc = xml.etree.ElementTree.fromstring(testxml)
+ doc = compat_etree_fromstring(testxml)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
@@ -263,7 +286,7 @@ class TestUtil(unittest.TestCase):
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
- doc = xml.etree.ElementTree.fromstring(testxml)
+ doc = compat_etree_fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
self.assertEqual(find('media:song/media:author').text, 'The Author')
@@ -275,9 +298,16 @@ class TestUtil(unittest.TestCase):
p = xml.etree.ElementTree.SubElement(div, 'p')
p.text = 'Foo'
self.assertEqual(xpath_element(doc, 'div/p'), p)
+ self.assertEqual(xpath_element(doc, ['div/p']), p)
+ self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
+ self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
self.assertTrue(xpath_element(doc, 'div/bar') is None)
+ self.assertTrue(xpath_element(doc, ['div/bar']) is None)
+ self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
+ self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
+ self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
def test_xpath_text(self):
testxml = '''<root>
@@ -285,7 +315,7 @@ class TestUtil(unittest.TestCase):
<p>Foo</p>
</div>
</root>'''
- doc = xml.etree.ElementTree.fromstring(testxml)
+ doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
@@ -297,7 +327,7 @@ class TestUtil(unittest.TestCase):
<p x="a">Foo</p>
</div>
</root>'''
- doc = xml.etree.ElementTree.fromstring(testxml)
+ doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
@@ -420,11 +450,17 @@ class TestUtil(unittest.TestCase):
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
+ def test_encode_compat_str(self):
+ self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
+ self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
+
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
+ self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
+ self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
@@ -495,6 +531,9 @@ class TestUtil(unittest.TestCase):
"playlist":[{"controls":{"all":null}}]
}''')
+ inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
+ self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
+
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
@@ -627,12 +666,13 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
{'like_count': 190, 'dislike_count': 10}))
def test_parse_dfxp_time_expr(self):
- self.assertEqual(parse_dfxp_time_expr(None), 0.0)
- self.assertEqual(parse_dfxp_time_expr(''), 0.0)
+ self.assertEqual(parse_dfxp_time_expr(None), None)
+ self.assertEqual(parse_dfxp_time_expr(''), None)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
+ self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1)
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
@@ -642,6 +682,9 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
+ <p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
+ <p begin="-1" end="-1">Ignore, two</p>
+ <p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index c889b6f15..26aadb34f 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -57,5 +57,14 @@ class TestYoutubeLists(unittest.TestCase):
entries = result['entries']
self.assertEqual(len(entries), 100)
+ def test_youtube_flat_playlist_titles(self):
+ dl = FakeYDL()
+ dl.params['extract_flat'] = True
+ ie = YoutubePlaylistIE(dl)
+ result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
+ self.assertIsPlaylist(result)
+ for entry in result['entries']:
+ self.assertTrue(entry.get('title'))
+
if __name__ == '__main__':
unittest.main()
diff --git a/tox.ini b/tox.ini
index cd805fe8a..48504329f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26,py27,py33,py34
+envlist = py26,py27,py33,py34,py35
[testenv]
deps =
nose
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index d65253882..50425b8d7 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -28,6 +28,7 @@ if os.name == 'nt':
import ctypes
from .compat import (
+ compat_basestring,
compat_cookiejar,
compat_expanduser,
compat_get_terminal_size,
@@ -37,6 +38,7 @@ from .compat import (
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
+ compat_urllib_request_DataHandler,
)
from .utils import (
ContentTooShortError,
@@ -45,7 +47,9 @@ from .utils import (
DEFAULT_OUTTMPL,
determine_ext,
DownloadError,
+ encode_compat_str,
encodeFilename,
+ error_to_compat_str,
ExtractorError,
format_bytes,
formatSeconds,
@@ -62,6 +66,7 @@ from .utils import (
SameFileError,
sanitize_filename,
sanitize_path,
+ sanitized_Request,
std_headers,
subtitles_filename,
UnavailableVideoError,
@@ -155,7 +160,7 @@ class YoutubeDL(object):
writethumbnail: Write the thumbnail image to a file
write_all_thumbnails: Write all thumbnail formats to files
writesubtitles: Write the video subtitles to a file
- writeautomaticsub: Write the automatic subtitles to a file
+ writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
@@ -492,7 +497,7 @@ class YoutubeDL(object):
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
- tb += compat_str(traceback.format_exc())
+ tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
@@ -571,7 +576,7 @@ class YoutubeDL(object):
if v is not None)
template_dict = collections.defaultdict(lambda: 'NA', template_dict)
- outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL))
+ outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
tmpl = compat_expanduser(outtmpl)
filename = tmpl % template_dict
# Temporary fix for #4787
@@ -579,7 +584,7 @@ class YoutubeDL(object):
# to workaround encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
filename = encodeFilename(filename, True).decode(preferredencoding())
- return filename
+ return sanitize_path(filename)
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
@@ -671,14 +676,14 @@ class YoutubeDL(object):
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
- except ExtractorError as de: # An error we somewhat expected
- self.report_error(compat_str(de), de.format_traceback())
+ except ExtractorError as e: # An error we somewhat expected
+ self.report_error(compat_str(e), e.format_traceback())
break
except MaxDownloadsReached:
raise
except Exception as e:
if self.params.get('ignoreerrors', False):
- self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
+ self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
break
else:
raise
@@ -832,6 +837,7 @@ class YoutubeDL(object):
extra_info=extra)
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
+ self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
elif result_type == 'compat_list':
self.report_warning(
@@ -936,7 +942,7 @@ class YoutubeDL(object):
filter_parts.append(string)
def _remove_unused_ops(tokens):
- # Remove operators that we don't use and join them with the sourrounding strings
+ # Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
@@ -1106,6 +1112,12 @@ class YoutubeDL(object):
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
+ # Formats must be opposite (video+audio)
+ if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
+ self.report_error(
+ 'Both formats %s and %s are video-only, you must specify "-f video+audio"'
+ % (format_1, format_2))
+ return
output_ext = (
formats_info[0]['ext']
if self.params.get('merge_output_format') is None
@@ -1185,7 +1197,7 @@ class YoutubeDL(object):
return res
def _calc_cookies(self, info_dict):
- pr = compat_urllib_request.Request(info_dict['url'])
+ pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
@@ -1232,13 +1244,20 @@ class YoutubeDL(object):
except (ValueError, OverflowError, OSError):
pass
+ subtitles = info_dict.get('subtitles')
+ if subtitles:
+ for _, subtitle in subtitles.items():
+ for subtitle_format in subtitle:
+ if 'ext' not in subtitle_format:
+ subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
+
if self.params.get('listsubtitles', False):
if 'automatic_captions' in info_dict:
self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
- self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles')
+ self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
return
info_dict['requested_subtitles'] = self.process_subtitles(
- info_dict['id'], info_dict.get('subtitles'),
+ info_dict['id'], subtitles,
info_dict.get('automatic_captions'))
# We now pick which formats have to be downloaded
@@ -1442,7 +1461,7 @@ class YoutubeDL(object):
if dn and not os.path.exists(dn):
os.makedirs(dn)
except (OSError, IOError) as err:
- self.report_error('unable to create directory ' + compat_str(err))
+ self.report_error('unable to create directory ' + error_to_compat_str(err))
return
if self.params.get('writedescription', False):
@@ -1493,7 +1512,7 @@ class YoutubeDL(object):
sub_info['url'], info_dict['id'], note=False)
except ExtractorError as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
- (sub_lang, compat_str(err.cause)))
+ (sub_lang, error_to_compat_str(err.cause)))
continue
try:
sub_filename = subtitles_filename(filename, sub_lang, sub_format)
@@ -1862,6 +1881,8 @@ class YoutubeDL(object):
def urlopen(self, req):
""" Start an HTTP download """
+ if isinstance(req, compat_basestring):
+ req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
@@ -1960,8 +1981,9 @@ class YoutubeDL(object):
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
+ data_handler = compat_urllib_request_DataHandler()
opener = compat_urllib_request.build_opener(
- proxy_handler, https_handler, cookie_processor, ydlh)
+ proxy_handler, https_handler, cookie_processor, ydlh, data_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
@@ -2019,4 +2041,4 @@ class YoutubeDL(object):
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
- (t['url'], compat_str(err)))
+ (t['url'], error_to_compat_str(err)))
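Two of the YoutubeDL changes above compose: urlopen() now wraps bare URL strings in sanitized_Request, and the opener gains a data: handler. A quick sketch (default-configured instance assumed):

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({})
    # a plain string is accepted now; a Request object used to be required
    payload = ydl.urlopen('data:text/plain;base64,aGVsbG8=').read()
    assert payload == b'hello'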
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 5e2ed4d4b..9f131f5db 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -377,7 +377,7 @@ def _real_main(argv=None):
with YoutubeDL(ydl_opts) as ydl:
# Update version
if opts.update_self:
- update_self(ydl.to_screen, opts.verbose)
+ update_self(ydl.to_screen, opts.verbose, ydl._opener)
# Remove cache dir
if opts.rm_cachedir:
diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py
index 65a0f891c..42a0f8c6f 100755
--- a/youtube_dl/__main__.py
+++ b/youtube_dl/__main__.py
@@ -11,7 +11,7 @@ if __package__ is None and not hasattr(sys, "frozen"):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
- sys.path.append(os.path.dirname(os.path.dirname(path)))
+ sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
import youtube_dl
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index e32bef279..a3e85264a 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -1,7 +1,10 @@
from __future__ import unicode_literals
+import binascii
import collections
+import email
import getpass
+import io
import optparse
import os
import re
@@ -11,6 +14,7 @@ import socket
import subprocess
import sys
import itertools
+import xml.etree.ElementTree
try:
@@ -39,6 +43,11 @@ except ImportError: # Python 2
import urlparse as compat_urlparse
try:
+ import urllib.response as compat_urllib_response
+except ImportError: # Python 2
+ import urllib as compat_urllib_response
+
+try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
@@ -81,6 +90,11 @@ except ImportError:
import BaseHTTPServer as compat_http_server
try:
+ compat_str = unicode # Python 2
+except NameError:
+ compat_str = str
+
+try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
@@ -100,7 +114,7 @@ except ImportError: # Python 2
# Is it a string-like object?
string.split
return b''
- if isinstance(string, unicode):
+ if isinstance(string, compat_str):
string = string.encode('utf-8')
bits = string.split(b'%')
if len(bits) == 1:
@@ -151,9 +165,38 @@ except ImportError: # Python 2
return compat_urllib_parse_unquote(string, encoding, errors)
try:
- compat_str = unicode # Python 2
-except NameError:
- compat_str = str
+ from urllib.request import DataHandler as compat_urllib_request_DataHandler
+except ImportError: # Python < 3.4
+ # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
+ class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
+ def data_open(self, req):
+ # data URLs as specified in RFC 2397.
+ #
+ # ignores POSTed data
+ #
+ # syntax:
+ # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
+ # mediatype := [ type "/" subtype ] *( ";" parameter )
+ # data := *urlchar
+ # parameter := attribute "=" value
+ url = req.get_full_url()
+
+ scheme, data = url.split(":", 1)
+ mediatype, data = data.split(",", 1)
+
+ # even base64 encoded data URLs might be quoted so unquote in any case:
+ data = compat_urllib_parse_unquote_to_bytes(data)
+ if mediatype.endswith(";base64"):
+ data = binascii.a2b_base64(data)
+ mediatype = mediatype[:-7]
+
+ if not mediatype:
+ mediatype = "text/plain;charset=US-ASCII"
+
+ headers = email.message_from_string(
+ "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+
+ return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
try:
compat_basestring = basestring # Python 2
@@ -170,6 +213,43 @@ try:
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
+if sys.version_info[0] >= 3:
+ compat_etree_fromstring = xml.etree.ElementTree.fromstring
+else:
+ # python 2.x tries to encode unicode strings with ascii (see the
+ # XMLParser._fixtext method)
+ etree = xml.etree.ElementTree
+
+ try:
+ _etree_iter = etree.Element.iter
+ except AttributeError: # Python <=2.6
+ def _etree_iter(root):
+ for el in root.findall('*'):
+ yield el
+ for sub in _etree_iter(el):
+ yield sub
+
+ # on Python 2.6, XML() doesn't accept a parser argument; function copied
+ # from CPython 2.7 source
+ def _XML(text, parser=None):
+ if not parser:
+ parser = etree.XMLParser(target=etree.TreeBuilder())
+ parser.feed(text)
+ return parser.close()
+
+ def _element_factory(*args, **kwargs):
+ el = etree.Element(*args, **kwargs)
+ for k, v in el.items():
+ if isinstance(v, bytes):
+ el.set(k, v.decode('utf-8'))
+ return el
+
+ def compat_etree_fromstring(text):
+ doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory)))
+ for el in _etree_iter(doc):
+ if el.text is not None and isinstance(el.text, bytes):
+ el.text = el.text.decode('utf-8')
+ return doc
try:
from urllib.parse import parse_qs as compat_parse_qs
@@ -234,7 +314,7 @@ else:
# Working around shlex issue with unicode strings on some python 2
# versions (see http://bugs.python.org/issue1548891)
def compat_shlex_split(s, comments=False, posix=True):
- if isinstance(s, unicode):
+ if isinstance(s, compat_str):
s = s.encode('utf-8')
return shlex.split(s, comments, posix)
@@ -416,26 +496,32 @@ if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
- def compat_get_terminal_size():
- columns = compat_getenv('COLUMNS', None)
+ def compat_get_terminal_size(fallback=(80, 24)):
+ columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
- lines = compat_getenv('LINES', None)
+ lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
- try:
- sp = subprocess.Popen(
- ['stty', 'size'],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = sp.communicate()
- lines, columns = map(int, out.split())
- except Exception:
- pass
+ if columns is None or lines is None or columns <= 0 or lines <= 0:
+ try:
+ sp = subprocess.Popen(
+ ['stty', 'size'],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = sp.communicate()
+ _lines, _columns = map(int, out.split())
+ except Exception:
+ _columns, _lines = _terminal_size(*fallback)
+
+ if columns is None or columns <= 0:
+ columns = _columns
+ if lines is None or lines <= 0:
+ lines = _lines
return _terminal_size(columns, lines)
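The rewritten Python 2 shim now only shells out to stty when COLUMNS/LINES are missing or non-positive, and each dimension is patched independently, matching the Python 3.3+ signature. Illustrative calls (the actual numbers depend on the calling terminal):

    from youtube_dl.compat import compat_get_terminal_size

    columns, lines = compat_get_terminal_size()           # env vars, then stty, then (80, 24)
    columns, lines = compat_get_terminal_size((132, 50))  # custom last-resort fallback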
try:
@@ -459,6 +545,7 @@ __all__ = [
'compat_chr',
'compat_cookiejar',
'compat_cookies',
+ 'compat_etree_fromstring',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
@@ -483,6 +570,8 @@ __all__ = [
'compat_urllib_parse_unquote_to_bytes',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
+ 'compat_urllib_request_DataHandler',
+ 'compat_urllib_response',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
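Among the new exports, compat_etree_fromstring papers over Python 2's ElementTree, which otherwise hands back bytes for non-ASCII text and attribute values. A small sketch of the guarantee:

    from youtube_dl.compat import compat_etree_fromstring, compat_str

    doc = compat_etree_fromstring('<root foo="bär"><p>тест</p></root>'.encode('utf-8'))
    # text and attributes are unicode on Python 2 and str on Python 3 alike
    assert isinstance(doc.attrib['foo'], compat_str)
    assert isinstance(doc.find('p').text, compat_str)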
diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py
index 97e755d4b..beae8c4d0 100644
--- a/youtube_dl/downloader/common.py
+++ b/youtube_dl/downloader/common.py
@@ -5,9 +5,9 @@ import re
import sys
import time
-from ..compat import compat_str
from ..utils import (
encodeFilename,
+ error_to_compat_str,
decodeArgument,
format_bytes,
timeconvert,
@@ -42,7 +42,7 @@ class FileDownloader(object):
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
- (experimenatal)
+ (experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
@@ -186,7 +186,7 @@ class FileDownloader(object):
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
- self.report_error('unable to rename file: %s' % compat_str(err))
+ self.report_error('unable to rename file: %s' % error_to_compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
@@ -325,7 +325,7 @@ class FileDownloader(object):
)
# Check file already present
- if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
+ if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
self.report_file_already_downloaded(filename)
self._hook_progress({
'filename': filename,
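The added parentheses matter because Python binds and tighter than or: the old expression grouped as (filename != '-' and nooverwrites_and_exists) or continuedl_and_exists, so a download to stdout (filename == '-') could be reported as already present. A worked check:

    filename = '-'
    nooverwrites_and_exists, continuedl_and_exists = False, True
    # old grouping: skips the stdout download
    assert (filename != '-' and nooverwrites_and_exists or continuedl_and_exists) is True
    # new grouping: stdout downloads are never treated as already downloaded
    assert (filename != '-' and (nooverwrites_and_exists or continuedl_and_exists)) is False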
diff --git a/youtube_dl/downloader/dash.py b/youtube_dl/downloader/dash.py
index 8b6fa2753..535f2a7fc 100644
--- a/youtube_dl/downloader/dash.py
+++ b/youtube_dl/downloader/dash.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import re
from .common import FileDownloader
-from ..compat import compat_urllib_request
+from ..utils import sanitized_Request
class DashSegmentsFD(FileDownloader):
@@ -22,7 +22,7 @@ class DashSegmentsFD(FileDownloader):
def append_url_to_file(outf, target_url, target_name, remaining_bytes=None):
self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name))
- req = compat_urllib_request.Request(target_url)
+ req = sanitized_Request(target_url)
if remaining_bytes is not None:
req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py
index 174180db5..aaf0c49c8 100644
--- a/youtube_dl/downloader/f4m.py
+++ b/youtube_dl/downloader/f4m.py
@@ -5,15 +5,17 @@ import io
import itertools
import os
import time
-import xml.etree.ElementTree as etree
from .fragment import FragmentFD
from ..compat import (
+ compat_etree_fromstring,
compat_urlparse,
compat_urllib_error,
+ compat_urllib_parse_urlparse,
)
from ..utils import (
encodeFilename,
+ fix_xml_ampersands,
sanitize_open,
struct_pack,
struct_unpack,
@@ -285,9 +287,14 @@ class F4mFD(FragmentFD):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
- manifest = self.ydl.urlopen(man_url).read()
-
- doc = etree.fromstring(manifest)
+ urlh = self.ydl.urlopen(man_url)
+ man_url = urlh.geturl()
+ # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
+ # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
+ # and https://github.com/rg3/youtube-dl/issues/7823)
+ manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()
+
+ doc = compat_etree_fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None:
@@ -329,20 +336,25 @@ class F4mFD(FragmentFD):
if not live:
write_metadata_tag(dest_stream, metadata)
+ base_url_parsed = compat_urllib_parse_urlparse(base_url)
+
self._start_frag_download(ctx)
frags_filenames = []
while fragments_list:
seg_i, frag_i = fragments_list.pop(0)
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
- url = base_url + name
+ query = []
+ if base_url_parsed.query:
+ query.append(base_url_parsed.query)
if akamai_pv:
- url += '?' + akamai_pv.strip(';')
+ query.append(akamai_pv.strip(';'))
if info_dict.get('extra_param_to_segment_url'):
- url += info_dict.get('extra_param_to_segment_url')
+ query.append(info_dict['extra_param_to_segment_url'])
+ url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
try:
- success = ctx['dl'].download(frag_filename, {'url': url})
+ success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
if not success:
return False
(down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
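Fragment URLs are now assembled with urlparse so the manifest's own query string survives alongside the Akamai pv token and any extra_param_to_segment_url. A sketch with a hypothetical CDN URL:

    from youtube_dl.compat import compat_urllib_parse_urlparse

    base = compat_urllib_parse_urlparse('http://cdn.example.com/vod/stream/?token=abc')
    query = [base.query, 'pv=xyz']  # manifest query first, then appended params
    url = base._replace(path=base.path + 'Seg1-Frag1', query='&'.join(query)).geturl()
    assert url == 'http://cdn.example.com/vod/stream/Seg1-Frag1?token=abc&pv=xyz'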
diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
index 71aafdc73..b5a3e1167 100644
--- a/youtube_dl/downloader/hls.py
+++ b/youtube_dl/downloader/hls.py
@@ -13,6 +13,7 @@ from ..utils import (
encodeArgument,
encodeFilename,
sanitize_open,
+ handle_youtubedl_headers,
)
@@ -28,10 +29,20 @@ class HlsFD(FileDownloader):
return False
ffpp.check_version()
- args = [
- encodeArgument(opt)
- for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
- args.append(encodeFilename(tmpfilename, True))
+ args = [ffpp.executable, '-y']
+
+ if info_dict['http_headers'] and re.match(r'^https?://', url):
+ # A trailing \r\n after each HTTP header is important to prevent a warning from ffmpeg/avconv:
+ # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
+ headers = handle_youtubedl_headers(info_dict['http_headers'])
+ args += [
+ '-headers',
+ ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
+
+ args += ['-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc']
+
+ args = [encodeArgument(opt) for opt in args]
+ args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
self._debug_cmd(args)
@@ -92,6 +103,7 @@ class NativeHlsFD(FragmentFD):
return False
down, frag_sanitized = sanitize_open(frag_filename, 'rb')
ctx['dest_stream'].write(down.read())
+ down.close()
frags_filenames.append(frag_sanitized)
self._finish_frag_download(ctx)
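For reference, the header block handed to ffmpeg is a single -headers argument with one CRLF-terminated line per header, built after handle_youtubedl_headers has filtered the internal Youtubedl- entries. A sketch with placeholder header values and output path:

    headers = {'User-Agent': 'youtube-dl', 'Cookie': 'session=abc'}
    args = ['ffmpeg', '-y',
            '-headers', ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items()),
            '-i', 'https://example.com/index.m3u8',
            '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc', 'out.mp4']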
diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index a29f5cf31..56840e026 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -7,14 +7,12 @@ import time
import re
from .common import FileDownloader
-from ..compat import (
- compat_urllib_request,
- compat_urllib_error,
-)
+from ..compat import compat_urllib_error
from ..utils import (
ContentTooShortError,
encodeFilename,
sanitize_open,
+ sanitized_Request,
)
@@ -29,8 +27,8 @@ class HttpFD(FileDownloader):
add_headers = info_dict.get('http_headers')
if add_headers:
headers.update(add_headers)
- basic_request = compat_urllib_request.Request(url, None, headers)
- request = compat_urllib_request.Request(url, None, headers)
+ basic_request = sanitized_Request(url, None, headers)
+ request = sanitized_Request(url, None, headers)
is_test = self.params.get('test', False)
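sanitized_Request, introduced in utils by this changeset, keeps the compat_urllib_request.Request signature (as the three-argument call above suggests) while cleaning the URL before urllib sees it, so the many Request call sites converted throughout this diff are mechanical swaps:

    from youtube_dl.utils import sanitized_Request

    # drop-in for compat_urllib_request.Request; the URL is sanitized
    # (e.g. unsafe characters escaped) before the Request is built
    req = sanitized_Request('http://example.com/video', None, {'User-Agent': 'youtube-dl'})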
diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
index 7d19bb808..14d56db47 100644
--- a/youtube_dl/downloader/rtmp.py
+++ b/youtube_dl/downloader/rtmp.py
@@ -105,7 +105,7 @@ class RtmpFD(FileDownloader):
protocol = info_dict.get('rtmp_protocol', None)
real_time = info_dict.get('rtmp_real_time', False)
no_resume = info_dict.get('no_resume', False)
- continue_dl = info_dict.get('continuedl', True)
+ continue_dl = self.params.get('continuedl', True)
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
@@ -117,7 +117,7 @@ class RtmpFD(FileDownloader):
return False
# Download using rtmpdump. rtmpdump returns exit code 2 when
- # the connection was interrumpted and resuming appears to be
+ # the connection was interrupted and resuming appears to be
# possible. This is part of rtmpdump's normal usage, AFAIK.
basic_args = [
'rtmpdump', '--verbose', '-r', url,
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 661b53e63..908581bf7 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -3,9 +3,15 @@ from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
+from .acast import (
+ ACastIE,
+ ACastChannelIE,
+)
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
+ AdobeTVShowIE,
+ AdobeTVChannelIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
@@ -38,6 +44,7 @@ from .arte import (
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
+from .audimedia import AudiMediaIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
@@ -45,6 +52,7 @@ from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbc import (
BBCCoUkIE,
+ BBCCoUkArticleIE,
BBCIE,
)
from .beeg import BeegIE
@@ -59,7 +67,10 @@ from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
-from .brightcove import BrightcoveIE
+from .brightcove import (
+ BrightcoveLegacyIE,
+ BrightcoveNewIE,
+)
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
@@ -75,6 +86,7 @@ from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
+from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
@@ -87,6 +99,7 @@ from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
+from .clyp import ClypIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
@@ -120,10 +133,12 @@ from .dbtv import DBTVIE
from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
+from .democracynow import DemocracynowIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
+from .dplay import DPlayIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
@@ -137,7 +152,6 @@ from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
-from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
@@ -158,6 +172,7 @@ from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
+from .europa import EuropaIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
@@ -165,14 +180,12 @@ from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
+from .fczenit import FczenitIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
-from .fktv import (
- FKTVIE,
- FKTVPosteckeIE,
-)
+from .fktv import FKTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
@@ -192,7 +205,9 @@ from .francetv import (
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
+from .funimation import FunimationIE
from .funnyordie import FunnyOrDieIE
+from .gameinformer import GameInformerIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
@@ -209,14 +224,17 @@ from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
-from .globo import GloboIE
+from .globo import (
+ GloboIE,
+ GloboArticleIE,
+)
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
-from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
+from .gputechconf import GPUTechConfIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
@@ -228,7 +246,6 @@ from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
-from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
@@ -298,6 +315,11 @@ from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
+from .limelight import (
+ LimelightMediaIE,
+ LimelightChannelIE,
+ LimelightChannelListIE,
+)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
@@ -315,7 +337,6 @@ from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
-from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
@@ -337,7 +358,6 @@ from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
-from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
@@ -346,7 +366,6 @@ from .mtv import (
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
-from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
@@ -367,6 +386,9 @@ from .nbc import (
from .ndr import (
NDRIE,
NJoyIE,
+ NDREmbedBaseIE,
+ NDREmbedIE,
+ NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
@@ -401,10 +423,22 @@ from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
-from .novamov import NovaMovIE
-from .nowness import NownessIE
-from .nowtv import NowTVIE
-from .nowvideo import NowVideoIE
+from .novamov import (
+ NovaMovIE,
+ WholeCloudIE,
+ NowVideoIE,
+ VideoWeedIE,
+ CloudTimeIE,
+)
+from .nowness import (
+ NownessIE,
+ NownessPlaylistIE,
+ NownessSeriesIE,
+)
+from .nowtv import (
+ NowTVIE,
+ NowTVListIE,
+)
from .npo import (
NPOIE,
NPOLiveIE,
@@ -432,7 +466,6 @@ from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
-from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
@@ -442,10 +475,7 @@ from .orf import (
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
-from .periscope import (
- PeriscopeIE,
- QuickscopeIE,
-)
+from .periscope import PeriscopeIE
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
@@ -537,6 +567,10 @@ from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
+from .skynewsarabia import (
+ SkyNewsArabiaIE,
+ SkyNewsArabiaArticleIE,
+)
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
@@ -559,7 +593,8 @@ from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
- SoundcloudPlaylistIE
+ SoundcloudPlaylistIE,
+ SoundcloudSearchIE
)
from .soundgasm import (
SoundgasmIE,
@@ -578,6 +613,7 @@ from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
+from .stitcher import StitcherIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
@@ -640,6 +676,7 @@ from .tnaflix import (
EMPFlixIE,
MovieFapIE,
)
+from .toggle import ToggleIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
@@ -683,7 +720,7 @@ from .twitch import (
TwitchBookmarksIE,
TwitchStreamIE,
)
-from .twitter import TwitterCardIE
+from .twitter import TwitterCardIE, TwitterIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
@@ -710,16 +747,15 @@ from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
-from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
-from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
+from .viidea import ViideaIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
@@ -772,6 +808,7 @@ from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
+from .xfileshare import XFileShareIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
@@ -815,6 +852,7 @@ from .youtube import (
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
+ YoutubePlaylistsIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py
index f9a389f67..c0e5d1abf 100644
--- a/youtube_dl/extractor/abc.py
+++ b/youtube_dl/extractor/abc.py
@@ -12,7 +12,7 @@ from ..utils import (
class ABCIE(InfoExtractor):
IE_NAME = 'abc.net.au'
- _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
+ _VALID_URL = r'http://www\.abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
@@ -36,6 +36,18 @@ class ABCIE(InfoExtractor):
'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
},
'add_ie': ['Youtube'],
+ }, {
+ 'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
+ 'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
+ 'info_dict': {
+ 'id': '6880080',
+ 'ext': 'mp3',
+ 'title': 'NAB lifts interest rates, following Westpac and CBA',
+ 'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
+ },
+ }, {
+ 'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -43,7 +55,7 @@ class ABCIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
mobj = re.search(
- r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
+ r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
webpage)
if mobj is None:
raise ExtractorError('Unable to extract video urls')
@@ -60,11 +72,13 @@ class ABCIE(InfoExtractor):
formats = [{
'url': url_info['url'],
+ 'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
'width': int_or_none(url_info.get('width')),
'height': int_or_none(url_info.get('height')),
'tbr': int_or_none(url_info.get('bitrate')),
'filesize': int_or_none(url_info.get('filesize')),
} for url_info in urls_info]
+
self._sort_formats(formats)
return {
diff --git a/youtube_dl/extractor/acast.py b/youtube_dl/extractor/acast.py
new file mode 100644
index 000000000..be7913bc7
--- /dev/null
+++ b/youtube_dl/extractor/acast.py
@@ -0,0 +1,70 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import int_or_none
+
+
+class ACastBaseIE(InfoExtractor):
+ _API_BASE_URL = 'https://www.acast.com/api/'
+
+
+class ACastIE(ACastBaseIE):
+ IE_NAME = 'acast'
+ _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<channel>[^/]+)/(?P<id>[^/#?]+)'
+ _TEST = {
+ 'url': 'https://www.acast.com/condenasttraveler/-where-are-you-taipei-101-taiwan',
+ 'md5': 'ada3de5a1e3a2a381327d749854788bb',
+ 'info_dict': {
+ 'id': '57de3baa-4bb0-487e-9418-2692c1277a34',
+ 'ext': 'mp3',
+ 'title': '"Where Are You?": Taipei 101, Taiwan',
+ 'timestamp': 1196172000000,
+ 'description': 'md5:0c5d8201dfea2b93218ea986c91eee6e',
+ 'duration': 211,
+ }
+ }
+
+ def _real_extract(self, url):
+ channel, display_id = re.match(self._VALID_URL, url).groups()
+ cast_data = self._download_json(self._API_BASE_URL + 'channels/%s/acasts/%s/playback' % (channel, display_id), display_id)
+
+ return {
+ 'id': compat_str(cast_data['id']),
+ 'display_id': display_id,
+ 'url': cast_data['blings'][0]['audio'],
+ 'title': cast_data['name'],
+ 'description': cast_data.get('description'),
+ 'thumbnail': cast_data.get('image'),
+ 'timestamp': int_or_none(cast_data.get('publishingDate')),
+ 'duration': int_or_none(cast_data.get('duration')),
+ }
+
+
+class ACastChannelIE(ACastBaseIE):
+ IE_NAME = 'acast:channel'
+ _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<id>[^/#?]+)'
+ _TEST = {
+ 'url': 'https://www.acast.com/condenasttraveler',
+ 'info_dict': {
+ 'id': '50544219-29bb-499e-a083-6087f4cb7797',
+ 'title': 'Condé Nast Traveler Podcast',
+ 'description': 'md5:98646dee22a5b386626ae31866638fbd',
+ },
+ 'playlist_mincount': 20,
+ }
+
+ @classmethod
+ def suitable(cls, url):
+ return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ channel_data = self._download_json(self._API_BASE_URL + 'channels/%s' % display_id, display_id)
+ casts = self._download_json(self._API_BASE_URL + 'channels/%s/acasts' % display_id, display_id)
+ entries = [self.url_result('https://www.acast.com/%s/%s' % (display_id, cast['url']), 'ACast') for cast in casts]
+
+ return self.playlist_result(entries, compat_str(channel_data['id']), channel_data['name'], channel_data.get('description'))
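Because ACastChannelIE's pattern is a prefix of ACastIE's, suitable() defers to the episode extractor so channel pages don't shadow single casts:

    from youtube_dl.extractor.acast import ACastIE, ACastChannelIE

    cast_url = 'https://www.acast.com/condenasttraveler/-where-are-you-taipei-101-taiwan'
    assert ACastIE.suitable(cast_url)
    assert not ACastChannelIE.suitable(cast_url)  # defers to the episode extractor
    assert ACastChannelIE.suitable('https://www.acast.com/condenasttraveler')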
diff --git a/youtube_dl/extractor/adobetv.py b/youtube_dl/extractor/adobetv.py
index 5e43adc51..8753ee2cf 100644
--- a/youtube_dl/extractor/adobetv.py
+++ b/youtube_dl/extractor/adobetv.py
@@ -1,23 +1,32 @@
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
+from ..compat import compat_str
from ..utils import (
parse_duration,
unified_strdate,
str_to_int,
+ int_or_none,
float_or_none,
ISO639Utils,
+ determine_ext,
)
-class AdobeTVIE(InfoExtractor):
- _VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)'
+class AdobeTVBaseIE(InfoExtractor):
+ _API_BASE_URL = 'http://tv.adobe.com/api/v4/'
+
+
+class AdobeTVIE(AdobeTVBaseIE):
+ _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?watch/(?P<show_urlname>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/',
'md5': '9bc5727bcdd55251f35ad311ca74fa1e',
'info_dict': {
- 'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop',
+ 'id': '10981',
'ext': 'mp4',
'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop',
'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311',
@@ -29,50 +38,106 @@ class AdobeTVIE(InfoExtractor):
}
def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- player = self._parse_json(
- self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'),
- video_id)
+ language, show_urlname, urlname = re.match(self._VALID_URL, url).groups()
+ if not language:
+ language = 'en'
- title = player.get('title') or self._search_regex(
- r'data-title="([^"]+)"', webpage, 'title')
- description = self._og_search_description(webpage)
- thumbnail = self._og_search_thumbnail(webpage)
-
- upload_date = unified_strdate(
- self._html_search_meta('datepublished', webpage, 'upload date'))
-
- duration = parse_duration(
- self._html_search_meta('duration', webpage, 'duration') or
- self._search_regex(
- r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
- webpage, 'duration', fatal=False))
-
- view_count = str_to_int(self._search_regex(
- r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',
- webpage, 'view count'))
+ video_data = self._download_json(
+ self._API_BASE_URL + 'episode/get/?language=%s&show_urlname=%s&urlname=%s&disclosure=standard' % (language, show_urlname, urlname),
+ urlname)['data'][0]
formats = [{
- 'url': source['src'],
- 'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None,
- 'tbr': source.get('bitrate'),
- } for source in player['sources']]
+ 'url': source['url'],
+ 'format_id': source.get('quality_level') or source['url'].split('-')[-1].split('.')[0] or None,
+ 'width': int_or_none(source.get('width')),
+ 'height': int_or_none(source.get('height')),
+ 'tbr': int_or_none(source.get('video_data_rate')),
+ } for source in video_data['videos']]
self._sort_formats(formats)
return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'upload_date': upload_date,
- 'duration': duration,
- 'view_count': view_count,
+ 'id': compat_str(video_data['id']),
+ 'title': video_data['title'],
+ 'description': video_data.get('description'),
+ 'thumbnail': video_data.get('thumbnail'),
+ 'upload_date': unified_strdate(video_data.get('start_date')),
+ 'duration': parse_duration(video_data.get('duration')),
+ 'view_count': str_to_int(video_data.get('playcount')),
'formats': formats,
}
+class AdobeTVPlaylistBaseIE(AdobeTVBaseIE):
+ def _parse_page_data(self, page_data):
+ return [self.url_result(self._get_element_url(element_data)) for element_data in page_data]
+
+ def _extract_playlist_entries(self, url, display_id):
+ page = self._download_json(url, display_id)
+ entries = self._parse_page_data(page['data'])
+ for page_num in range(2, page['paging']['pages'] + 1):
+ entries.extend(self._parse_page_data(
+ self._download_json(url + '&page=%d' % page_num, display_id)['data']))
+ return entries
+
+
+class AdobeTVShowIE(AdobeTVPlaylistBaseIE):
+ _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?show/(?P<id>[^/]+)'
+
+ _TEST = {
+ 'url': 'http://tv.adobe.com/show/the-complete-picture-with-julieanne-kost',
+ 'info_dict': {
+ 'id': '36',
+ 'title': 'The Complete Picture with Julieanne Kost',
+ 'description': 'md5:fa50867102dcd1aa0ddf2ab039311b27',
+ },
+ 'playlist_mincount': 136,
+ }
+
+ def _get_element_url(self, element_data):
+ return element_data['urls'][0]
+
+ def _real_extract(self, url):
+ language, show_urlname = re.match(self._VALID_URL, url).groups()
+ if not language:
+ language = 'en'
+ query = 'language=%s&show_urlname=%s' % (language, show_urlname)
+
+ show_data = self._download_json(self._API_BASE_URL + 'show/get/?%s' % query, show_urlname)['data'][0]
+
+ return self.playlist_result(
+ self._extract_playlist_entries(self._API_BASE_URL + 'episode/?%s' % query, show_urlname),
+ compat_str(show_data['id']),
+ show_data['show_name'],
+ show_data['show_description'])
+
+
+class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
+ _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?channel/(?P<id>[^/]+)(?:/(?P<category_urlname>[^/]+))?'
+
+ _TEST = {
+ 'url': 'http://tv.adobe.com/channel/development',
+ 'info_dict': {
+ 'id': 'development',
+ },
+ 'playlist_mincount': 96,
+ }
+
+ def _get_element_url(self, element_data):
+ return element_data['url']
+
+ def _real_extract(self, url):
+ language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups()
+ if not language:
+ language = 'en'
+ query = 'language=%s&channel_urlname=%s' % (language, channel_urlname)
+ if category_urlname:
+ query += '&category_urlname=%s' % category_urlname
+
+ return self.playlist_result(
+ self._extract_playlist_entries(self._API_BASE_URL + 'show/?%s' % query, channel_urlname),
+ channel_urlname)
+
+
class AdobeTVVideoIE(InfoExtractor):
_VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
@@ -91,28 +156,25 @@ class AdobeTVVideoIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
-
- webpage = self._download_webpage(url, video_id)
-
- player_params = self._parse_json(self._search_regex(
- r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'),
- video_id)
+ video_data = self._download_json(url + '?format=json', video_id)
formats = [{
+ 'format_id': '%s-%s' % (determine_ext(source['src']), source.get('height')),
'url': source['src'],
- 'width': source.get('width'),
- 'height': source.get('height'),
- 'tbr': source.get('bitrate'),
- } for source in player_params['sources']]
+ 'width': int_or_none(source.get('width')),
+ 'height': int_or_none(source.get('height')),
+ 'tbr': int_or_none(source.get('bitrate')),
+ } for source in video_data['sources']]
+ self._sort_formats(formats)
# For both metadata and downloaded files the duration varies among
# formats. I just pick the max one
duration = max(filter(None, [
float_or_none(source.get('duration'), scale=1000)
- for source in player_params['sources']]))
+ for source in video_data['sources']]))
subtitles = {}
- for translation in player_params.get('translations', []):
+ for translation in video_data.get('translations', []):
lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium'])
if lang_id not in subtitles:
subtitles[lang_id] = []
@@ -124,8 +186,9 @@ class AdobeTVVideoIE(InfoExtractor):
return {
'id': video_id,
'formats': formats,
- 'title': player_params['title'],
- 'description': self._og_search_description(webpage),
+ 'title': video_data['title'],
+ 'description': video_data.get('description'),
+ 'thumbnail': video_data['video'].get('poster'),
'duration': duration,
'subtitles': subtitles,
}
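The playlist base class pages through the v4 API by fetching page 1, reading paging.pages, and appending &page=N for the remainder. Roughly, as a standalone sketch (fetch_all and download_json are illustrative names standing in for the extractor helpers):

    def fetch_all(download_json, url, display_id):
        page = download_json(url, display_id)
        items = list(page['data'])
        for page_num in range(2, page['paging']['pages'] + 1):
            items.extend(download_json(url + '&page=%d' % page_num, display_id)['data'])
        return items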
diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py
index 4327c2f61..3ae618e71 100644
--- a/youtube_dl/extractor/adultswim.py
+++ b/youtube_dl/extractor/adultswim.py
@@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import (
+ determine_ext,
ExtractorError,
float_or_none,
xpath_text,
@@ -40,7 +41,8 @@ class AdultSwimIE(InfoExtractor):
'id': 'rQxZvXQ4ROaSOqq-or2Mow',
'title': 'Rick and Morty - Pilot',
'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
- }
+ },
+ 'skip': 'This video is only available for registered users',
}, {
'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/',
'playlist': [
@@ -123,7 +125,6 @@ class AdultSwimIE(InfoExtractor):
else:
collections = bootstrapped_data['show']['collections']
collection, video_info = self.find_collection_containing_video(collections, episode_path)
-
# Video wasn't found in the collections, let's try `slugged_video`.
if video_info is None:
if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path:
@@ -133,7 +134,15 @@ class AdultSwimIE(InfoExtractor):
show = bootstrapped_data['show']
show_title = show['title']
- segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
+ stream = video_info.get('stream')
+ clips = [stream] if stream else video_info.get('clips')
+ if not clips:
+ raise ExtractorError(
+ 'This video is only available via a cable service provider subscription that'
+ ' is not currently supported. You may want to use --cookies.'
+ if video_info.get('auth') is True else 'Unable to find stream or clips',
+ expected=True)
+ segment_ids = [clip['videoPlaybackID'] for clip in clips]
episode_id = video_info['id']
episode_title = video_info['title']
@@ -142,7 +151,7 @@ class AdultSwimIE(InfoExtractor):
entries = []
for part_num, segment_id in enumerate(segment_ids):
- segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id
+ segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id
segment_title = '%s - %s' % (show_title, episode_title)
if len(segment_ids) > 1:
@@ -158,17 +167,30 @@ class AdultSwimIE(InfoExtractor):
formats = []
file_els = idoc.findall('.//files/file') or idoc.findall('./files/file')
+ unique_urls = []
+ unique_file_els = []
for file_el in file_els:
+ media_url = file_el.text
+ if not media_url or determine_ext(media_url) == 'f4m':
+ continue
+ if file_el.text not in unique_urls:
+ unique_urls.append(file_el.text)
+ unique_file_els.append(file_el)
+
+ for file_el in unique_file_els:
bitrate = file_el.attrib.get('bitrate')
ftype = file_el.attrib.get('type')
-
- formats.append({
- 'format_id': '%s_%s' % (bitrate, ftype),
- 'url': file_el.text.strip(),
- # The bitrate may not be a number (for example: 'iphone')
- 'tbr': int(bitrate) if bitrate.isdigit() else None,
- 'quality': 1 if ftype == 'hd' else -1
- })
+ media_url = file_el.text
+ if determine_ext(media_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, segment_title, 'mp4', preference=0, m3u8_id='hls'))
+ else:
+ formats.append({
+ 'format_id': '%s_%s' % (bitrate, ftype),
+ 'url': file_el.text.strip(),
+ # The bitrate may not be a number (for example: 'iphone')
+ 'tbr': int(bitrate) if bitrate.isdigit() else None,
+ })
self._sort_formats(formats)
diff --git a/youtube_dl/extractor/aljazeera.py b/youtube_dl/extractor/aljazeera.py
index 184a14a4f..5b2c0dc9a 100644
--- a/youtube_dl/extractor/aljazeera.py
+++ b/youtube_dl/extractor/aljazeera.py
@@ -15,7 +15,7 @@ class AlJazeeraIE(InfoExtractor):
'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
'uploader': 'Al Jazeera English',
},
- 'add_ie': ['Brightcove'],
+ 'add_ie': ['BrightcoveLegacy'],
'skip': 'Not accessible from Travis CI server',
}
@@ -32,5 +32,5 @@ class AlJazeeraIE(InfoExtractor):
'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc'
'&%40videoPlayer={0}'.format(brightcove_id)
),
- 'ie_key': 'Brightcove',
+ 'ie_key': 'BrightcoveLegacy',
}
diff --git a/youtube_dl/extractor/anitube.py b/youtube_dl/extractor/anitube.py
index 31f0d417c..23f942ae2 100644
--- a/youtube_dl/extractor/anitube.py
+++ b/youtube_dl/extractor/anitube.py
@@ -26,8 +26,8 @@ class AnitubeIE(InfoExtractor):
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
- key = self._html_search_regex(
- r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key')
+ key = self._search_regex(
+ r'src=["\']https?://[^/]+/embed/([A-Za-z0-9_-]+)', webpage, 'key')
config_xml = self._download_xml(
'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key)
diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py
index 576f03b5b..f68dc3236 100644
--- a/youtube_dl/extractor/appletrailers.py
+++ b/youtube_dl/extractor/appletrailers.py
@@ -13,53 +13,53 @@ from ..utils import (
class AppleTrailersIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
_TESTS = [{
- "url": "http://trailers.apple.com/trailers/wb/manofsteel/",
+ 'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
'info_dict': {
'id': 'manofsteel',
},
- "playlist": [
+ 'playlist': [
{
- "md5": "d97a8e575432dbcb81b7c3acb741f8a8",
- "info_dict": {
- "id": "manofsteel-trailer4",
- "ext": "mov",
- "duration": 111,
- "title": "Trailer 4",
- "upload_date": "20130523",
- "uploader_id": "wb",
+ 'md5': 'd97a8e575432dbcb81b7c3acb741f8a8',
+ 'info_dict': {
+ 'id': 'manofsteel-trailer4',
+ 'ext': 'mov',
+ 'duration': 111,
+ 'title': 'Trailer 4',
+ 'upload_date': '20130523',
+ 'uploader_id': 'wb',
},
},
{
- "md5": "b8017b7131b721fb4e8d6f49e1df908c",
- "info_dict": {
- "id": "manofsteel-trailer3",
- "ext": "mov",
- "duration": 182,
- "title": "Trailer 3",
- "upload_date": "20130417",
- "uploader_id": "wb",
+ 'md5': 'b8017b7131b721fb4e8d6f49e1df908c',
+ 'info_dict': {
+ 'id': 'manofsteel-trailer3',
+ 'ext': 'mov',
+ 'duration': 182,
+ 'title': 'Trailer 3',
+ 'upload_date': '20130417',
+ 'uploader_id': 'wb',
},
},
{
- "md5": "d0f1e1150989b9924679b441f3404d48",
- "info_dict": {
- "id": "manofsteel-trailer",
- "ext": "mov",
- "duration": 148,
- "title": "Trailer",
- "upload_date": "20121212",
- "uploader_id": "wb",
+ 'md5': 'd0f1e1150989b9924679b441f3404d48',
+ 'info_dict': {
+ 'id': 'manofsteel-trailer',
+ 'ext': 'mov',
+ 'duration': 148,
+ 'title': 'Trailer',
+ 'upload_date': '20121212',
+ 'uploader_id': 'wb',
},
},
{
- "md5": "5fe08795b943eb2e757fa95cb6def1cb",
- "info_dict": {
- "id": "manofsteel-teaser",
- "ext": "mov",
- "duration": 93,
- "title": "Teaser",
- "upload_date": "20120721",
- "uploader_id": "wb",
+ 'md5': '5fe08795b943eb2e757fa95cb6def1cb',
+ 'info_dict': {
+ 'id': 'manofsteel-teaser',
+ 'ext': 'mov',
+ 'duration': 93,
+ 'title': 'Teaser',
+ 'upload_date': '20120721',
+ 'uploader_id': 'wb',
},
},
]
diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
index 6f465789b..73be6d204 100644
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -14,8 +14,8 @@ from ..utils import (
parse_duration,
unified_strdate,
xpath_text,
- parse_xml,
)
+from ..compat import compat_etree_fromstring
class ARDMediathekIE(InfoExtractor):
@@ -161,7 +161,7 @@ class ARDMediathekIE(InfoExtractor):
raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True)
if re.search(r'[\?&]rss($|[=&])', url):
- doc = parse_xml(webpage)
+ doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return GenericIE()._extract_rss(url, video_id, doc)
diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py
index 76de24477..2a00da3ee 100644
--- a/youtube_dl/extractor/arte.py
+++ b/youtube_dl/extractor/arte.py
@@ -4,6 +4,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..compat import (
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
+)
from ..utils import (
find_xpath_attr,
unified_strdate,
@@ -77,7 +81,13 @@ class ArteTVPlus7IE(InfoExtractor):
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(
[r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
- webpage, 'json vp url')
+ webpage, 'json vp url', default=None)
+ if not json_url:
+ iframe_url = self._html_search_regex(
+ r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1',
+ webpage, 'iframe url', group='url')
+ json_url = compat_parse_qs(
+ compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
return self._extract_from_json_url(json_url, video_id, lang)
def _extract_from_json_url(self, json_url, video_id, lang):
diff --git a/youtube_dl/extractor/atresplayer.py b/youtube_dl/extractor/atresplayer.py
index 29f8795d3..50e47ba0a 100644
--- a/youtube_dl/extractor/atresplayer.py
+++ b/youtube_dl/extractor/atresplayer.py
@@ -7,11 +7,11 @@ from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
- compat_urllib_request,
)
from ..utils import (
int_or_none,
float_or_none,
+ sanitized_Request,
xpath_text,
ExtractorError,
)
@@ -63,7 +63,7 @@ class AtresPlayerIE(InfoExtractor):
'j_password': password,
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = self._download_webpage(
@@ -94,7 +94,7 @@ class AtresPlayerIE(InfoExtractor):
formats = []
for fmt in ['windows', 'android_tablet']:
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
self._URL_VIDEO_TEMPLATE.format(fmt, episode_id, timestamp_shifted, token))
request.add_header('User-Agent', self._USER_AGENT)
diff --git a/youtube_dl/extractor/audimedia.py b/youtube_dl/extractor/audimedia.py
new file mode 100644
index 000000000..b0b089dee
--- /dev/null
+++ b/youtube_dl/extractor/audimedia.py
@@ -0,0 +1,80 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+ sanitized_Request,
+)
+
+
+class AudiMediaIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?audimedia\.tv/(?:en|de)/vid/(?P<id>[^/?#]+)'
+ _TEST = {
+ 'url': 'https://audimedia.tv/en/vid/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test',
+ 'md5': '79a8b71c46d49042609795ab59779b66',
+ 'info_dict': {
+ 'id': '1564',
+ 'ext': 'mp4',
+ 'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test',
+ 'description': 'md5:60e5d30a78ced725f7b8d34370762941',
+ 'upload_date': '20151124',
+ 'timestamp': 1448354940,
+ 'duration': 74022,
+ 'view_count': int,
+ }
+ }
+ # extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken)
+ _AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2'
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ raw_payload = self._search_regex(r'<script[^>]+class="amtv-embed"[^>]+id="([^"]+)"', webpage, 'raw payload')
+ _, stage_mode, video_id, lang = raw_payload.split('-')
+
+ # TODO: handle s and e stage_mode (live streams and ended live streams)
+ if stage_mode not in ('s', 'e'):
+ request = sanitized_Request(
+ 'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang),
+ headers={'X-Auth-Token': self._AUTH_TOKEN})
+ json_data = self._download_json(request, video_id)['results']
+ formats = []
+
+ stream_url_hls = json_data.get('stream_url_hls')
+ if stream_url_hls:
+ m3u8_formats = self._extract_m3u8_formats(stream_url_hls, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+
+ stream_url_hds = json_data.get('stream_url_hds')
+ if stream_url_hds:
+ f4m_formats = self._extract_f4m_formats(json_data.get('stream_url_hds') + '?hdcore=3.4.0', video_id, -1, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
+
+ for video_version in json_data.get('video_versions'):
+ video_version_url = video_version.get('download_url') or video_version.get('stream_url')
+ if not video_version_url:
+ continue
+ formats.append({
+ 'url': video_version_url,
+ 'width': int_or_none(video_version.get('width')),
+ 'height': int_or_none(video_version.get('height')),
+ 'abr': int_or_none(video_version.get('audio_bitrate')),
+ 'vbr': int_or_none(video_version.get('video_bitrate')),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': json_data['title'],
+ 'description': json_data.get('subtitle'),
+ 'thumbnail': json_data.get('thumbnail_image', {}).get('file'),
+ 'timestamp': parse_iso8601(json_data.get('publication_date')),
+ 'duration': int_or_none(json_data.get('duration')),
+ 'view_count': int_or_none(json_data.get('view_count')),
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py
index 8dff1d6e3..da986e063 100644
--- a/youtube_dl/extractor/bambuser.py
+++ b/youtube_dl/extractor/bambuser.py
@@ -6,13 +6,13 @@ import itertools
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
- compat_urllib_request,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
+ sanitized_Request,
)
@@ -57,7 +57,7 @@ class BambuserIE(InfoExtractor):
'pass': password,
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._LOGIN_URL)
response = self._download_webpage(
@@ -126,7 +126,7 @@ class BambuserChannelIE(InfoExtractor):
'&sort=created&access_mode=0%2C1%2C2&limit={count}'
'&method=broadcast&format=json&vid_older_than={last}'
).format(user=user, count=self._STEP, last=last_id)
- req = compat_urllib_request.Request(req_url)
+ req = sanitized_Request(req_url)
# Without setting this header, we wouldn't get any result
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
data = self._download_json(
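
This commit swaps compat_urllib_request.Request for sanitized_Request throughout. A minimal sketch of the idea, not the exact utils.py implementation: the wrapper cleans the URL once, centrally, before building the request object.

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    def sanitize_url(url):
        # assumption for this sketch: fixing up protocol-relative
        # URLs is the interesting case
        return 'http:%s' % url if url.startswith('//') else url

    def sanitized_Request(url, *args, **kwargs):
        return Request(sanitize_url(url), *args, **kwargs)
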
diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
index 505877b77..c1ef8051d 100644
--- a/youtube_dl/extractor/bandcamp.py
+++ b/youtube_dl/extractor/bandcamp.py
@@ -10,6 +10,8 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ float_or_none,
+ int_or_none,
)
@@ -52,11 +54,11 @@ class BandcampIE(InfoExtractor):
ext, abr_str = format_id.split('-', 1)
formats.append({
'format_id': format_id,
- 'url': format_url,
+ 'url': self._proto_relative_url(format_url, 'http:'),
'ext': ext,
'vcodec': 'none',
'acodec': ext,
- 'abr': int(abr_str),
+ 'abr': int_or_none(abr_str),
})
self._sort_formats(formats)
@@ -65,7 +67,7 @@ class BandcampIE(InfoExtractor):
'id': compat_str(data['id']),
'title': data['title'],
'formats': formats,
- 'duration': float(data['duration']),
+ 'duration': float_or_none(data.get('duration')),
}
else:
raise ExtractorError('No free songs found')
@@ -93,8 +95,8 @@ class BandcampIE(InfoExtractor):
final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url')
# If we could correctly generate the .rand field the url would be
# in the "download_url" key
- final_url = self._search_regex(
- r'"retry_url":"(.*?)"', final_url_webpage, 'final video URL')
+ final_url = self._proto_relative_url(self._search_regex(
+ r'"retry_url":"(.+?)"', final_url_webpage, 'final video URL'), 'http:')
return {
'id': video_id,
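
Both bandcamp fixes above funnel URLs through _proto_relative_url so scheme-less '//host/...' links become downloadable. A sketch of the assumed behaviour:

    def proto_relative_url(url, scheme='http:'):
        if url and url.startswith('//'):
            return scheme + url
        return url

    assert proto_relative_url('//bandcamp.com/download?id=1') == \
        'http://bandcamp.com/download?id=1'
    assert proto_relative_url('https://example.com/a') == 'https://example.com/a'
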
diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
index abc5a44a1..691aecc0d 100644
--- a/youtube_dl/extractor/bbc.py
+++ b/youtube_dl/extractor/bbc.py
@@ -2,7 +2,6 @@
from __future__ import unicode_literals
import re
-import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
@@ -11,28 +10,45 @@ from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
+ remove_end,
+ unescapeHTML,
+)
+from ..compat import (
+ compat_etree_fromstring,
+ compat_HTTPError,
)
-from ..compat import compat_HTTPError
class BBCCoUkIE(InfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
- _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
+ _ID_REGEX = r'[pb][\da-z]{7}'
+ _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:programmes/(?!articles/)|iplayer(?:/[^/]+)?/(?:episode/|playlist/))|music/clips[/#])(?P<id>%s)' % _ID_REGEX
_MEDIASELECTOR_URLS = [
+ # Provides HQ HLS streams with even better quality than the pc mediaset but
+ # fails with geolocation in some cases even when it's not geo-restricted at all
+ # (e.g. http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable.
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
]
+ _MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
+ _EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'
+
+ _NAMESPACES = (
+ _MEDIASELECTION_NS,
+ _EMP_PLAYLIST_NS,
+ )
+
_TESTS = [
{
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
- 'title': 'Kaleidoscope, Leonard Cohen',
+ 'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
- 'duration': 1740,
},
'params': {
# rtmp download
@@ -95,7 +111,8 @@ class BBCCoUkIE(InfoExtractor):
'params': {
# rtmp download
'skip_download': True,
- }
+ },
+ 'skip': 'Episode is no longer available on BBC iPlayer Radio',
}, {
'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
'note': 'Audio',
@@ -153,6 +170,21 @@ class BBCCoUkIE(InfoExtractor):
},
'skip': 'geolocation',
}, {
+ # iptv-all mediaset fails with geolocation however there is no geo restriction
+ # for this programme at all
+ 'url': 'http://www.bbc.co.uk/programmes/b06bp7lf',
+ 'info_dict': {
+ 'id': 'b06bp7kf',
+ 'ext': 'flv',
+ 'title': "Annie Mac's Friday Night, B.Traits sits in for Annie",
+ 'description': 'B.Traits sits in for Annie Mac with a Mini-Mix from Disclosure.',
+ 'duration': 10800,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
'only_matching': True,
}, {
@@ -174,6 +206,7 @@ class BBCCoUkIE(InfoExtractor):
def _extract_connection(self, connection, programme_id):
formats = []
+ kind = connection.get('kind')
protocol = connection.get('protocol')
supplier = connection.get('supplier')
if protocol == 'http':
@@ -189,11 +222,17 @@ class BBCCoUkIE(InfoExtractor):
# Skip DASH until supported
elif transfer_format == 'dash':
pass
+ elif transfer_format == 'hls':
+ m3u8_formats = self._extract_m3u8_formats(
+ href, programme_id, ext='mp4', entry_protocol='m3u8_native',
+ m3u8_id=supplier, fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
# Direct link
else:
formats.append({
'url': href,
- 'format_id': supplier,
+ 'format_id': supplier or kind or protocol,
})
elif protocol == 'rtmp':
application = connection.get('application', 'ondemand')
@@ -213,16 +252,24 @@ class BBCCoUkIE(InfoExtractor):
return formats
def _extract_items(self, playlist):
- return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
+ return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS)
+
+ def _findall_ns(self, element, xpath):
+ elements = []
+ for ns in self._NAMESPACES:
+ elements.extend(element.findall(xpath % ns))
+ return elements
def _extract_medias(self, media_selection):
- error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
+ error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS)
+ if error is None:
+ error = media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS)
if error is not None:
raise BBCCoUkIE.MediaSelectionError(error.get('id'))
- return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
+ return self._findall_ns(media_selection, './{%s}media')
def _extract_connections(self, media):
- return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
+ return self._findall_ns(media, './{%s}connection')
def _extract_video(self, media, programme_id):
formats = []
@@ -236,13 +283,14 @@ class BBCCoUkIE(InfoExtractor):
conn_formats = self._extract_connection(connection, programme_id)
for format in conn_formats:
format.update({
- 'format_id': '%s_%s' % (service, format['format_id']),
'width': width,
'height': height,
'vbr': vbr,
'vcodec': vcodec,
'filesize': file_size,
})
+ if service:
+ format['format_id'] = '%s_%s' % (service, format['format_id'])
formats.extend(conn_formats)
return formats
@@ -287,7 +335,7 @@ class BBCCoUkIE(InfoExtractor):
return self._download_media_selector_url(
mediaselector_url % programme_id, programme_id)
except BBCCoUkIE.MediaSelectionError as e:
- if e.id == 'notukerror':
+ if e.id in ('notukerror', 'geolocation', 'selectionunavailable'):
last_exception = e
continue
self._raise_extractor_error(e)
@@ -298,8 +346,8 @@ class BBCCoUkIE(InfoExtractor):
media_selection = self._download_xml(
url, programme_id, 'Downloading media selection XML')
except ExtractorError as ee:
- if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
- media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
+ if isinstance(ee.cause, compat_HTTPError) and ee.cause.code in (403, 404):
+ media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8'))
else:
raise
return self._process_media_selector(media_selection, programme_id)
@@ -357,7 +405,7 @@ class BBCCoUkIE(InfoExtractor):
url, playlist_id, 'Downloading legacy playlist XML')
def _extract_from_legacy_playlist(self, playlist, playlist_id):
- no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
+ no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS)
if no_items is not None:
reason = no_items.get('reason')
if reason == 'preAvailability':
@@ -374,8 +422,9 @@ class BBCCoUkIE(InfoExtractor):
kind = item.get('kind')
if kind != 'programme' and kind != 'radioProgramme':
continue
- title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
- description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
+ title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text
+ description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS)
+ description = description_el.text if description_el is not None else None
def get_programme_id(item):
def get_from_attributes(item):
@@ -384,16 +433,18 @@ class BBCCoUkIE(InfoExtractor):
if value and re.match(r'^[pb][\da-z]{7}$', value):
return value
get_from_attributes(item)
- mediator = item.find('./{http://bbc.co.uk/2008/emp/playlist}mediator')
+ mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS)
if mediator is not None:
return get_from_attributes(mediator)
programme_id = get_programme_id(item)
duration = int_or_none(item.get('duration'))
- # TODO: programme_id can be None and media items can be incorporated right inside
- # playlist's item (e.g. http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
- # as f4m and m3u8
- formats, subtitles = self._download_media_selector(programme_id)
+
+ if programme_id:
+ formats, subtitles = self._download_media_selector(programme_id)
+ else:
+ formats, subtitles = self._process_media_selector(item, playlist_id)
+ programme_id = playlist_id
return programme_id, title, description, duration, formats, subtitles
@@ -403,6 +454,7 @@ class BBCCoUkIE(InfoExtractor):
webpage = self._download_webpage(url, group_id, 'Downloading video page')
programme_id = None
+ duration = None
tviplayer = self._search_regex(
r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
@@ -415,14 +467,16 @@ class BBCCoUkIE(InfoExtractor):
if not programme_id:
programme_id = self._search_regex(
- r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
+ r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None)
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
title = self._og_search_title(webpage)
description = self._search_regex(
r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
- webpage, 'description', fatal=False)
+ webpage, 'description', default=None)
+ if not description:
+ description = self._html_search_meta('description', webpage)
else:
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
@@ -445,6 +499,9 @@ class BBCIE(BBCCoUkIE):
_VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
_MEDIASELECTOR_URLS = [
+ # Provides HQ HLS streams but fails with geolocation in some cases even
+ # when it's not geo-restricted at all
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s',
# Provides more formats, namely direct mp4 links, but fails on some videos with
# notukerror for non UK (?) users (e.g.
# http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
@@ -454,8 +511,7 @@ class BBCIE(BBCCoUkIE):
]
_TESTS = [{
- # article with multiple videos embedded with data-media-meta containing
- # playlist.sxml, externalId and no direct video links
+ # article with multiple videos embedded with data-playable containing vpids
'url': 'http://www.bbc.com/news/world-europe-32668511',
'info_dict': {
'id': 'world-europe-32668511',
@@ -464,7 +520,7 @@ class BBCIE(BBCCoUkIE):
},
'playlist_count': 2,
}, {
- # article with multiple videos embedded with data-media-meta (more videos)
+ # article with multiple videos embedded with data-playable (more videos)
'url': 'http://www.bbc.com/news/business-28299555',
'info_dict': {
'id': 'business-28299555',
@@ -475,6 +531,7 @@ class BBCIE(BBCCoUkIE):
'skip': 'Save time',
}, {
# article with multiple videos embedded with `new SMP()`
+ # broken
'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
'info_dict': {
'id': '3662a707-0af9-3149-963f-47bea720b460',
@@ -482,12 +539,13 @@ class BBCIE(BBCCoUkIE):
},
'playlist_count': 18,
}, {
- # single video embedded with mediaAssetPage.init()
+ # single video embedded with data-playable containing vpid
'url': 'http://www.bbc.com/news/world-europe-32041533',
'info_dict': {
'id': 'p02mprgb',
'ext': 'mp4',
'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
+ 'description': 'md5:2868290467291b37feda7863f7a83f54',
'duration': 47,
'timestamp': 1427219242,
'upload_date': '20150324',
@@ -497,15 +555,14 @@ class BBCIE(BBCCoUkIE):
'skip_download': True,
}
}, {
- # article with single video embedded with data-media-meta containing
- # direct video links (for now these are extracted) and playlist.xml (with
- # media items as f4m and m3u8 - currently unsupported)
+ # article with single video embedded with data-playable containing XML playlist
+ # with direct video links as progressiveDownloadUrl (for now these are extracted)
+ # and playlist with f4m and m3u8 as streamingUrl
'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
'info_dict': {
'id': '150615_telabyad_kentin_cogu',
'ext': 'mp4',
'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
- 'duration': 47,
'timestamp': 1434397334,
'upload_date': '20150615',
},
@@ -513,13 +570,12 @@ class BBCIE(BBCCoUkIE):
'skip_download': True,
}
}, {
- # single video embedded with mediaAssetPage.init() (regional section)
+ # single video embedded with data-playable containing XML playlists (regional section)
'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
'info_dict': {
'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
'ext': 'mp4',
'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
- 'duration': 87,
'timestamp': 1434713142,
'upload_date': '20150619',
},
@@ -534,6 +590,7 @@ class BBCIE(BBCCoUkIE):
'ext': 'mp4',
'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
'duration': 56,
+ 'description': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
},
'params': {
'skip_download': True,
@@ -561,21 +618,21 @@ class BBCIE(BBCCoUkIE):
'ext': 'mp4',
'title': 'Hyundai Santa Fe Sport: Rock star',
'description': 'md5:b042a26142c4154a6e472933cf20793d',
- 'timestamp': 1368473503,
- 'upload_date': '20130513',
+ 'timestamp': 1415867444,
+ 'upload_date': '20141113',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
- # single video with playlist.sxml URL
+ # single video with playlist.sxml URL in playlist param
'url': 'http://www.bbc.com/sport/0/football/33653409',
'info_dict': {
'id': 'p02xycnp',
'ext': 'mp4',
'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
- 'description': 'md5:398fca0e2e701c609d726e034fa1fc89',
+ 'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
'duration': 140,
},
'params': {
@@ -583,6 +640,14 @@ class BBCIE(BBCCoUkIE):
'skip_download': True,
}
}, {
+ # article with multiple videos embedded with playlist.sxml in playlist param
+ 'url': 'http://www.bbc.com/sport/0/football/34475836',
+ 'info_dict': {
+ 'id': '34475836',
+ 'title': 'What Liverpool can expect from Klopp',
+ },
+ 'playlist_count': 3,
+ }, {
# single video with playlist URL from weather section
'url': 'http://www.bbc.com/weather/features/33601775',
'only_matching': True,
@@ -594,7 +659,7 @@ class BBCIE(BBCCoUkIE):
@classmethod
def suitable(cls, url):
- return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url)
+ return False if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url) else super(BBCIE, cls).suitable(url)
def _extract_from_media_meta(self, media_meta, video_id):
# Direct links to media in media metadata (e.g.
@@ -623,40 +688,109 @@ class BBCIE(BBCCoUkIE):
return [], []
+ def _extract_from_playlist_sxml(self, url, playlist_id, timestamp):
+ programme_id, title, description, duration, formats, subtitles = \
+ self._process_legacy_playlist_url(url, playlist_id)
+ self._sort_formats(formats)
+ return {
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
- timestamp = parse_iso8601(self._search_regex(
- [r'"datePublished":\s*"([^"]+)',
- r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
- r'itemprop="datePublished"[^>]+datetime="([^"]+)"'],
- webpage, 'date', default=None))
-
- # single video with playlist.sxml URL (e.g. http://www.bbc.com/sport/0/football/3365340ng)
- playlist = self._search_regex(
- r'<param[^>]+name="playlist"[^>]+value="([^"]+)"',
- webpage, 'playlist', default=None)
- if playlist:
- programme_id, title, description, duration, formats, subtitles = \
- self._process_legacy_playlist_url(playlist, playlist_id)
- self._sort_formats(formats)
- return {
- 'id': programme_id,
- 'title': title,
- 'description': description,
- 'duration': duration,
- 'timestamp': timestamp,
- 'formats': formats,
- 'subtitles': subtitles,
- }
+ timestamp = None
+ playlist_title = None
+ playlist_description = None
+
+ ld = self._parse_json(
+ self._search_regex(
+ r'(?s)<script type="application/ld\+json">(.+?)</script>',
+ webpage, 'ld json', default='{}'),
+ playlist_id, fatal=False)
+ if ld:
+ timestamp = parse_iso8601(ld.get('datePublished'))
+ playlist_title = ld.get('headline')
+ playlist_description = ld.get('articleBody')
+
+ if not timestamp:
+ timestamp = parse_iso8601(self._search_regex(
+ [r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
+ r'itemprop="datePublished"[^>]+datetime="([^"]+)"',
+ r'"datePublished":\s*"([^"]+)'],
+ webpage, 'date', default=None))
+
+ entries = []
+
+ # article with multiple videos embedded with playlist.sxml (e.g.
+ # http://www.bbc.com/sport/0/football/34475836)
+ playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage)
+ playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage))
+ if playlists:
+ entries = [
+ self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp)
+ for playlist_url in playlists]
+
+ # news article with multiple videos embedded with data-playable
+ data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage)
+ if data_playables:
+ for _, data_playable_json in data_playables:
+ data_playable = self._parse_json(
+ unescapeHTML(data_playable_json), playlist_id, fatal=False)
+ if not data_playable:
+ continue
+ settings = data_playable.get('settings', {})
+ if settings:
+ # data-playable with video vpid in settings.playlistObject.items (e.g.
+ # http://www.bbc.com/news/world-us-canada-34473351)
+ playlist_object = settings.get('playlistObject', {})
+ if playlist_object:
+ items = playlist_object.get('items')
+ if items and isinstance(items, list):
+ title = playlist_object['title']
+ description = playlist_object.get('summary')
+ duration = int_or_none(items[0].get('duration'))
+ programme_id = items[0].get('vpid')
+ formats, subtitles = self._download_media_selector(programme_id)
+ self._sort_formats(formats)
+ entries.append({
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ })
+ else:
+ # data-playable without vpid but with a playlist.sxml URL
+ # in otherSettings.playlist (e.g.
+ # http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani)
+ playlist = data_playable.get('otherSettings', {}).get('playlist', {})
+ if playlist:
+ entries.append(self._extract_from_playlist_sxml(
+ playlist.get('progressiveDownloadUrl'), playlist_id, timestamp))
+
+ if entries:
+ playlist_title = playlist_title or remove_end(self._og_search_title(webpage), ' - BBC News')
+ playlist_description = playlist_description or self._og_search_description(webpage, default=None)
+ return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
# single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
programme_id = self._search_regex(
- [r'data-video-player-vpid="([\da-z]{8})"',
- r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'],
+ [r'data-video-player-vpid="(%s)"' % self._ID_REGEX,
+ r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX,
+ r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX],
webpage, 'vpid', default=None)
+
if programme_id:
formats, subtitles = self._download_media_selector(programme_id)
self._sort_formats(formats)
@@ -689,7 +823,7 @@ class BBCIE(BBCCoUkIE):
# Multiple video article (e.g.
# http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
- EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?'
+ EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
entries = []
for match in extract_all(r'new\s+SMP\(({.+?})\)'):
embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
@@ -778,3 +912,33 @@ class BBCIE(BBCCoUkIE):
})
return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
+
+
+class BBCCoUkArticleIE(InfoExtractor):
+ _VALID_URL = 'http://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
+ IE_NAME = 'bbc.co.uk:article'
+ IE_DESC = 'BBC articles'
+
+ _TEST = {
+ 'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer',
+ 'info_dict': {
+ 'id': '3jNQLTMrPlYGTBn0WV6M2MS',
+ 'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four',
+ 'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.',
+ },
+ 'playlist_count': 4,
+ 'add_ie': ['BBCCoUk'],
+ }
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, playlist_id)
+
+ title = self._og_search_title(webpage)
+ description = self._og_search_description(webpage).strip()
+
+ entries = [self.url_result(programme_url) for programme_url in re.findall(
+ r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
+
+ return self.playlist_result(entries, playlist_id, title, description)
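
The core of the new BBCIE flow is the data-playable scan: pull the HTML-escaped JSON out of the attribute, unescape it, and read vpid/duration from settings.playlistObject.items. A condensed, runnable sketch on a hypothetical page snippet:

    import json
    import re

    try:
        from html import unescape as unescapeHTML  # Python 3.4+
    except ImportError:
        from HTMLParser import HTMLParser
        unescapeHTML = HTMLParser().unescape

    webpage = ('<div data-playable=\'{"settings":{"playlistObject":'
               '{"title":"Clip","items":[{"vpid":"p02mprgb",'
               '"duration":47}]}}}\'></div>')

    for _, blob in re.findall(r'data-playable=(["\'])({.+?})\1', webpage):
        data_playable = json.loads(unescapeHTML(blob))
        items = data_playable['settings']['playlistObject']['items']
        print(items[0]['vpid'], items[0]['duration'])  # p02mprgb 47
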
diff --git a/youtube_dl/extractor/beeg.py b/youtube_dl/extractor/beeg.py
index b38057f2f..c8d921daf 100644
--- a/youtube_dl/extractor/beeg.py
+++ b/youtube_dl/extractor/beeg.py
@@ -1,65 +1,105 @@
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
+from ..compat import (
+ compat_chr,
+ compat_ord,
+ compat_urllib_parse_unquote,
+)
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
class BeegIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
_TEST = {
'url': 'http://beeg.com/5416503',
- 'md5': '1bff67111adb785c51d1b42959ec10e5',
+ 'md5': '46c384def73b33dbc581262e5ee67cef',
'info_dict': {
'id': '5416503',
'ext': 'mp4',
'title': 'Sultry Striptease',
- 'description': 'md5:6db3c6177972822aaba18652ff59c773',
- 'categories': list, # NSFW
- 'thumbnail': 're:https?://.*\.jpg$',
+ 'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
+ 'timestamp': 1391813355,
+ 'upload_date': '20140207',
+ 'duration': 383,
+ 'tags': list,
'age_limit': 18,
}
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
+
+ video = self._download_json(
+ 'http://beeg.com/api/v5/video/%s' % video_id, video_id)
- webpage = self._download_webpage(url, video_id)
+ def split(o, e):
+ def cut(s, x):
+ n.append(s[:x])
+ return s[x:]
+ n = []
+ r = len(o) % e
+ if r > 0:
+ o = cut(o, r)
+ while len(o) > e:
+ o = cut(o, e)
+ n.append(o)
+ return n
- quality_arr = self._search_regex(
- r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats')
+ def decrypt_key(key):
+ # Reverse engineered from http://static.beeg.com/cpl/1105.js
+ a = '5ShMcIQlssOd7zChAIOlmeTZDaUxULbJRnywYaiB'
+ e = compat_urllib_parse_unquote(key)
+ o = ''.join([
+ compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21)
+ for n in range(len(e))])
+ return ''.join(split(o, 3)[::-1])
- formats = [{
- 'url': fmt[1],
- 'format_id': fmt[0],
- 'height': int(fmt[0][:-1]),
- } for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)]
+ def decrypt_url(encrypted_url):
+ encrypted_url = self._proto_relative_url(
+ encrypted_url.replace('{DATA_MARKERS}', ''), 'http:')
+ key = self._search_regex(
+ r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None)
+ if not key:
+ return encrypted_url
+ return encrypted_url.replace(key, decrypt_key(key))
+ formats = []
+ for format_id, video_url in video.items():
+ if not video_url:
+ continue
+ height = self._search_regex(
+ r'^(\d+)[pP]$', format_id, 'height', default=None)
+ if not height:
+ continue
+ formats.append({
+ 'url': decrypt_url(video_url),
+ 'format_id': format_id,
+ 'height': int(height),
+ })
self._sort_formats(formats)
- title = self._html_search_regex(
- r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
+ title = video['title']
+ video_id = video.get('id') or video_id
+ display_id = video.get('code')
+ description = video.get('desc')
- description = self._html_search_regex(
- r'<meta name="description" content="([^"]*)"',
- webpage, 'description', fatal=False)
- thumbnail = self._html_search_regex(
- r'\'previewer.url\'\s*:\s*"([^"]*)"',
- webpage, 'thumbnail', fatal=False)
+ timestamp = parse_iso8601(video.get('date'), ' ')
+ duration = int_or_none(video.get('duration'))
- categories_str = self._html_search_regex(
- r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
- categories = (
- None if categories_str is None
- else categories_str.split(','))
+ tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None
return {
'id': video_id,
+ 'display_id': display_id,
'title': title,
'description': description,
- 'thumbnail': thumbnail,
- 'categories': categories,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'tags': tags,
'formats': formats,
'age_limit': 18,
}
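
The beeg rewrite replaces webpage scraping with the v5 API plus URL decryption. The routine below is the same algorithm in self-contained form; 'oyzoqm' is a synthetic key constructed for this demo, not real site data.

    try:
        from urllib.parse import unquote  # Python 3
    except ImportError:
        from urllib import unquote  # Python 2

    SALT = '5ShMcIQlssOd7zChAIOlmeTZDaUxULbJRnywYaiB'

    def split(o, e):
        # chop o into e-sized chunks, shorter remainder first
        n, r = [], len(o) % e
        if r > 0:
            n.append(o[:r])
            o = o[r:]
        while len(o) > e:
            n.append(o[:e])
            o = o[e:]
        n.append(o)
        return n

    def decrypt_key(key):
        e = unquote(key)
        o = ''.join(
            chr(ord(e[i]) - ord(SALT[i % len(SALT)]) % 21)
            for i in range(len(e)))
        return ''.join(split(o, 3)[::-1])  # un-shuffle the 3-char chunks

    assert decrypt_key('oyzoqm') == 'abcdef'
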
diff --git a/youtube_dl/extractor/bild.py b/youtube_dl/extractor/bild.py
index 4d8cce1ef..1a0184861 100644
--- a/youtube_dl/extractor/bild.py
+++ b/youtube_dl/extractor/bild.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
- fix_xml_ampersands,
+ unescapeHTML,
)
@@ -17,26 +17,24 @@ class BildIE(InfoExtractor):
'info_dict': {
'id': '38184146',
'ext': 'mp4',
- 'title': 'BILD hat sie getestet',
+ 'title': 'Das können die neuen iPads',
+ 'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 196,
- 'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
- xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
- doc = self._download_xml(xml_url, video_id, transform_source=fix_xml_ampersands)
-
- duration = int_or_none(doc.attrib.get('duration'), scale=1000)
+ video_data = self._download_json(
+ url.split('.bild.html')[0] + ',view=json.bild.html', video_id)
return {
'id': video_id,
- 'title': doc.attrib['ueberschrift'],
- 'description': doc.attrib.get('text'),
- 'url': doc.attrib['src'],
- 'thumbnail': doc.attrib.get('img'),
- 'duration': duration,
+ 'title': unescapeHTML(video_data['title']).strip(),
+ 'description': unescapeHTML(video_data.get('description')),
+ 'url': video_data['clipList'][0]['srces'][0]['src'],
+ 'thumbnail': video_data.get('poster'),
+ 'duration': int_or_none(video_data.get('durationSec')),
}
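
The bild.de change drops the XML view in favour of the JSON view, which is addressed by rewriting the article URL itself (hypothetical URL for illustration):

    url = 'http://www.bild.de/video/clip/foo/bar-38184146.bild.html'  # hypothetical
    json_url = url.split('.bild.html')[0] + ',view=json.bild.html'
    # -> http://www.bild.de/video/clip/foo/bar-38184146,view=json.bild.html
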
diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py
index ecc17ebeb..59beb11bc 100644
--- a/youtube_dl/extractor/bilibili.py
+++ b/youtube_dl/extractor/bilibili.py
@@ -2,141 +2,109 @@
from __future__ import unicode_literals
import re
-import itertools
-import json
-import xml.etree.ElementTree as ET
from .common import InfoExtractor
+from ..compat import compat_str
from ..utils import (
int_or_none,
- unified_strdate,
+ unescapeHTML,
ExtractorError,
+ xpath_text,
)
class BiliBiliIE(InfoExtractor):
- _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
+ _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+).html)?'
_TESTS = [{
'url': 'http://www.bilibili.tv/video/av1074402/',
'md5': '2c301e4dab317596e837c3e7633e7d86',
'info_dict': {
- 'id': '1074402_part1',
+ 'id': '1554319',
'ext': 'flv',
'title': '【金坷垃】金泡沫',
- 'duration': 308,
+ 'duration': 308313,
'upload_date': '20140420',
'thumbnail': 're:^https?://.+\.jpg',
+ 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
+ 'timestamp': 1397983878,
+ 'uploader': '菊子桑',
},
}, {
'url': 'http://www.bilibili.com/video/av1041170/',
'info_dict': {
'id': '1041170',
'title': '【BD1080P】刀语【诸神&异域】',
+ 'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~',
+ 'uploader': '枫叶逝去',
+ 'timestamp': 1396501299,
},
'playlist_count': 9,
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- if '(此视频不存在或被删除)' in webpage:
- raise ExtractorError(
- 'The video does not exist or was deleted', expected=True)
-
- if '>你没有权限浏览! 由于版权相关问题 我们不对您所在的地区提供服务<' in webpage:
- raise ExtractorError(
- 'The video is not available in your region due to copyright reasons',
- expected=True)
-
- video_code = self._search_regex(
- r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code')
-
- title = self._html_search_meta(
- 'media:title', video_code, 'title', fatal=True)
- duration_str = self._html_search_meta(
- 'duration', video_code, 'duration')
- if duration_str is None:
- duration = None
- else:
- duration_mobj = re.match(
- r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$',
- duration_str)
- duration = (
- int_or_none(duration_mobj.group('hours'), default=0) * 3600 +
- int(duration_mobj.group('minutes')) * 60 +
- int(duration_mobj.group('seconds')))
- upload_date = unified_strdate(self._html_search_meta(
- 'uploadDate', video_code, fatal=False))
- thumbnail = self._html_search_meta(
- 'thumbnailUrl', video_code, 'thumbnail', fatal=False)
-
- cid = self._search_regex(r'cid=(\d+)', webpage, 'cid')
-
- entries = []
-
- lq_page = self._download_webpage(
- 'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid,
- video_id,
- note='Downloading LQ video info'
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ page_num = mobj.group('page_num') or '1'
+
+ view_data = self._download_json(
+ 'http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s&page=%s' % (video_id, page_num),
+ video_id)
+ if 'error' in view_data:
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, view_data['error']), expected=True)
+
+ cid = view_data['cid']
+ title = unescapeHTML(view_data['title'])
+
+ doc = self._download_xml(
+ 'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid,
+ cid,
+ 'Downloading page %s/%s' % (page_num, view_data['pages'])
)
- try:
- err_info = json.loads(lq_page)
- raise ExtractorError(
- 'BiliBili said: ' + err_info['error_text'], expected=True)
- except ValueError:
- pass
- lq_doc = ET.fromstring(lq_page)
- lq_durls = lq_doc.findall('./durl')
+ if xpath_text(doc, './result') == 'error':
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, xpath_text(doc, './message')), expected=True)
- hq_doc = self._download_xml(
- 'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid,
- video_id,
- note='Downloading HQ video info',
- fatal=False,
- )
- if hq_doc is not False:
- hq_durls = hq_doc.findall('./durl')
- assert len(lq_durls) == len(hq_durls)
- else:
- hq_durls = itertools.repeat(None)
+ entries = []
- i = 1
- for lq_durl, hq_durl in zip(lq_durls, hq_durls):
+ for durl in doc.findall('./durl'):
+ size = xpath_text(durl, ['./filesize', './size'])
formats = [{
- 'format_id': 'lq',
- 'quality': 1,
- 'url': lq_durl.find('./url').text,
- 'filesize': int_or_none(
- lq_durl.find('./size'), get_attr='text'),
+ 'url': durl.find('./url').text,
+ 'filesize': int_or_none(size),
+ 'ext': 'flv',
}]
- if hq_durl is not None:
- formats.append({
- 'format_id': 'hq',
- 'quality': 2,
- 'ext': 'flv',
- 'url': hq_durl.find('./url').text,
- 'filesize': int_or_none(
- hq_durl.find('./size'), get_attr='text'),
- })
- self._sort_formats(formats)
+ backup_urls = durl.find('./backup_url')
+ if backup_urls is not None:
+ for backup_url in backup_urls.findall('./url'):
+ formats.append({'url': backup_url.text})
+ formats.reverse()
entries.append({
- 'id': '%s_part%d' % (video_id, i),
+ 'id': '%s_part%s' % (cid, xpath_text(durl, './order')),
'title': title,
+ 'duration': int_or_none(xpath_text(durl, './length'), 1000),
'formats': formats,
- 'duration': duration,
- 'upload_date': upload_date,
- 'thumbnail': thumbnail,
})
- i += 1
-
- return {
- '_type': 'multi_video',
- 'entries': entries,
- 'id': video_id,
- 'title': title
+ info = {
+ 'id': compat_str(cid),
+ 'title': title,
+ 'description': view_data.get('description'),
+ 'thumbnail': view_data.get('pic'),
+ 'uploader': view_data.get('author'),
+ 'timestamp': int_or_none(view_data.get('created')),
+ 'view_count': int_or_none(view_data.get('play')),
+ 'duration': int_or_none(xpath_text(doc, './timelength')),
}
+
+ if len(entries) == 1:
+ entries[0].update(info)
+ return entries[0]
+ else:
+ info.update({
+ '_type': 'multi_video',
+ 'id': video_id,
+ 'entries': entries,
+ })
+ return info
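
The tail of the new BiliBiliIE collapses a single-part video into a plain result and wraps multi-part ones as multi_video; the shaping logic in isolation:

    def shape_result(info, entries, video_id):
        if len(entries) == 1:
            entries[0].update(info)  # merge shared metadata into the lone part
            return entries[0]
        info.update({
            '_type': 'multi_video',
            'id': video_id,
            'entries': entries,
        })
        return info
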
diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py
index c3296283d..35375f7b1 100644
--- a/youtube_dl/extractor/bliptv.py
+++ b/youtube_dl/extractor/bliptv.py
@@ -4,14 +4,12 @@ import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urlparse,
-)
+from ..compat import compat_urlparse
from ..utils import (
clean_html,
int_or_none,
parse_iso8601,
+ sanitized_Request,
unescapeHTML,
xpath_text,
xpath_with_ns,
@@ -219,7 +217,7 @@ class BlipTVIE(InfoExtractor):
for lang, url in subtitles_urls.items():
# For some weird reason, blip.tv serves a video instead of subtitles
# when we request with a common UA
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
req.add_header('User-Agent', 'youtube-dl')
subtitles[lang] = [{
# The extension is 'srt' but it's actually an 'ass' file
diff --git a/youtube_dl/extractor/bloomberg.py b/youtube_dl/extractor/bloomberg.py
index 0dca29b71..ebeef8f2a 100644
--- a/youtube_dl/extractor/bloomberg.py
+++ b/youtube_dl/extractor/bloomberg.py
@@ -6,9 +6,9 @@ from .common import InfoExtractor
class BloombergIE(InfoExtractor):
- _VALID_URL = r'https?://www\.bloomberg\.com/news/videos/[^/]+/(?P<id>[^/?#]+)'
+ _VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2',
# The md5 checksum changes
'info_dict': {
@@ -17,22 +17,39 @@ class BloombergIE(InfoExtractor):
'title': 'Shah\'s Presentation on Foreign-Exchange Strategies',
'description': 'md5:a8ba0302912d03d246979735c17d2761',
},
- }
+ }, {
+ 'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
name = self._match_id(url)
webpage = self._download_webpage(url, name)
- video_id = self._search_regex(r'"bmmrId":"(.+?)"', webpage, 'id')
+ video_id = self._search_regex(
+ r'["\']bmmrId["\']\s*:\s*(["\'])(?P<url>.+?)\1',
+ webpage, 'id', group='url')
title = re.sub(': Video$', '', self._og_search_title(webpage))
embed_info = self._download_json(
'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id)
formats = []
for stream in embed_info['streams']:
- if stream["muxing_format"] == "TS":
- formats.extend(self._extract_m3u8_formats(stream['url'], video_id))
+ stream_url = stream.get('url')
+ if not stream_url:
+ continue
+ if stream['muxing_format'] == 'TS':
+ m3u8_formats = self._extract_m3u8_formats(
+ stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
else:
- formats.extend(self._extract_f4m_formats(stream['url'], video_id))
+ f4m_formats = self._extract_f4m_formats(
+ stream_url, video_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
self._sort_formats(formats)
return {
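
The bloomberg hunk illustrates a pattern applied across this commit: probe each manifest with fatal=False so one dead stream no longer aborts the whole extraction. Paraphrased as a standalone loop (ie stands in for the extractor instance):

    def collect_formats(ie, streams, video_id):
        formats = []
        for stream in streams:
            stream_url = stream.get('url')
            if not stream_url:
                continue
            if stream.get('muxing_format') == 'TS':
                new_formats = ie._extract_m3u8_formats(
                    stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
            else:
                new_formats = ie._extract_f4m_formats(
                    stream_url, video_id, f4m_id='hds', fatal=False)
            if new_formats:
                formats.extend(new_formats)
        return formats
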
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 4721c2293..03a4f446e 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -3,15 +3,14 @@ from __future__ import unicode_literals
import re
import json
-import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import (
+ compat_etree_fromstring,
compat_parse_qs,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
- compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
@@ -20,12 +19,18 @@ from ..utils import (
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
+ float_or_none,
+ js_to_json,
+ int_or_none,
+ parse_iso8601,
+ sanitized_Request,
unescapeHTML,
unsmuggle_url,
)
-class BrightcoveIE(InfoExtractor):
+class BrightcoveLegacyIE(InfoExtractor):
+ IE_NAME = 'brightcove:legacy'
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
@@ -119,7 +124,7 @@ class BrightcoveIE(InfoExtractor):
object_str = fix_xml_ampersands(object_str)
try:
- object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
+ object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
@@ -245,7 +250,7 @@ class BrightcoveIE(InfoExtractor):
def _get_video_info(self, video_id, query_str, query, referer=None):
request_url = self._FEDERATED_URL_TEMPLATE % query_str
- req = compat_urllib_request.Request(request_url)
+ req = sanitized_Request(request_url)
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
@@ -346,3 +351,183 @@ class BrightcoveIE(InfoExtractor):
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % info['id'])
return info
+
+
+class BrightcoveNewIE(InfoExtractor):
+ IE_NAME = 'brightcove:new'
+ _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>(?:ref:)?\d+)'
+ _TESTS = [{
+ 'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
+ 'md5': 'c8100925723840d4b0d243f7025703be',
+ 'info_dict': {
+ 'id': '4463358922001',
+ 'ext': 'mp4',
+ 'title': 'Meet the man behind Popcorn Time',
+ 'description': 'md5:eac376a4fe366edc70279bfb681aea16',
+ 'duration': 165.768,
+ 'timestamp': 1441391203,
+ 'upload_date': '20150904',
+ 'uploader_id': '929656772001',
+ 'formats': 'mincount:22',
+ },
+ }, {
+ # with rtmp streams
+ 'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001',
+ 'info_dict': {
+ 'id': '4279049078001',
+ 'ext': 'mp4',
+ 'title': 'Titansgrave: Chapter 0',
+ 'description': 'Titansgrave: Chapter 0',
+ 'duration': 1242.058,
+ 'timestamp': 1433556729,
+ 'upload_date': '20150606',
+ 'uploader_id': '4036320279001',
+ 'formats': 'mincount:41',
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # ref: prefixed video id
+ 'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
+ 'only_matching': True,
+ }]
+
+ @staticmethod
+ def _extract_url(webpage):
+ urls = BrightcoveNewIE._extract_urls(webpage)
+ return urls[0] if urls else None
+
+ @staticmethod
+ def _extract_urls(webpage):
+ # Reference:
+ # 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
+ # 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
+ # 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html
+ # 4. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
+
+ entries = []
+
+ # Look for iframe embeds [1]
+ for _, url in re.findall(
+ r'<iframe[^>]+src=(["\'])((?:https?:)//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
+ entries.append(url)
+
+ # Look for embed_in_page embeds [2]
+ for video_id, account_id, player_id, embed in re.findall(
+ # According to examples from [3] it's unclear whether video id
+ # may be optional and what to do when it is
+ # According to [4] data-video-id may be prefixed with ref:
+ r'''(?sx)
+ <video[^>]+
+ data-video-id=["\']((?:ref:)?\d+)["\'][^>]*>.*?
+ </video>.*?
+ <script[^>]+
+ src=["\'](?:https?:)?//players\.brightcove\.net/
+ (\d+)/([\da-f-]+)_([^/]+)/index\.min\.js
+ ''', webpage):
+ entries.append(
+ 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
+ % (account_id, player_id, embed, video_id))
+
+ return entries
+
+ def _real_extract(self, url):
+ account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
+
+ webpage = self._download_webpage(
+ 'http://players.brightcove.net/%s/%s_%s/index.min.js'
+ % (account_id, player_id, embed), video_id)
+
+ policy_key = None
+
+ catalog = self._search_regex(
+ r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
+ if catalog:
+ catalog = self._parse_json(
+ js_to_json(catalog), video_id, fatal=False)
+ if catalog:
+ policy_key = catalog.get('policyKey')
+
+ if not policy_key:
+ policy_key = self._search_regex(
+ r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
+ webpage, 'policy key', group='pk')
+
+ req = sanitized_Request(
+ 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s'
+ % (account_id, video_id),
+ headers={'Accept': 'application/json;pk=%s' % policy_key})
+ json_data = self._download_json(req, video_id)
+
+ title = json_data['name']
+
+ formats = []
+ for source in json_data.get('sources', []):
+ source_type = source.get('type')
+ src = source.get('src')
+ if source_type == 'application/x-mpegURL':
+ if not src:
+ continue
+ m3u8_formats = self._extract_m3u8_formats(
+ src, video_id, 'mp4', entry_protocol='m3u8_native',
+ m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ else:
+ streaming_src = source.get('streaming_src')
+ stream_name, app_name = source.get('stream_name'), source.get('app_name')
+ if not src and not streaming_src and (not stream_name or not app_name):
+ continue
+ tbr = float_or_none(source.get('avg_bitrate'), 1000)
+ height = int_or_none(source.get('height'))
+ f = {
+ 'tbr': tbr,
+ 'width': int_or_none(source.get('width')),
+ 'height': height,
+ 'filesize': int_or_none(source.get('size')),
+ 'container': source.get('container'),
+ 'vcodec': source.get('codec'),
+ 'ext': (source.get('container') or '').lower() or None,
+ }
+
+ def build_format_id(kind):
+ format_id = kind
+ if tbr:
+ format_id += '-%dk' % int(tbr)
+ if height:
+ format_id += '-%dp' % height
+ return format_id
+
+ if src or streaming_src:
+ f.update({
+ 'url': src or streaming_src,
+ 'format_id': build_format_id('http' if src else 'http-streaming'),
+ 'preference': 2 if src else 1,
+ })
+ else:
+ f.update({
+ 'url': app_name,
+ 'play_path': stream_name,
+ 'format_id': build_format_id('rtmp'),
+ })
+ formats.append(f)
+ self._sort_formats(formats)
+
+ description = json_data.get('description')
+ thumbnail = json_data.get('thumbnail')
+ timestamp = parse_iso8601(json_data.get('published_at'))
+ duration = float_or_none(json_data.get('duration'), 1000)
+ tags = json_data.get('tags', [])
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'uploader_id': account_id,
+ 'formats': formats,
+ 'tags': tags,
+ }
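
BrightcoveNewIE boils down to two requests: fetch the player JS to scrape the policy key, then call the Playback API with that key in the Accept header. A condensed sketch of the URL and header construction (the real code goes through sanitized_Request and _download_json):

    import re

    def playback_api_request(account_id, video_id, player_js):
        policy_key = re.search(
            r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1', player_js).group('pk')
        api_url = (
            'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s'
            % (account_id, video_id))
        return api_url, {'Accept': 'application/json;pk=%s' % policy_key}
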
diff --git a/youtube_dl/extractor/byutv.py b/youtube_dl/extractor/byutv.py
index 3b2de517e..dda98059e 100644
--- a/youtube_dl/extractor/byutv.py
+++ b/youtube_dl/extractor/byutv.py
@@ -14,9 +14,10 @@ class BYUtvIE(InfoExtractor):
'info_dict': {
'id': 'studio-c-season-5-episode-5',
'ext': 'mp4',
- 'description': 'md5:5438d33774b6bdc662f9485a340401cc',
+ 'description': 'md5:e07269172baff037f8e8bf9956bc9747',
'title': 'Season 5 Episode 5',
- 'thumbnail': 're:^https?://.*\.jpg$'
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 1486.486,
},
'params': {
'skip_download': True,
diff --git a/youtube_dl/extractor/canalc2.py b/youtube_dl/extractor/canalc2.py
index c4fefefe4..f6a1ff381 100644
--- a/youtube_dl/extractor/canalc2.py
+++ b/youtube_dl/extractor/canalc2.py
@@ -4,38 +4,53 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..utils import parse_duration
class Canalc2IE(InfoExtractor):
IE_NAME = 'canalc2.tv'
- _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:www\.)?canalc2\.tv/video/(?P<id>\d+)'
_TEST = {
- 'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
+ 'url': 'http://www.canalc2.tv/video/12163',
'md5': '060158428b650f896c542dfbb3d6487f',
'info_dict': {
'id': '12163',
- 'ext': 'mp4',
- 'title': 'Terrasses du Numérique'
+ 'ext': 'flv',
+ 'title': 'Terrasses du Numérique',
+ 'duration': 122,
+ },
+ 'params': {
+ 'skip_download': True, # Requires rtmpdump
}
}
def _real_extract(self, url):
- video_id = re.match(self._VALID_URL, url).group('id')
- # We need to set the voir field for getting the file name
- url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- file_name = self._search_regex(
- r"so\.addVariable\('file','(.*?)'\);",
- webpage, 'file name')
- video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name
+ video_url = self._search_regex(
+ r'jwplayer\((["\'])Player\1\)\.setup\({[^}]*file\s*:\s*(["\'])(?P<file>.+?)\2',
+ webpage, 'video_url', group='file')
+ formats = [{'url': video_url}]
+ if video_url.startswith('rtmp://'):
+ rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url)
+ formats[0].update({
+ 'url': rtmp.group('url'),
+ 'ext': 'flv',
+ 'app': rtmp.group('app'),
+ 'play_path': rtmp.group('play_path'),
+ 'page_url': url,
+ })
title = self._html_search_regex(
- r'class="evenement8">(.*?)</a>', webpage, 'title')
+ r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.*?)</h3>', webpage, 'title')
+ duration = parse_duration(self._search_regex(
+ r'id=["\']video_duree["\'][^>]*>([^<]+)',
+ webpage, 'duration', fatal=False))
return {
'id': video_id,
- 'ext': 'mp4',
- 'url': video_url,
'title': title,
+ 'duration': duration,
+ 'formats': formats,
}
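
The canalc2 rewrite splits rtmp:// URLs into app and play_path for rtmpdump; how the regex carves them up, on a synthetic URL:

    import re

    video_url = 'rtmp://vod-flash.u-strasbg.fr/vod/mp4:video/12163.mp4'  # synthetic
    rtmp = re.search(
        r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url)
    print(rtmp.group('url'))        # rtmp://vod-flash.u-strasbg.fr/vod/
    print(rtmp.group('app'))        # vod/
    print(rtmp.group('play_path'))  # mp4:video/12163.mp4
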
diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py
index 57e0cda2c..004372f8d 100644
--- a/youtube_dl/extractor/canalplus.py
+++ b/youtube_dl/extractor/canalplus.py
@@ -78,7 +78,8 @@ class CanalplusIE(InfoExtractor):
if video_id is None:
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
- r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
+ [r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)', r'id=["\']canal_video_player(?P<id>\d+)'],
+ webpage, 'video id', group='id')
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
doc = self._download_xml(info_url, video_id, 'Downloading video XML')
diff --git a/youtube_dl/extractor/cbs.py b/youtube_dl/extractor/cbs.py
index 75fffb156..40d07ab18 100644
--- a/youtube_dl/extractor/cbs.py
+++ b/youtube_dl/extractor/cbs.py
@@ -1,6 +1,10 @@
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..utils import (
+ sanitized_Request,
+ smuggle_url,
+)
class CBSIE(InfoExtractor):
@@ -46,13 +50,19 @@ class CBSIE(InfoExtractor):
def _real_extract(self, url):
display_id = self._match_id(url)
- webpage = self._download_webpage(url, display_id)
+ request = sanitized_Request(url)
+ # Requests with an Android UA are served higher quality (720p) streams (see
+ # https://github.com/rg3/youtube-dl/issues/7490)
+ request.add_header('User-Agent', 'Mozilla/5.0 (Linux; Android 4.4; Nexus 5)')
+ webpage = self._download_webpage(request, display_id)
real_id = self._search_regex(
[r"video\.settings\.pid\s*=\s*'([^']+)';", r"cbsplayer\.pid\s*=\s*'([^']+)';"],
webpage, 'real video ID')
return {
'_type': 'url_transparent',
'ie_key': 'ThePlatform',
- 'url': 'theplatform:%s' % real_id,
+ 'url': smuggle_url(
+ 'http://link.theplatform.com/s/dJ5BDC/%s?mbr=true&manifest=m3u' % real_id,
+ {'force_smil_url': True}),
'display_id': display_id,
}
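
smuggle_url carries extractor-private flags (here force_smil_url for ThePlatform) through a URL fragment. Roughly, and not verbatim from utils.py:

    import json
    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2

    def smuggle_url(url, data):
        return url + '#' + urlencode({'__youtubedl_smuggle': json.dumps(data)})

    print(smuggle_url(
        'http://link.theplatform.com/s/dJ5BDC/abcdef?mbr=true&manifest=m3u',
        {'force_smil_url': True}))
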
diff --git a/youtube_dl/extractor/cbsnews.py b/youtube_dl/extractor/cbsnews.py
index 52e61d85b..f9a64a0a2 100644
--- a/youtube_dl/extractor/cbsnews.py
+++ b/youtube_dl/extractor/cbsnews.py
@@ -67,9 +67,12 @@ class CBSNewsIE(InfoExtractor):
'format_id': format_id,
}
if uri.startswith('rtmp'):
+ play_path = re.sub(
+ r'{slistFilePath}', '',
+ uri.split('<break>')[-1].split('{break}')[-1])
fmt.update({
'app': 'ondemand?auth=cbs',
- 'play_path': 'mp4:' + uri.split('<break>')[-1],
+ 'play_path': 'mp4:' + play_path,
'player_url': 'http://www.cbsnews.com/[[IMPORT]]/vidtech.cbsinteractive.com/player/3_3_0/CBSI_PLAYER_HD.swf',
'page_url': 'http://www.cbsnews.com',
'ext': 'flv',
diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py
index e857e66f4..6f7b2a70d 100644
--- a/youtube_dl/extractor/ceskatelevize.py
+++ b/youtube_dl/extractor/ceskatelevize.py
@@ -5,7 +5,6 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_request,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
@@ -13,6 +12,7 @@ from ..compat import (
from ..utils import (
ExtractorError,
float_or_none,
+ sanitized_Request,
)
@@ -100,7 +100,7 @@ class CeskaTelevizeIE(InfoExtractor):
'requestSource': 'iVysilani',
}
- req = compat_urllib_request.Request(
+ req = sanitized_Request(
'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
data=compat_urllib_parse.urlencode(data))
@@ -115,7 +115,7 @@ class CeskaTelevizeIE(InfoExtractor):
if playlist_url == 'error_region':
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
- req = compat_urllib_request.Request(compat_urllib_parse_unquote(playlist_url))
+ req = sanitized_Request(compat_urllib_parse_unquote(playlist_url))
req.add_header('Referer', url)
playlist_title = self._og_search_title(webpage)
diff --git a/youtube_dl/extractor/channel9.py b/youtube_dl/extractor/channel9.py
index 3dfc24f5b..c74553dcf 100644
--- a/youtube_dl/extractor/channel9.py
+++ b/youtube_dl/extractor/channel9.py
@@ -3,7 +3,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ parse_filesize,
+ qualities,
+)
class Channel9IE(InfoExtractor):
@@ -28,7 +32,7 @@ class Channel9IE(InfoExtractor):
'title': 'Developer Kick-Off Session: Stuff We Love',
'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
'duration': 4576,
- 'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
+ 'thumbnail': 're:http://.*\.jpg',
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
@@ -44,31 +48,29 @@ class Channel9IE(InfoExtractor):
'title': 'Self-service BI with Power BI - nuclear testing',
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
- 'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
+ 'thumbnail': 're:http://.*\.jpg',
'authors': ['Mike Wilmot'],
},
+ },
+ {
+ # low quality mp4 is best
+ 'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
+ 'info_dict': {
+ 'id': 'Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library',
+ 'ext': 'mp4',
+ 'title': 'Ranges for the Standard Library',
+ 'description': 'md5:2e6b4917677af3728c5f6d63784c4c5d',
+ 'duration': 5646,
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}
]
_RSS_URL = 'http://channel9.msdn.com/%s/RSS'
- # Sorted by quality
- _known_formats = ['MP3', 'MP4', 'Mid Quality WMV', 'Mid Quality MP4', 'High Quality WMV', 'High Quality MP4']
-
- def _restore_bytes(self, formatted_size):
- if not formatted_size:
- return 0
- m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size)
- if not m:
- return 0
- units = m.group('units')
- try:
- exponent = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper())
- except ValueError:
- return 0
- size = float(m.group('size'))
- return int(size * (1024 ** exponent))
-
def _formats_from_html(self, html):
FORMAT_REGEX = r'''
(?x)
@@ -78,16 +80,20 @@ class Channel9IE(InfoExtractor):
<h3>File\s+size</h3>\s*(?P<filesize>.*?)\s*
</div>)? # File size part may be missing
'''
- # Extract known formats
+ quality = qualities((
+ 'MP3', 'MP4',
+ 'Low Quality WMV', 'Low Quality MP4',
+ 'Mid Quality WMV', 'Mid Quality MP4',
+ 'High Quality WMV', 'High Quality MP4'))
formats = [{
'url': x.group('url'),
'format_id': x.group('quality'),
'format_note': x.group('note'),
'format': '%s (%s)' % (x.group('quality'), x.group('note')),
- 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate
- 'preference': self._known_formats.index(x.group('quality')),
+ 'filesize_approx': parse_filesize(x.group('filesize')),
+ 'quality': quality(x.group('quality')),
'vcodec': 'none' if x.group('note') == 'Audio only' else None,
- } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
+ } for x in list(re.finditer(FORMAT_REGEX, html))]
self._sort_formats(formats)
@@ -158,7 +164,7 @@ class Channel9IE(InfoExtractor):
def _extract_session_day(self, html):
m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html)
- return m.group('day') if m is not None else None
+ return m.group('day').strip() if m is not None else None
def _extract_session_room(self, html):
m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html)
@@ -224,12 +230,12 @@ class Channel9IE(InfoExtractor):
if contents is None:
return contents
- authors = self._extract_authors(html)
+ if len(contents) > 1:
+ raise ExtractorError('Got more than one entry')
+ result = contents[0]
+ result['authors'] = self._extract_authors(html)
- for content in contents:
- content['authors'] = authors
-
- return contents
+ return result
def _extract_session(self, html, content_path):
contents = self._extract_content(html, content_path)
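
The qualities() helper that replaces _known_formats.index() above is essentially an index lookup that tolerates unknown format ids instead of raising ValueError; a sketch matching its observable behaviour:

    def qualities(quality_ids):
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality = qualities((
        'MP3', 'MP4',
        'Low Quality WMV', 'Low Quality MP4',
        'Mid Quality WMV', 'Mid Quality MP4',
        'High Quality WMV', 'High Quality MP4'))
    assert quality('High Quality MP4') > quality('MP3')
    assert quality('Unknown Format') == -1
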
diff --git a/youtube_dl/extractor/chaturbate.py b/youtube_dl/extractor/chaturbate.py
new file mode 100644
index 000000000..0b67ba67d
--- /dev/null
+++ b/youtube_dl/extractor/chaturbate.py
@@ -0,0 +1,50 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class ChaturbateIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?P<id>[^/?#]+)'
+ _TESTS = [{
+ 'url': 'https://www.chaturbate.com/siswet19/',
+ 'info_dict': {
+ 'id': 'siswet19',
+ 'ext': 'mp4',
+ 'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'age_limit': 18,
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'https://en.chaturbate.com/siswet19/',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ m3u8_url = self._search_regex(
+ r'src=(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage,
+ 'playlist', default=None, group='url')
+
+ if not m3u8_url:
+ error = self._search_regex(
+ r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>',
+ webpage, 'error', group='error')
+ raise ExtractorError(error, expected=True)
+
+ formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
+
+ return {
+ 'id': video_id,
+ 'title': self._live_title(video_id),
+ 'thumbnail': 'https://cdn-s.highwebmedia.com/uHK3McUtGCG3SMFcd4ZJsRv8/roomimage/%s.jpg' % video_id,
+ 'age_limit': self._rta_search(webpage),
+ 'is_live': True,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py
index 7af903571..3a47f6fa4 100644
--- a/youtube_dl/extractor/clipfish.py
+++ b/youtube_dl/extractor/clipfish.py
@@ -1,14 +1,9 @@
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..utils import (
- determine_ext,
int_or_none,
- js_to_json,
- parse_iso8601,
- remove_end,
+ unified_strdate,
)
@@ -21,48 +16,47 @@ class ClipfishIE(InfoExtractor):
'id': '3966754',
'ext': 'mp4',
'title': 'FIFA 14 - E3 2013 Trailer',
- 'timestamp': 1370938118,
+ 'description': 'Video zu FIFA 14: E3 2013 Trailer',
'upload_date': '20130611',
'duration': 82,
+ 'view_count': int,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- video_info = self._parse_json(
- js_to_json(self._html_search_regex(
- '(?s)videoObject\s*=\s*({.+?});', webpage, 'video object')),
- video_id)
+ video_info = self._download_json(
+ 'http://www.clipfish.de/devapi/id/%s?format=json&apikey=hbbtv' % video_id,
+ video_id)['items'][0]
formats = []
- for video_url in re.findall(r'var\s+videourl\s*=\s*"([^"]+)"', webpage):
- ext = determine_ext(video_url)
- if ext == 'm3u8':
- formats.append({
- 'url': video_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
- 'ext': 'mp4',
- 'format_id': 'hls',
- })
- else:
- formats.append({
- 'url': video_url,
- 'format_id': ext,
- })
- self._sort_formats(formats)
- title = remove_end(self._og_search_title(webpage), ' - Video')
- thumbnail = self._og_search_thumbnail(webpage)
- duration = int_or_none(video_info.get('length'))
- timestamp = parse_iso8601(self._html_search_meta('uploadDate', webpage, 'upload date'))
+ m3u8_url = video_info.get('media_videourl_hls')
+ if m3u8_url:
+ formats.append({
+ 'url': m3u8_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
+ 'ext': 'mp4',
+ 'format_id': 'hls',
+ })
+
+ mp4_url = video_info.get('media_videourl')
+ if mp4_url:
+ formats.append({
+ 'url': mp4_url,
+ 'format_id': 'mp4',
+ 'width': int_or_none(video_info.get('width')),
+ 'height': int_or_none(video_info.get('height')),
+ 'tbr': int_or_none(video_info.get('bitrate')),
+ })
return {
'id': video_id,
- 'title': title,
+ 'title': video_info['title'],
+ 'description': video_info.get('descr'),
'formats': formats,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'timestamp': timestamp,
+ 'thumbnail': video_info.get('media_content_thumbnail_large') or video_info.get('media_thumbnail'),
+ 'duration': int_or_none(video_info.get('media_length')),
+ 'upload_date': unified_strdate(video_info.get('pubDate')),
+ 'view_count': int_or_none(video_info.get('media_views'))
}
diff --git a/youtube_dl/extractor/cliphunter.py b/youtube_dl/extractor/cliphunter.py
index d46592cc5..2996b6b09 100644
--- a/youtube_dl/extractor/cliphunter.py
+++ b/youtube_dl/extractor/cliphunter.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import determine_ext
+from ..utils import int_or_none
_translation_table = {
@@ -42,31 +42,26 @@ class CliphunterIE(InfoExtractor):
video_title = self._search_regex(
r'mediaTitle = "([^"]+)"', webpage, 'title')
- fmts = {}
- for fmt in ('mp4', 'flv'):
- fmt_list = self._parse_json(self._search_regex(
- r'var %sjson\s*=\s*(\[.*?\]);' % fmt, webpage, '%s formats' % fmt), video_id)
- for f in fmt_list:
- fmts[f['fname']] = _decode(f['sUrl'])
-
- qualities = self._parse_json(self._search_regex(
- r'var player_btns\s*=\s*(.*?);\n', webpage, 'quality info'), video_id)
+ gexo_files = self._parse_json(
+ self._search_regex(
+ r'var\s+gexoFiles\s*=\s*({.+?});', webpage, 'gexo files'),
+ video_id)
formats = []
- for fname, url in fmts.items():
- f = {
- 'url': url,
- }
- if fname in qualities:
- qual = qualities[fname]
- f.update({
- 'format_id': '%s_%sp' % (determine_ext(url), qual['h']),
- 'width': qual['w'],
- 'height': qual['h'],
- 'tbr': qual['br'],
- })
- formats.append(f)
-
+ for format_id, f in gexo_files.items():
+ video_url = f.get('url')
+ if not video_url:
+ continue
+ fmt = f.get('fmt')
+ height = f.get('h')
+ format_id = '%s_%sp' % (fmt, height) if fmt and height else format_id
+ formats.append({
+ 'url': _decode(video_url),
+ 'format_id': format_id,
+ 'width': int_or_none(f.get('w')),
+ 'height': int_or_none(height),
+ 'tbr': int_or_none(f.get('br')),
+ })
self._sort_formats(formats)
thumbnail = self._search_regex(
diff --git a/youtube_dl/extractor/clubic.py b/youtube_dl/extractor/clubic.py
index 14f215c5c..1dfa7c12e 100644
--- a/youtube_dl/extractor/clubic.py
+++ b/youtube_dl/extractor/clubic.py
@@ -12,9 +12,9 @@ from ..utils import (
class ClubicIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?clubic\.com/video/[^/]+/video.*-(?P<id>[0-9]+)\.html'
+ _VALID_URL = r'http://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html',
'md5': '1592b694ba586036efac1776b0b43cd3',
'info_dict': {
@@ -24,7 +24,10 @@ class ClubicIE(InfoExtractor):
'description': 're:Gueule de bois chez Nokia. Le constructeur a indiqué cette.*',
'thumbnail': 're:^http://img\.clubic\.com/.*\.jpg$',
}
- }
+ }, {
+ 'url': 'http://www.clubic.com/video/video-clubic-week-2-0-apple-iphone-6s-et-plus-mais-surtout-le-pencil-469792.html',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
diff --git a/youtube_dl/extractor/clyp.py b/youtube_dl/extractor/clyp.py
new file mode 100644
index 000000000..57e643799
--- /dev/null
+++ b/youtube_dl/extractor/clyp.py
@@ -0,0 +1,57 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ float_or_none,
+ parse_iso8601,
+)
+
+
+class ClypIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?clyp\.it/(?P<id>[a-z0-9]+)'
+ _TEST = {
+ 'url': 'https://clyp.it/ojz2wfah',
+ 'md5': '1d4961036c41247ecfdcc439c0cddcbb',
+ 'info_dict': {
+ 'id': 'ojz2wfah',
+ 'ext': 'mp3',
+ 'title': 'Krisson80 - bits wip wip',
+ 'description': '#Krisson80BitsWipWip #chiptune\n#wip',
+ 'duration': 263.21,
+ 'timestamp': 1443515251,
+ 'upload_date': '20150929',
+ },
+ }
+
+ def _real_extract(self, url):
+ audio_id = self._match_id(url)
+
+ metadata = self._download_json(
+ 'https://api.clyp.it/%s' % audio_id, audio_id)
+
+ formats = []
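+ # the metadata exposes up to four renditions via the keys built below:
+ # 'OggUrl', 'Mp3Url', 'SecureOggUrl' and 'SecureMp3Url'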
+ for secure in ('', 'Secure'):
+ for ext in ('Ogg', 'Mp3'):
+ format_id = '%s%s' % (secure, ext)
+ format_url = metadata.get('%sUrl' % format_id)
+ if format_url:
+ formats.append({
+ 'url': format_url,
+ 'format_id': format_id,
+ 'vcodec': 'none',
+ })
+ self._sort_formats(formats)
+
+ title = metadata['Title']
+ description = metadata.get('Description')
+ duration = float_or_none(metadata.get('Duration'))
+ timestamp = parse_iso8601(metadata.get('DateCreated'))
+
+ return {
+ 'id': audio_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/cmt.py b/youtube_dl/extractor/cmt.py
index e96c59f71..f1311b14f 100644
--- a/youtube_dl/extractor/cmt.py
+++ b/youtube_dl/extractor/cmt.py
@@ -4,7 +4,7 @@ from .mtv import MTVIE
class CMTIE(MTVIE):
IE_NAME = 'cmt.com'
- _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml'
+ _VALID_URL = r'https?://www\.cmt\.com/(?:videos|shows)/(?:[^/]+/)*(?P<videoid>\d+)'
_FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/'
_TESTS = [{
@@ -16,4 +16,7 @@ class CMTIE(MTVIE):
'title': 'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
'description': 'Blame It All On My Roots',
},
+ }, {
+ 'url': 'http://www.cmt.com/shows/party-down-south/party-down-south-ep-407-gone-girl/1738172/playlist/#id=1738172',
+ 'only_matching': True,
}]
diff --git a/youtube_dl/extractor/cnet.py b/youtube_dl/extractor/cnet.py
index 5dd69bff7..5c3908f72 100644
--- a/youtube_dl/extractor/cnet.py
+++ b/youtube_dl/extractor/cnet.py
@@ -1,15 +1,11 @@
# coding: utf-8
from __future__ import unicode_literals
-import json
+from .theplatform import ThePlatformIE
+from ..utils import int_or_none
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
-)
-
-class CNETIE(InfoExtractor):
+class CNETIE(ThePlatformIE):
_VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
@@ -18,25 +14,20 @@ class CNETIE(InfoExtractor):
'ext': 'flv',
'title': 'Hands-on with Microsoft Windows 8.1 Update',
'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
- 'thumbnail': 're:^http://.*/flmswindows8.jpg$',
'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861',
'uploader': 'Sarah Mitroff',
+ 'duration': 70,
},
- 'params': {
- 'skip_download': 'requires rtmpdump',
- }
}, {
'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/',
'info_dict': {
'id': '56527b93-d25d-44e3-b738-f989ce2e49ba',
'ext': 'flv',
+ 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
'description': 'Khail and Ashley wonder what other civic woes can be solved by self-tweeting objects, investigate a new kind of VR camera and watch an origami robot self-assemble, walk, climb, dig and dissolve. #TDPothole',
'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40',
'uploader': 'Ashley Esqueda',
- 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
- },
- 'params': {
- 'skip_download': True, # requires rtmpdump
+ 'duration': 1482,
},
}]
@@ -45,26 +36,13 @@ class CNETIE(InfoExtractor):
webpage = self._download_webpage(url, display_id)
data_json = self._html_search_regex(
- r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'",
+ r"data-cnet-video(?:-uvp)?-options='([^']+)'",
webpage, 'data json')
- data = json.loads(data_json)
- vdata = data['video']
- if not vdata:
- vdata = data['videos'][0]
- if not vdata:
- raise ExtractorError('Cannot find video data')
-
- mpx_account = data['config']['players']['default']['mpx_account']
- vid = vdata['files'].get('rtmp', vdata['files']['hds'])
- tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid)
+ data = self._parse_json(data_json, display_id)
+ vdata = data.get('video') or data['videos'][0]
video_id = vdata['id']
- title = vdata.get('headline')
- if title is None:
- title = vdata.get('title')
- if title is None:
- raise ExtractorError('Cannot find title!')
- thumbnail = vdata.get('image', {}).get('path')
+ title = vdata['title']
author = vdata.get('author')
if author:
uploader = '%s %s' % (author['firstName'], author['lastName'])
@@ -73,13 +51,34 @@ class CNETIE(InfoExtractor):
uploader = None
uploader_id = None
+ mpx_account = data['config']['uvpConfig']['default']['mpx_account']
+
+ metadata = self.get_metadata('%s/%s' % (mpx_account, list(vdata['files'].values())[0]), video_id)
+ description = vdata.get('description') or metadata.get('description')
+ duration = int_or_none(vdata.get('duration')) or metadata.get('duration')
+
+ formats = []
+ subtitles = {}
+ for (fkey, vid) in vdata['files'].items():
+ if fkey == 'hls_phone' and 'hls_tablet' in vdata['files']:
+ continue
+ release_url = 'http://link.theplatform.com/s/%s/%s?format=SMIL&mbr=true' % (mpx_account, vid)
+ if fkey == 'hds':
+ release_url += '&manifest=f4m'
+ tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % fkey)
+ formats.extend(tp_formats)
+ subtitles = self._merge_subtitles(subtitles, tp_subtitles)
+ self._sort_formats(formats)
+
return {
- '_type': 'url_transparent',
- 'url': tp_link,
'id': video_id,
'display_id': display_id,
'title': title,
+ 'description': description,
+ 'thumbnail': metadata.get('thumbnail'),
+ 'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
- 'thumbnail': thumbnail,
+ 'subtitles': subtitles,
+ 'formats': formats,
}
diff --git a/youtube_dl/extractor/collegerama.py b/youtube_dl/extractor/collegerama.py
index fedd48490..40667a0f1 100644
--- a/youtube_dl/extractor/collegerama.py
+++ b/youtube_dl/extractor/collegerama.py
@@ -3,10 +3,10 @@ from __future__ import unicode_literals
import json
from .common import InfoExtractor
-from ..compat import compat_urllib_request
from ..utils import (
float_or_none,
int_or_none,
+ sanitized_Request,
)
@@ -52,7 +52,7 @@ class CollegeRamaIE(InfoExtractor):
}
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
json.dumps(player_options_request))
request.add_header('Content-Type', 'application/json')
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
index 91ebb0ce5..3e4bd10b6 100644
--- a/youtube_dl/extractor/comedycentral.py
+++ b/youtube_dl/extractor/comedycentral.py
@@ -151,12 +151,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
mobj = re.match(self._VALID_URL, url)
if mobj.group('shortname'):
- if mobj.group('shortname') in ('tds', 'thedailyshow'):
- url = 'http://thedailyshow.cc.com/full-episodes/'
- else:
- url = 'http://thecolbertreport.cc.com/full-episodes/'
- mobj = re.match(self._VALID_URL, url, re.VERBOSE)
- assert mobj is not None
+ return self.url_result('http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes')
if mobj.group('clip'):
if mobj.group('videotitle'):
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 5eeeda08d..828f58f12 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -10,20 +10,18 @@ import re
import socket
import sys
import time
-import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_getpass,
- compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlparse,
- compat_urllib_request,
compat_urlparse,
compat_str,
+ compat_etree_fromstring,
)
from ..utils import (
NO_DEFAULT,
@@ -32,13 +30,16 @@ from ..utils import (
clean_html,
compiled_regex_type,
determine_ext,
+ error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
+ sanitized_Request,
unescapeHTML,
+ unified_strdate,
url_basename,
xpath_text,
xpath_with_ns,
@@ -152,6 +153,7 @@ class InfoExtractor(object):
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
+ release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
@@ -163,12 +165,14 @@ class InfoExtractor(object):
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
+ "ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
- duration: Length of the video in seconds, as an integer.
+ duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
+ repost_count: Number of reposts of the video
average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
@@ -307,11 +311,11 @@ class InfoExtractor(object):
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
- return cls.__name__[:-2]
+ return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
- return type(self).__name__[:-2]
+ return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
@@ -329,7 +333,8 @@ class InfoExtractor(object):
return False
if errnote is None:
errnote = 'Unable to download webpage'
- errmsg = '%s: %s' % (errnote, compat_str(err))
+
+ errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
@@ -458,7 +463,7 @@ class InfoExtractor(object):
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
- return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
+ return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
@@ -516,6 +521,12 @@ class InfoExtractor(object):
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
+ @staticmethod
+ def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
+ raise ExtractorError(
+ '%s. You might want to use --proxy to work around this.' % msg,
+ expected=True)
+
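+ # A hypothetical call site (illustrative string match):
+ #     if 'not available in your region' in webpage:
+ #         self.raise_geo_restricted()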
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
@@ -613,7 +624,7 @@ class InfoExtractor(object):
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))
return (username, password)
@@ -636,8 +647,9 @@ class InfoExtractor(object):
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
- content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
- property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
+ content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
+ property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
+ % {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
@@ -731,8 +743,9 @@ class InfoExtractor(object):
@staticmethod
def _hidden_inputs(html):
+ html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
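+ # strip HTML comments first so inputs inside commented-out markup are
+ # ignored; the (?!<!--) lookahead keeps a match from running past the
+ # opener of a following comment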
hidden_inputs = {}
- for input in re.findall(r'<input([^>]+)>', html):
+ for input in re.findall(r'(?i)<input([^>]+)>', html):
if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
continue
name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
@@ -746,7 +759,7 @@ class InfoExtractor(object):
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
- r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
+ r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
@@ -830,7 +843,7 @@ class InfoExtractor(object):
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
- if isinstance(e.cause, compat_HTTPError):
+ if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
@@ -861,13 +874,18 @@ class InfoExtractor(object):
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
- transform_source=lambda s: fix_xml_ampersands(s).strip()):
+ transform_source=lambda s: fix_xml_ampersands(s).strip(),
+ fatal=True):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
- transform_source=transform_source)
+ transform_source=transform_source,
+ fatal=fatal)
+
+ if manifest is False:
+ return manifest
formats = []
manifest_version = '1.0'
@@ -875,6 +893,11 @@ class InfoExtractor(object):
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
+ base_url = xpath_text(
+ manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
+ 'base URL', default=None)
+ if base_url:
+ base_url = base_url.strip()
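+ # relative media hrefs below resolve against <baseURL> when present,
+ # otherwise against the manifest URL's directory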
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
media_url = media_el.attrib.get('href') or media_el.attrib.get('url')
@@ -882,13 +905,16 @@ class InfoExtractor(object):
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
- else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url))
+ else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
- formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id))
+ f4m_formats = self._extract_f4m_formats(
+ manifest_url, video_id, preference, f4m_id, fatal=fatal)
+ if f4m_formats:
+ formats.extend(f4m_formats)
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
@@ -924,13 +950,15 @@ class InfoExtractor(object):
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
- m3u8_doc = self._download_webpage(
+ res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
- if m3u8_doc is False:
- return m3u8_doc
+ if res is False:
+ return res
+ m3u8_doc, urlh = res
+ m3u8_url = urlh.geturl()
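+ # use the post-redirect URL so that relative variant playlist paths
+ # resolve against the location that actually served the manifest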
last_info = None
last_media = None
kv_rex = re.compile(
@@ -1036,6 +1064,7 @@ class InfoExtractor(object):
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
+ upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
@@ -1045,11 +1074,22 @@ class InfoExtractor(object):
title = content
elif not description and name in ('description', 'abstract'):
description = content
+ elif not upload_date and name == 'date':
+ upload_date = unified_strdate(content)
+
+ thumbnails = [{
+ 'id': image.get('type'),
+ 'url': image.get('src'),
+ 'width': int_or_none(image.get('width')),
+ 'height': int_or_none(image.get('height')),
+ } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
+ 'upload_date': upload_date,
+ 'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
@@ -1076,7 +1116,7 @@ class InfoExtractor(object):
if not src:
continue
- bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
+ bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
filesize = int_or_none(video.get('size') or video.get('fileSize'))
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
@@ -1108,8 +1148,10 @@ class InfoExtractor(object):
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
if proto == 'm3u8' or src_ext == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
- src_url, video_id, ext or 'mp4', m3u8_id='hls'))
+ m3u8_formats = self._extract_m3u8_formats(
+ src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
@@ -1121,10 +1163,12 @@ class InfoExtractor(object):
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse.urlencode(f4m_params)
- formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds'))
+ f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
continue
- if src_url.startswith('http'):
+ if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
@@ -1243,7 +1287,7 @@ class InfoExtractor(object):
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py
index 3db4db4e4..6f92ae2ed 100644
--- a/youtube_dl/extractor/condenast.py
+++ b/youtube_dl/extractor/condenast.py
@@ -2,7 +2,6 @@
from __future__ import unicode_literals
import re
-import json
from .common import InfoExtractor
from ..compat import (
@@ -12,6 +11,7 @@ from ..compat import (
)
from ..utils import (
orderedSet,
+ remove_end,
)
@@ -24,21 +24,33 @@ class CondeNastIE(InfoExtractor):
# The keys are the supported sites and the values are the name to be shown
# to the user and in the extractor description.
_SITES = {
- 'wired': 'WIRED',
+ 'allure': 'Allure',
+ 'architecturaldigest': 'Architectural Digest',
+ 'arstechnica': 'Ars Technica',
+ 'bonappetit': 'Bon Appétit',
+ 'brides': 'Brides',
+ 'cnevids': 'Condé Nast',
+ 'cntraveler': 'Condé Nast Traveler',
+ 'details': 'Details',
+ 'epicurious': 'Epicurious',
+ 'glamour': 'Glamour',
+ 'golfdigest': 'Golf Digest',
'gq': 'GQ',
+ 'newyorker': 'The New Yorker',
+ 'self': 'SELF',
+ 'teenvogue': 'Teen Vogue',
+ 'vanityfair': 'Vanity Fair',
'vogue': 'Vogue',
- 'glamour': 'Glamour',
+ 'wired': 'WIRED',
'wmagazine': 'W Magazine',
- 'vanityfair': 'Vanity Fair',
- 'cnevids': 'Condé Nast',
}
- _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
+ _VALID_URL = r'http://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed(?:js)?)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
- EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys())
+ EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed(?:js)?)/.+?' % '|'.join(_SITES.keys())
- _TEST = {
+ _TESTS = [{
'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
'md5': '1921f713ed48aabd715691f774c451f7',
'info_dict': {
@@ -47,7 +59,16 @@ class CondeNastIE(InfoExtractor):
'title': '3D Printed Speakers Lit With LED',
'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
}
- }
+ }, {
+ # JS embed
+ 'url': 'http://player.cnevids.com/embedjs/55f9cf8b61646d1acf00000c/5511d76261646d5566020000.js',
+ 'md5': 'f1a6f9cafb7083bab74a710f65d08999',
+ 'info_dict': {
+ 'id': '55f9cf8b61646d1acf00000c',
+ 'ext': 'mp4',
+ 'title': '3D printed TSA Travel Sentry keys really do open TSA locks',
+ }
+ }]
def _extract_series(self, url, webpage):
title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
@@ -86,8 +107,8 @@ class CondeNastIE(InfoExtractor):
info_url = base_info_url + data
info_page = self._download_webpage(info_url, video_id,
'Downloading video info')
- video_info = self._search_regex(r'var video = ({.+?});', info_page, 'video info')
- video_info = json.loads(video_info)
+ video_info = self._search_regex(r'var\s+video\s*=\s*({.+?});', info_page, 'video info')
+ video_info = self._parse_json(video_info, video_id)
formats = [{
'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),
@@ -111,6 +132,13 @@ class CondeNastIE(InfoExtractor):
url_type = mobj.group('type')
item_id = mobj.group('id')
+ # Convert JS embed to regular embed
+ if url_type == 'embedjs':
+ parsed_url = compat_urlparse.urlparse(url)
+ url = compat_urlparse.urlunparse(parsed_url._replace(
+ path=remove_end(parsed_url.path, '.js').replace('/embedjs/', '/embed/')))
+ url_type = 'embed'
+
self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site])
webpage = self._download_webpage(url, item_id)
diff --git a/youtube_dl/extractor/criterion.py b/youtube_dl/extractor/criterion.py
index 4fb178165..dedb810a0 100644
--- a/youtube_dl/extractor/criterion.py
+++ b/youtube_dl/extractor/criterion.py
@@ -27,9 +27,7 @@ class CriterionIE(InfoExtractor):
final_url = self._search_regex(
r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url')
title = self._og_search_title(webpage)
- description = self._html_search_regex(
- r'<meta name="description" content="(.+?)" />',
- webpage, 'video description')
+ description = self._html_search_meta('description', webpage)
thumbnail = self._search_regex(
r'so.addVariable\("thumbnailURL", "(.+?)"\)\;',
webpage, 'thumbnail url')
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index ce123482e..00d943f77 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -5,12 +5,12 @@ import re
import json
import base64
import zlib
-import xml.etree.ElementTree
from hashlib import sha1
from math import pow, sqrt, floor
from .common import InfoExtractor
from ..compat import (
+ compat_etree_fromstring,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_request,
@@ -21,7 +21,9 @@ from ..utils import (
bytes_to_intlist,
intlist_to_bytes,
int_or_none,
+ lowercase_escape,
remove_end,
+ sanitized_Request,
unified_strdate,
urlencode_postdata,
xpath_text,
@@ -31,9 +33,57 @@ from ..aes import (
)
-class CrunchyrollIE(InfoExtractor):
- _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
+class CrunchyrollBaseIE(InfoExtractor):
_NETRC_MACHINE = 'crunchyroll'
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+ self.report_login()
+ login_url = 'https://www.crunchyroll.com/?a=formhandler'
+ data = urlencode_postdata({
+ 'formname': 'RpcApiUser_Login',
+ 'name': username,
+ 'password': password,
+ })
+ login_request = sanitized_Request(login_url, data)
+ login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ self._download_webpage(login_request, None, False, 'Wrong login info')
+
+ def _real_initialize(self):
+ self._login()
+
+ def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
+ request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
+ else sanitized_Request(url_or_request))
+ # Accept-Language must be set explicitly to accept any language in order to
+ # avoid issues similar to https://github.com/rg3/youtube-dl/issues/6797.
+ # Along with the IP address, Crunchyroll uses Accept-Language to decide
+ # whether geo-restriction should be imposed (it appears to take only the
+ # first language, ignoring priorities, and requires it to correspond to the
+ # IP). This also breaks Crunchyroll in geo-restricted cases for browsers
+ # that don't place the locale language first in the header; allowing any
+ # language works around the issue.
+ request.add_header('Accept-Language', '*')
+ return super(CrunchyrollBaseIE, self)._download_webpage(
+ request, video_id, note, errnote, fatal, tries, timeout, encoding)
+
+ @staticmethod
+ def _add_skip_wall(url):
+ parsed_url = compat_urlparse.urlparse(url)
+ qs = compat_urlparse.parse_qs(parsed_url.query)
+ # Always force skip_wall to bypass the maturity wall, namely the 18+
+ # confirmation message:
+ # > This content may be inappropriate for some people.
+ # > Are you sure you want to continue?
+ # since it is not disabled by default in a Crunchyroll account's settings.
+ # See https://github.com/rg3/youtube-dl/issues/7202.
+ qs['skip_wall'] = ['1']
+ return compat_urlparse.urlunparse(
+ parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
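+ # e.g. (illustrative) _add_skip_wall('http://www.crunchyroll.com/show')
+ # -> 'http://www.crunchyroll.com/show?skip_wall=1'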
+
+
+class CrunchyrollIE(CrunchyrollBaseIE):
+ _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TESTS = [{
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
'info_dict': {
@@ -56,7 +106,7 @@ class CrunchyrollIE(InfoExtractor):
'id': '589804',
'ext': 'flv',
'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11',
- 'description': 'md5:fe2743efedb49d279552926d0bd0cd9e',
+ 'description': 'md5:2fbc01f90b87e8e9137296f37b461c12',
'thumbnail': 're:^https?://.*\.jpg$',
'uploader': 'Danny Choo Network',
'upload_date': '20120213',
@@ -65,10 +115,13 @@ class CrunchyrollIE(InfoExtractor):
# rtmp
'skip_download': True,
},
-
}, {
'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697',
'only_matching': True,
+ }, {
+ # geo-restricted (US), 18+ maturity wall, non-premium available
+ 'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617',
+ 'only_matching': True,
}]
_FORMAT_IDS = {
@@ -78,24 +131,6 @@ class CrunchyrollIE(InfoExtractor):
'1080': ('80', '108'),
}
- def _login(self):
- (username, password) = self._get_login_info()
- if username is None:
- return
- self.report_login()
- login_url = 'https://www.crunchyroll.com/?a=formhandler'
- data = urlencode_postdata({
- 'formname': 'RpcApiUser_Login',
- 'name': username,
- 'password': password,
- })
- login_request = compat_urllib_request.Request(login_url, data)
- login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- self._download_webpage(login_request, None, False, 'Wrong login info')
-
- def _real_initialize(self):
- self._login()
-
def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(base64.b64decode(data.encode('utf-8')))
iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8')))
@@ -201,7 +236,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
return output
def _extract_subtitles(self, subtitle):
- sub_root = xml.etree.ElementTree.fromstring(subtitle)
+ sub_root = compat_etree_fromstring(subtitle)
return [{
'ext': 'srt',
'data': self._convert_subtitles_to_srt(sub_root),
@@ -212,7 +247,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
def _get_subtitles(self, video_id, webpage):
subtitles = {}
- for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
+ for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage):
sub_page = self._download_webpage(
'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
video_id, note='Downloading subtitles for ' + sub_name)
@@ -238,7 +273,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
else:
webpage_url = 'http://www.' + mobj.group('url')
- webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
+ webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage')
note_m = self._html_search_regex(
r'<div class="showmedia-trailer-notice">(.+?)</div>',
webpage, 'trailer-notice', default='')
@@ -254,18 +289,26 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
if 'To view this, please log in to verify you are 18 or older.' in webpage:
self.raise_login_required()
- video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
+ video_title = self._html_search_regex(
+ r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>',
+ webpage, 'video_title')
video_title = re.sub(r' {2,}', ' ', video_title)
- video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
- if not video_description:
- video_description = None
- video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
+ video_description = self._html_search_regex(
+ r'<script[^>]*>\s*.+?\[media_id=%s\].+?"description"\s*:\s*"([^"]+)' % video_id,
+ webpage, 'description', default=None)
+ if video_description:
+ video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
+ video_upload_date = self._html_search_regex(
+ [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
+ webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
if video_upload_date:
video_upload_date = unified_strdate(video_upload_date)
- video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)
+ video_uploader = self._html_search_regex(
+ r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage,
+ 'video_uploader', fatal=False)
playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
- playerdata_req = compat_urllib_request.Request(playerdata_url)
+ playerdata_req = sanitized_Request(playerdata_url)
playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')
@@ -277,7 +320,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage):
stream_quality, stream_format = self._FORMAT_IDS[fmt]
video_format = fmt + 'p'
- streamdata_req = compat_urllib_request.Request(
+ streamdata_req = sanitized_Request(
'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
% (stream_id, stream_format, stream_quality),
compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
@@ -330,9 +373,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
}
-class CrunchyrollShowPlaylistIE(InfoExtractor):
+class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
IE_NAME = "crunchyroll:playlist"
- _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
+ _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
_TESTS = [{
'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
@@ -341,12 +384,25 @@ class CrunchyrollShowPlaylistIE(InfoExtractor):
'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
},
'playlist_count': 13,
+ }, {
+ # geo-restricted (US), 18+ maturity wall, non-premium available
+ 'url': 'http://www.crunchyroll.com/cosplay-complex-ova',
+ 'info_dict': {
+ 'id': 'cosplay-complex-ova',
+ 'title': 'Cosplay Complex OVA'
+ },
+ 'playlist_count': 3,
+ 'skip': 'Georestricted',
+ }, {
+ # geo-restricted (US), 18+ maturity wall, non-premium will become available on 2015.11.14
+ 'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
+ 'only_matching': True,
}]
def _real_extract(self, url):
show_id = self._match_id(url)
- webpage = self._download_webpage(url, show_id)
+ webpage = self._download_webpage(self._add_skip_wall(url), show_id)
title = self._html_search_regex(
r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
webpage, 'title')
diff --git a/youtube_dl/extractor/cspan.py b/youtube_dl/extractor/cspan.py
index fbefd37d0..7b685d157 100644
--- a/youtube_dl/extractor/cspan.py
+++ b/youtube_dl/extractor/cspan.py
@@ -9,6 +9,7 @@ from ..utils import (
find_xpath_attr,
smuggle_url,
determine_ext,
+ ExtractorError,
)
from .senateisvp import SenateISVPIE
@@ -18,33 +19,32 @@ class CSpanIE(InfoExtractor):
IE_DESC = 'C-SPAN'
_TESTS = [{
'url': 'http://www.c-span.org/video/?313572-1/HolderonV',
- 'md5': '8e44ce11f0f725527daccc453f553eb0',
+ 'md5': '94b29a4f131ff03d23471dd6f60b6a1d',
'info_dict': {
'id': '315139',
'ext': 'mp4',
'title': 'Attorney General Eric Holder on Voting Rights Act Decision',
- 'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.',
+ 'description': 'Attorney General Eric Holder speaks to reporters following the Supreme Court decision in [Shelby County v. Holder], in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced.',
},
'skip': 'Regularly fails on travis, for unknown reasons',
}, {
'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models',
- # For whatever reason, the served video alternates between
- # two different ones
+ 'md5': '8e5fbfabe6ad0f89f3012a7943c1287b',
'info_dict': {
- 'id': '340723',
+ 'id': 'c4486943',
'ext': 'mp4',
- 'title': 'International Health Care Models',
+ 'title': 'CSPAN - International Health Care Models',
'description': 'md5:7a985a2d595dba00af3d9c9f0783c967',
}
}, {
'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall',
- 'md5': '446562a736c6bf97118e389433ed88d4',
+ 'md5': '2ae5051559169baadba13fc35345ae74',
'info_dict': {
'id': '342759',
'ext': 'mp4',
'title': 'General Motors Ignition Switch Recall',
'duration': 14848,
- 'description': 'md5:70c7c3b8fa63fa60d42772440596034c'
+ 'description': 'md5:118081aedd24bf1d3b68b3803344e7f3'
},
}, {
# Video from senate.gov
@@ -57,67 +57,77 @@ class CSpanIE(InfoExtractor):
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- page_id = mobj.group('id')
- webpage = self._download_webpage(url, page_id)
- video_id = self._search_regex(r'progid=\'?([0-9]+)\'?>', webpage, 'video id')
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+ matches = re.search(r'data-(prog|clip)id=\'([0-9]+)\'', webpage)
+ if matches:
+ video_type, video_id = matches.groups()
+ if video_type == 'prog':
+ video_type = 'program'
+ else:
+ senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
+ if senate_isvp_url:
+ title = self._og_search_title(webpage)
+ surl = smuggle_url(senate_isvp_url, {'force_title': title})
+ return self.url_result(surl, 'SenateISVP', video_id, title)
- description = self._html_search_regex(
- [
- # The full description
- r'<div class=\'expandable\'>(.*?)<a href=\'#\'',
- # If the description is small enough the other div is not
- # present, otherwise this is a stripped version
- r'<p class=\'initial\'>(.*?)</p>'
- ],
- webpage, 'description', flags=re.DOTALL, default=None)
+ def get_text_attr(d, attr):
+ return d.get(attr, {}).get('#text')
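+ # helper for the XML-converted JSON shape, e.g. {'title': {'#text': '...'}}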
- info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id
- data = self._download_json(info_url, video_id)
+ data = self._download_json(
+ 'http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s' % (video_type, video_id),
+ video_id)['video']
+ if data['@status'] != 'Success':
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, get_text_attr(data, 'error')), expected=True)
doc = self._download_xml(
- 'http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
+ 'http://www.c-span.org/common/services/flashXml.php?%sid=%s' % (video_type, video_id),
video_id)
+ description = self._html_search_meta('description', webpage)
+
title = find_xpath_attr(doc, './/string', 'name', 'title').text
thumbnail = find_xpath_attr(doc, './/string', 'name', 'poster').text
- senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
- if senate_isvp_url:
- surl = smuggle_url(senate_isvp_url, {'force_title': title})
- return self.url_result(surl, 'SenateISVP', video_id, title)
-
- files = data['video']['files']
- try:
- capfile = data['video']['capfile']['#text']
- except KeyError:
- capfile = None
+ files = data['files']
+ capfile = get_text_attr(data, 'capfile')
- entries = [{
- 'id': '%s_%d' % (video_id, partnum + 1),
- 'title': (
- title if len(files) == 1 else
- '%s part %d' % (title, partnum + 1)),
- 'url': unescapeHTML(f['path']['#text']),
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': int_or_none(f.get('length', {}).get('#text')),
- 'subtitles': {
- 'en': [{
- 'url': capfile,
- 'ext': determine_ext(capfile, 'dfxp')
- }],
- } if capfile else None,
- } for partnum, f in enumerate(files)]
+ entries = []
+ for partnum, f in enumerate(files):
+ formats = []
+ for quality in f['qualities']:
+ formats.append({
+ 'format_id': '%s-%sp' % (get_text_attr(quality, 'bitrate'), get_text_attr(quality, 'height')),
+ 'url': unescapeHTML(get_text_attr(quality, 'file')),
+ 'height': int_or_none(get_text_attr(quality, 'height')),
+ 'tbr': int_or_none(get_text_attr(quality, 'bitrate')),
+ })
+ self._sort_formats(formats)
+ entries.append({
+ 'id': '%s_%d' % (video_id, partnum + 1),
+ 'title': (
+ title if len(files) == 1 else
+ '%s part %d' % (title, partnum + 1)),
+ 'formats': formats,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': int_or_none(get_text_attr(f, 'length')),
+ 'subtitles': {
+ 'en': [{
+ 'url': capfile,
+ 'ext': determine_ext(capfile, 'dfxp')
+ }],
+ } if capfile else None,
+ })
if len(entries) == 1:
entry = dict(entries[0])
- entry['id'] = video_id
+ entry['id'] = 'c' + video_id if video_type == 'clip' else video_id
return entry
else:
return {
'_type': 'playlist',
'entries': entries,
'title': title,
- 'id': video_id,
+ 'id': 'c' + video_id if video_type == 'clip' else video_id,
}
diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 2d90b2224..0c5b6617f 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -7,15 +7,13 @@ import itertools
from .common import InfoExtractor
-from ..compat import (
- compat_str,
- compat_urllib_request,
-)
from ..utils import (
- ExtractorError,
determine_ext,
+ error_to_compat_str,
+ ExtractorError,
int_or_none,
parse_iso8601,
+ sanitized_Request,
str_to_int,
unescapeHTML,
)
@@ -25,7 +23,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
"""Build a request with the family filter disabled"""
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
request.add_header('Cookie', 'family_filter=off; ff=off')
return request
@@ -96,6 +94,16 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'uploader': 'HotWaves1012',
'age_limit': 18,
}
+ },
+ # geo-restricted, player v5
+ {
+ 'url': 'http://www.dailymotion.com/video/xhza0o',
+ 'only_matching': True,
+ },
+ # with subtitles
+ {
+ 'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news',
+ 'only_matching': True,
}
]
@@ -119,11 +127,16 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
webpage, 'comment count', fatal=False))
player_v5 = self._search_regex(
- r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
+ [r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826
+ r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
+ r'buildPlayer\(({.+?})\);'],
webpage, 'player v5', default=None)
if player_v5:
player = self._parse_json(player_v5, video_id)
metadata = player['metadata']
+
+ self._check_error(metadata)
+
formats = []
for quality, media_list in metadata['qualities'].items():
for media in media_list:
@@ -133,9 +146,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
type_ = media.get('type')
if type_ == 'application/vnd.lumberjack.manifest':
continue
- if type_ == 'application/x-mpegURL' or determine_ext(media_url) == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
- media_url, video_id, 'mp4', m3u8_id='hls'))
+ ext = determine_ext(media_url)
+ if type_ == 'application/x-mpegURL' or ext == 'm3u8':
+ m3u8_formats = self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ elif type_ == 'application/f4m' or ext == 'f4m':
+ f4m_formats = self._extract_f4m_formats(
+ media_url, video_id, preference=-1, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
else:
f = {
'url': media_url,
@@ -158,11 +179,13 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
uploader_id = metadata.get('owner', {}).get('id')
subtitles = {}
- for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items():
- subtitles[subtitle_lang] = [{
- 'ext': determine_ext(subtitle_url),
- 'url': subtitle_url,
- } for subtitle_url in subtitle.get('urls', [])]
+ subtitles_data = metadata.get('subtitles', {}).get('data', {})
+ if subtitles_data and isinstance(subtitles_data, dict):
+ for subtitle_lang, subtitle in subtitles_data.items():
+ subtitles[subtitle_lang] = [{
+ 'ext': determine_ext(subtitle_url),
+ 'url': subtitle_url,
+ } for subtitle_url in subtitle.get('urls', [])]
return {
'id': video_id,
@@ -201,9 +224,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'video info', flags=re.MULTILINE),
video_id)
- if info.get('error') is not None:
- msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
- raise ExtractorError(msg, expected=True)
+ self._check_error(info)
formats = []
for (key, format_id) in self._FORMATS:
@@ -246,13 +267,18 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'duration': info['duration']
}
+ def _check_error(self, info):
+ if info.get('error') is not None:
+ raise ExtractorError(
+ '%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True)
+
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
- self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
+ self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
diff --git a/youtube_dl/extractor/dbtv.py b/youtube_dl/extractor/dbtv.py
index 212217625..133cdc50b 100644
--- a/youtube_dl/extractor/dbtv.py
+++ b/youtube_dl/extractor/dbtv.py
@@ -13,8 +13,8 @@ from ..utils import (
class DBTVIE(InfoExtractor):
- _VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?dbtv\.no/(?:(?:lazyplayer|player)/)?(?P<id>[0-9]+)(?:#(?P<display_id>.+))?'
+ _TESTS = [{
'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc',
'info_dict': {
@@ -30,12 +30,18 @@ class DBTVIE(InfoExtractor):
'view_count': int,
'categories': list,
}
- }
+ }, {
+ 'url': 'http://dbtv.no/3649835190001',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.dbtv.no/lazyplayer/4631135248001',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- display_id = mobj.group('display_id')
+ display_id = mobj.group('display_id') or video_id
data = self._download_json(
'http://api.dbtv.no/discovery/%s' % video_id, display_id)
diff --git a/youtube_dl/extractor/dcn.py b/youtube_dl/extractor/dcn.py
index 6f2fea5ff..9737cff14 100644
--- a/youtube_dl/extractor/dcn.py
+++ b/youtube_dl/extractor/dcn.py
@@ -2,13 +2,11 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
int_or_none,
parse_iso8601,
+ sanitized_Request,
)
@@ -36,7 +34,7 @@ class DCNIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id,
headers={'Origin': 'http://www.dcndigital.ae'})
diff --git a/youtube_dl/extractor/democracynow.py b/youtube_dl/extractor/democracynow.py
new file mode 100644
index 000000000..6cd395e11
--- /dev/null
+++ b/youtube_dl/extractor/democracynow.py
@@ -0,0 +1,88 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import os.path
+
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ url_basename,
+ remove_start,
+)
+
+
+class DemocracynowIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?democracynow\.org/(?P<id>[^\?]*)'
+ IE_NAME = 'democracynow'
+ _TESTS = [{
+ 'url': 'http://www.democracynow.org/shows/2015/7/3',
+ 'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
+ 'info_dict': {
+ 'id': '2015-0703-001',
+ 'ext': 'mp4',
+ 'title': 'July 03, 2015 - Democracy Now!',
+ 'description': 'A daily independent global news hour with Amy Goodman & Juan González "What to the Slave is 4th of July?": James Earl Jones Reads Frederick Douglass\u2019 Historic Speech : "This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag : "We Shall Overcome": Remembering Folk Icon, Activist Pete Seeger in His Own Words & Songs',
+ },
+ }, {
+ 'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree',
+ 'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
+ 'info_dict': {
+ 'id': '2015-0703-001',
+ 'ext': 'mp4',
+ 'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag',
+ 'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21',
+ },
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ description = self._og_search_description(webpage)
+
+ json_data = self._parse_json(self._search_regex(
+ r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'),
+ display_id)
+ video_id = None
+ formats = []
+
+ default_lang = 'en'
+
+ subtitles = {}
+
+ def add_subtitle_item(lang, info_dict):
+ if lang not in subtitles:
+ subtitles[lang] = []
+ subtitles[lang].append(info_dict)
+
+ # chapter_file entries are not subtitles
+ if 'caption_file' in json_data:
+ add_subtitle_item(default_lang, {
+ 'url': compat_urlparse.urljoin(url, json_data['caption_file']),
+ })
+
+ for subtitle_item in json_data.get('captions', []):
+ lang = subtitle_item.get('language', '').lower() or default_lang
+ add_subtitle_item(lang, {
+ 'url': compat_urlparse.urljoin(url, subtitle_item['url']),
+ })
+
+ for key in ('file', 'audio', 'video'):
+ media_url = json_data.get(key, '')
+ if not media_url:
+ continue
+ media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url))
+ video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn')
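+ # e.g. (illustrative) '.../dn2015-0703-001.mp4?start=10' -> '2015-0703-001'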
+ formats.append({
+ 'url': media_url,
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id or display_id,
+ 'title': json_data['title'],
+ 'description': description,
+ 'subtitles': subtitles,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/divxstage.py b/youtube_dl/extractor/divxstage.py
deleted file mode 100644
index b88379e06..000000000
--- a/youtube_dl/extractor/divxstage.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class DivxStageIE(NovaMovIE):
- IE_NAME = 'divxstage'
- IE_DESC = 'DivxStage'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'}
-
- _HOST = 'www.divxstage.eu'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _TITLE_REGEX = r'<div class="video_det">\s*<strong>([^<]+)</strong>'
- _DESCRIPTION_REGEX = r'<div class="video_det">\s*<strong>[^<]+</strong>\s*<p>([^<]+)</p>'
-
- _TEST = {
- 'url': 'http://www.divxstage.eu/video/57f238e2e5e01',
- 'md5': '63969f6eb26533a1968c4d325be63e72',
- 'info_dict': {
- 'id': '57f238e2e5e01',
- 'ext': 'flv',
- 'title': 'youtubedl test video',
- 'description': 'This is a test video for youtubedl.',
- }
- }
diff --git a/youtube_dl/extractor/dplay.py b/youtube_dl/extractor/dplay.py
new file mode 100644
index 000000000..6cda56a7f
--- /dev/null
+++ b/youtube_dl/extractor/dplay.py
@@ -0,0 +1,51 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import time
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class DPlayIE(InfoExtractor):
+ _VALID_URL = r'http://www\.dplay\.se/[^/]+/(?P<id>[^/?#]+)'
+
+ _TEST = {
+ 'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/',
+ 'info_dict': {
+ 'id': '3172',
+ 'ext': 'mp4',
+ 'display_id': 'season-1-svensken-lar-sig-njuta-av-livet',
+ 'title': 'Svensken lär sig njuta av livet',
+ 'duration': 2650,
+ },
+ }
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ video_id = self._search_regex(
+ r'data-video-id="(\d+)"', webpage, 'video id')
+
+ info = self._download_json(
+ 'http://www.dplay.se/api/v2/ajax/videos?video_id=' + video_id,
+ video_id)['data'][0]
+
+ self._set_cookie(
+ 'secure.dplay.se', 'dsc-geo',
+ '{"countryCode":"NL","expiry":%d}' % ((time.time() + 20 * 60) * 1000))
+ # TODO: consider adding support for 'stream_type=hds', it seems to
+ # require setting some cookies
+ manifest_url = self._download_json(
+ 'https://secure.dplay.se/secure/api/v2/user/authorization/stream/%s?stream_type=hls' % video_id,
+ video_id, 'Getting manifest url for hls stream')['hls']
+ formats = self._extract_m3u8_formats(
+ manifest_url, video_id, ext='mp4', entry_protocol='m3u8_native')
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': info['title'],
+ 'formats': formats,
+ 'duration': int_or_none(info.get('video_metadata_length'), scale=1000),
+ }
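The dsc-geo cookie set above packs a country code and an expiry in milliseconds since the epoch (20 minutes ahead); presumably this is what the authorization endpoint checks for geo-restriction. A standalone sketch of the same value:

    import json
    import time

    dsc_geo = json.dumps({
        'countryCode': 'NL',
        'expiry': int((time.time() + 20 * 60) * 1000),  # 20 minutes from now, in ms
    })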
diff --git a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py
index 38e6597c8..d836c1a6c 100644
--- a/youtube_dl/extractor/dramafever.py
+++ b/youtube_dl/extractor/dramafever.py
@@ -7,7 +7,6 @@ from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_urllib_parse,
- compat_urllib_request,
compat_urlparse,
)
from ..utils import (
@@ -16,6 +15,7 @@ from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
+ sanitized_Request,
)
@@ -51,7 +51,7 @@ class DramaFeverBaseIE(InfoExtractor):
'password': password,
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
diff --git a/youtube_dl/extractor/dumpert.py b/youtube_dl/extractor/dumpert.py
index 1f00386fe..e5aadcd25 100644
--- a/youtube_dl/extractor/dumpert.py
+++ b/youtube_dl/extractor/dumpert.py
@@ -2,14 +2,17 @@
from __future__ import unicode_literals
import base64
+import re
from .common import InfoExtractor
-from ..compat import compat_urllib_request
-from ..utils import qualities
+from ..utils import (
+ qualities,
+ sanitized_Request,
+)
class DumpertIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
+ _VALID_URL = r'(?P<protocol>https?)://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
_TESTS = [{
'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/',
'md5': '1b9318d7d5054e7dcb9dc7654f21d643',
@@ -26,10 +29,12 @@ class DumpertIE(InfoExtractor):
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ protocol = mobj.group('protocol')
- url = 'https://www.dumpert.nl/mediabase/' + video_id
- req = compat_urllib_request.Request(url)
+ url = '%s://www.dumpert.nl/mediabase/%s' % (protocol, video_id)
+ req = sanitized_Request(url)
req.add_header('Cookie', 'nsfw=1; cpc=10')
webpage = self._download_webpage(req, video_id)
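Capturing the scheme in _VALID_URL, instead of hardcoding it, lets the later mediabase request reuse whatever protocol the input URL had. A quick check of the named groups:

    import re

    _VALID_URL = r'(?P<protocol>https?)://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
    mobj = re.match(_VALID_URL, 'http://www.dumpert.nl/mediabase/6646981/951bc60f/')
    print(mobj.group('protocol'), mobj.group('id'))  # http 6646981/951bc60f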
diff --git a/youtube_dl/extractor/eagleplatform.py b/youtube_dl/extractor/eagleplatform.py
index a1ee51568..7bbf617d4 100644
--- a/youtube_dl/extractor/eagleplatform.py
+++ b/youtube_dl/extractor/eagleplatform.py
@@ -21,7 +21,7 @@ class EaglePlatformIE(InfoExtractor):
_TESTS = [{
# http://lenta.ru/news/2015/03/06/navalny/
'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
- 'md5': '0b7994faa2bd5c0f69a3db6db28d078d',
+ 'md5': '70f5187fb620f2c1d503b3b22fd4efe3',
'info_dict': {
'id': '227304',
'ext': 'mp4',
@@ -36,7 +36,7 @@ class EaglePlatformIE(InfoExtractor):
# http://muz-tv.ru/play/7129/
# http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true
'url': 'eagleplatform:media.clipyou.ru:12820',
- 'md5': '6c2ebeab03b739597ce8d86339d5a905',
+ 'md5': '90b26344ba442c8e44aa4cf8f301164a',
'info_dict': {
'id': '12820',
'ext': 'mp4',
@@ -48,7 +48,8 @@ class EaglePlatformIE(InfoExtractor):
'skip': 'Georestricted',
}]
- def _handle_error(self, response):
+ @staticmethod
+ def _handle_error(response):
status = int_or_none(response.get('status', 200))
if status != 200:
raise ExtractorError(' '.join(response['errors']), expected=True)
@@ -58,6 +59,9 @@ class EaglePlatformIE(InfoExtractor):
self._handle_error(response)
return response
+ def _get_video_url(self, url_or_request, video_id, note='Downloading JSON metadata'):
+ return self._download_json(url_or_request, video_id, note)['data'][0]
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id')
@@ -69,7 +73,7 @@ class EaglePlatformIE(InfoExtractor):
title = media['title']
description = media.get('description')
- thumbnail = media.get('snapshot')
+ thumbnail = self._proto_relative_url(media.get('snapshot'), 'http:')
duration = int_or_none(media.get('duration'))
view_count = int_or_none(media.get('views'))
@@ -78,13 +82,20 @@ class EaglePlatformIE(InfoExtractor):
if age_restriction:
age_limit = 0 if age_restriction == 'allow_all' else 18
- m3u8_data = self._download_json(
- self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:'),
- video_id, 'Downloading m3u8 JSON')
+ secure_m3u8 = self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:')
+ m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON')
formats = self._extract_m3u8_formats(
- m3u8_data['data'][0], video_id,
- 'mp4', entry_protocol='m3u8_native')
+ m3u8_url, video_id,
+ 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
+
+ mp4_url = self._get_video_url(
+ # Secure mp4 URL is constructed according to Player.prototype.mp4 from
+ # http://lentaru.media.eagleplatform.com/player/player.js
+ re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4', secure_m3u8),
+ video_id, 'Downloading mp4 JSON')
+ formats.append({'url': mp4_url, 'format_id': 'mp4'})
+
self._sort_formats(formats)
return {
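As the comment above notes, the mp4 API endpoint mirrors the m3u8 one with the streaming keyword swapped out; a sketch of that substitution on a hypothetical API URL:

    import re

    secure_m3u8 = 'http://lentaru.media.eagleplatform.com/api/hlsvod/227304'  # hypothetical shape
    mp4_api_url = re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4', secure_m3u8)
    # -> 'http://lentaru.media.eagleplatform.com/api/mp4/227304'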
diff --git a/youtube_dl/extractor/eitb.py b/youtube_dl/extractor/eitb.py
index 2cba82532..c83845fc2 100644
--- a/youtube_dl/extractor/eitb.py
+++ b/youtube_dl/extractor/eitb.py
@@ -1,39 +1,92 @@
# encoding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
-from .brightcove import BrightcoveIE
-from ..utils import ExtractorError
+from ..utils import (
+ float_or_none,
+ int_or_none,
+ parse_iso8601,
+ sanitized_Request,
+)
class EitbIE(InfoExtractor):
IE_NAME = 'eitb.tv'
- _VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
+ _VALID_URL = r'https?://(?:www\.)?eitb\.tv/(?:eu/bideoa|es/video)/[^/]+/\d+/(?P<id>\d+)'
_TEST = {
- 'add_ie': ['Brightcove'],
- 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
+ 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/4104995148001/4090227752001/lasa-y-zabala-30-anos/',
'md5': 'edf4436247185adee3ea18ce64c47998',
'info_dict': {
- 'id': '2743577154001',
+ 'id': '4090227752001',
'ext': 'mp4',
'title': '60 minutos (Lasa y Zabala, 30 años)',
- # All videos from eitb has this description in the brightcove info
- 'description': '.',
- 'uploader': 'Euskal Telebista',
+ 'description': 'Programa de reportajes de actualidad.',
+ 'duration': 3996.76,
+ 'timestamp': 1381789200,
+ 'upload_date': '20131014',
+ 'tags': list,
},
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- chapter_id = mobj.group('chapter_id')
- webpage = self._download_webpage(url, chapter_id)
- bc_url = BrightcoveIE._extract_brightcove_url(webpage)
- if bc_url is None:
- raise ExtractorError('Could not extract the Brightcove url')
- # The BrightcoveExperience object doesn't contain the video id, we set
- # it manually
- bc_url += '&%40videoPlayer={0}'.format(chapter_id)
- return self.url_result(bc_url, BrightcoveIE.ie_key())
+ video_id = self._match_id(url)
+
+ video = self._download_json(
+ 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/Video/MULTIWEBTV/%s/' % video_id,
+ video_id, 'Downloading video JSON')
+
+ media = video['web_media'][0]
+
+ formats = []
+ for rendition in media['RENDITIONS']:
+ video_url = rendition.get('PMD_URL')
+ if not video_url:
+ continue
+ tbr = float_or_none(rendition.get('ENCODING_RATE'), 1000)
+ format_id = 'http'
+ if tbr:
+ format_id += '-%d' % int(tbr)
+ formats.append({
+ 'url': rendition['PMD_URL'],
+ 'format_id': format_id,
+ 'width': int_or_none(rendition.get('FRAME_WIDTH')),
+ 'height': int_or_none(rendition.get('FRAME_HEIGHT')),
+ 'tbr': tbr,
+ })
+
+ hls_url = media.get('HLS_SURL')
+ if hls_url:
+ request = sanitized_Request(
+ 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/',
+ headers={'Referer': url})
+ token_data = self._download_json(
+ request, video_id, 'Downloading auth token', fatal=False)
+ if token_data:
+ token = token_data.get('token')
+ if token:
+ m3u8_formats = self._extract_m3u8_formats(
+ '%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+
+ hds_url = media.get('HDS_SURL')
+ if hds_url:
+ f4m_formats = self._extract_f4m_formats(
+ '%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'),
+ video_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': media.get('NAME_ES') or media.get('name') or media['NAME_EU'],
+ 'description': media.get('SHORT_DESC_ES') or video.get('desc_group') or media.get('SHORT_DESC_EU'),
+ 'thumbnail': media.get('STILL_URL') or media.get('THUMBNAIL_URL'),
+ 'duration': float_or_none(media.get('LENGTH'), 1000),
+ 'timestamp': parse_iso8601(media.get('BROADCST_DATE'), ' '),
+ 'tags': media.get('TAGS'),
+ 'formats': formats,
+ }
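ENCODING_RATE and LENGTH are scaled by 1000 above, i.e. the API evidently reports bits per second and milliseconds; float_or_none's scale argument does the unit conversion while tolerating missing fields. A sketch:

    from youtube_dl.utils import float_or_none

    float_or_none('1024000', 1000)  # 1024.0 (kbit/s)
    float_or_none(None, 1000)       # None, rather than a TypeError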
diff --git a/youtube_dl/extractor/engadget.py b/youtube_dl/extractor/engadget.py
index 4ea37ebd9..e4180701d 100644
--- a/youtube_dl/extractor/engadget.py
+++ b/youtube_dl/extractor/engadget.py
@@ -10,7 +10,7 @@ from ..utils import (
class EngadgetIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://www.engadget.com/
- (?:video/5min/(?P<id>\d+)|
+ (?:video(?:/5min)?/(?P<id>\d+)|
[\d/]+/.*?)
'''
diff --git a/youtube_dl/extractor/escapist.py b/youtube_dl/extractor/escapist.py
index c85b4c458..a3d7bbbcb 100644
--- a/youtube_dl/extractor/escapist.py
+++ b/youtube_dl/extractor/escapist.py
@@ -3,13 +3,12 @@ from __future__ import unicode_literals
import json
from .common import InfoExtractor
-from ..compat import compat_urllib_request
-
from ..utils import (
determine_ext,
clean_html,
int_or_none,
float_or_none,
+ sanitized_Request,
)
@@ -75,7 +74,7 @@ class EscapistIE(InfoExtractor):
video_id = ims_video['videoID']
key = ims_video['hash']
- config_req = compat_urllib_request.Request(
+ config_req = sanitized_Request(
'http://www.escapistmagazine.com/videos/'
'vidconfig.php?videoID=%s&hash=%s' % (video_id, key))
config_req.add_header('Referer', url)
diff --git a/youtube_dl/extractor/europa.py b/youtube_dl/extractor/europa.py
new file mode 100644
index 000000000..adc43919e
--- /dev/null
+++ b/youtube_dl/extractor/europa.py
@@ -0,0 +1,93 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ int_or_none,
+ orderedSet,
+ parse_duration,
+ qualities,
+ unified_strdate,
+ xpath_text
+)
+
+
+class EuropaIE(InfoExtractor):
+ _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
+ _TESTS = [{
+ 'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
+ 'md5': '574f080699ddd1e19a675b0ddf010371',
+ 'info_dict': {
+ 'id': 'I107758',
+ 'ext': 'mp4',
+ 'title': 'TRADE - Wikileaks on TTIP',
+ 'description': 'NEW LIVE EC Midday press briefing of 11/08/2015',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'upload_date': '20150811',
+ 'duration': 34,
+ 'view_count': int,
+ 'formats': 'mincount:3',
+ }
+ }, {
+ 'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ playlist = self._download_xml(
+ 'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id)
+
+ def get_item(type_, preference):
+ items = {}
+ for item in playlist.findall('./info/%s/item' % type_):
+ lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None)
+ if lang and label:
+ items[lang] = label.strip()
+ for p in preference:
+ if items.get(p):
+ return items[p]
+
+ query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+ preferred_lang = query.get('sitelang', ('en', ))[0]
+
+ preferred_langs = orderedSet((preferred_lang, 'en', 'int'))
+
+ title = get_item('title', preferred_langs) or video_id
+ description = get_item('description', preferred_langs)
+ thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
+ upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
+ duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
+ view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))
+
+ language_preference = qualities(preferred_langs[::-1])
+
+ formats = []
+ for file_ in playlist.findall('./files/file'):
+ video_url = xpath_text(file_, './url')
+ if not video_url:
+ continue
+ lang = xpath_text(file_, './lg')
+ formats.append({
+ 'url': video_url,
+ 'format_id': lang,
+ 'format_note': xpath_text(file_, './lglabel'),
+ 'language_preference': language_preference(lang)
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'upload_date': upload_date,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'formats': formats
+ }
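qualities() turns an ordered list into a ranking function; reversing preferred_langs therefore gives the most preferred language the highest language_preference when formats are sorted. A sketch:

    from youtube_dl.utils import qualities

    rank = qualities(['int', 'en', 'fr'])   # preferred_langs[::-1] for sitelang=fr
    rank('fr'), rank('en'), rank('de')      # (2, 1, -1); unknown languages rank lowest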
diff --git a/youtube_dl/extractor/everyonesmixtape.py b/youtube_dl/extractor/everyonesmixtape.py
index d872d828f..493d38af8 100644
--- a/youtube_dl/extractor/everyonesmixtape.py
+++ b/youtube_dl/extractor/everyonesmixtape.py
@@ -3,11 +3,9 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -42,7 +40,7 @@ class EveryonesMixtapeIE(InfoExtractor):
playlist_id = mobj.group('id')
pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
- pllist_req = compat_urllib_request.Request(pllist_url)
+ pllist_req = sanitized_Request(pllist_url)
pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
playlist_list = self._download_json(
@@ -55,7 +53,7 @@ class EveryonesMixtapeIE(InfoExtractor):
raise ExtractorError('Playlist id not found')
pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
- pl_req = compat_urllib_request.Request(pl_url)
+ pl_req = sanitized_Request(pl_url)
pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
playlist = self._download_json(
pl_req, playlist_id, note='Downloading playlist info')
diff --git a/youtube_dl/extractor/expotv.py b/youtube_dl/extractor/expotv.py
index a38b773e8..1585a03bb 100644
--- a/youtube_dl/extractor/expotv.py
+++ b/youtube_dl/extractor/expotv.py
@@ -33,20 +33,27 @@ class ExpoTVIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
player_key = self._search_regex(
r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
- config_url = 'http://client.expotv.com/video/config/%s/%s' % (
- video_id, player_key)
config = self._download_json(
- config_url, video_id,
- note='Downloading video configuration')
+ 'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key),
+ video_id, 'Downloading video configuration')
- formats = [{
- 'url': fcfg['file'],
- 'height': int_or_none(fcfg.get('height')),
- 'format_note': fcfg.get('label'),
- 'ext': self._search_regex(
- r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
- 'file extension', default=None),
- } for fcfg in config['sources']]
+ formats = []
+ for fcfg in config['sources']:
+ media_url = fcfg.get('file')
+ if not media_url:
+ continue
+ if fcfg.get('type') == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
+ else:
+ formats.append({
+ 'url': media_url,
+ 'height': int_or_none(fcfg.get('height')),
+ 'format_id': fcfg.get('label'),
+ 'ext': self._search_regex(
+ r'filename=.*\.([a-z0-9_A-Z]+)&', media_url,
+ 'file extension', default=None) or fcfg.get('type'),
+ })
self._sort_formats(formats)
title = self._og_search_title(webpage)
diff --git a/youtube_dl/extractor/extremetube.py b/youtube_dl/extractor/extremetube.py
index c826a5404..3403581fd 100644
--- a/youtube_dl/extractor/extremetube.py
+++ b/youtube_dl/extractor/extremetube.py
@@ -3,23 +3,20 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_parse_qs,
- compat_urllib_request,
-)
from ..utils import (
- qualities,
+ int_or_none,
+ sanitized_Request,
str_to_int,
)
class ExtremeTubeIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)'
+ _VALID_URL = r'https?://(?:www\.)?extremetube\.com/(?:[^/]+/)?video/(?P<id>[^/#?&]+)'
_TESTS = [{
'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'md5': '344d0c6d50e2f16b06e49ca011d8ac69',
'info_dict': {
- 'id': '652431',
+ 'id': 'music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
@@ -29,14 +26,18 @@ class ExtremeTubeIE(InfoExtractor):
}, {
'url': 'http://www.extremetube.com/gay/video/abcde-1234',
'only_matching': True,
+ }, {
+ 'url': 'http://www.extremetube.com/video/latina-slut-fucked-by-fat-black-dick',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.extremetube.com/video/652431',
+ 'only_matching': True,
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- url = 'http://www.' + mobj.group('url')
+ video_id = self._match_id(url)
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
@@ -49,20 +50,36 @@ class ExtremeTubeIE(InfoExtractor):
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
webpage, 'view count', fatal=False))
- flash_vars = compat_parse_qs(self._search_regex(
- r'<param[^>]+?name="flashvars"[^>]+?value="([^"]+)"', webpage, 'flash vars'))
+ flash_vars = self._parse_json(
+ self._search_regex(
+ r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flash vars'),
+ video_id)
formats = []
- quality = qualities(['180p', '240p', '360p', '480p', '720p', '1080p'])
- for k, vals in flash_vars.items():
- m = re.match(r'quality_(?P<quality>[0-9]+p)$', k)
- if m is not None:
- formats.append({
- 'format_id': m.group('quality'),
- 'quality': quality(m.group('quality')),
- 'url': vals[0],
+ for quality_key, video_url in flash_vars.items():
+ height = int_or_none(self._search_regex(
+ r'quality_(\d+)[pP]$', quality_key, 'height', default=None))
+ if not height:
+ continue
+ f = {
+ 'url': video_url,
+ }
+ mobj = re.search(
+ r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url)
+ if mobj:
+ height = int(mobj.group('height'))
+ bitrate = int(mobj.group('bitrate'))
+ f.update({
+ 'format_id': '%dp-%dk' % (height, bitrate),
+ 'height': height,
+ 'tbr': bitrate,
})
-
+ else:
+ f.update({
+ 'format_id': '%dp' % height,
+ 'height': height,
+ })
+ formats.append(f)
self._sort_formats(formats)
return {
diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index 178a7ca4c..39c481068 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -7,15 +7,14 @@ import socket
from .common import InfoExtractor
from ..compat import (
compat_http_client,
- compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
- compat_urllib_request,
)
from ..utils import (
+ error_to_compat_str,
ExtractorError,
- int_or_none,
limit_length,
+ sanitized_Request,
urlencode_postdata,
get_element_by_id,
clean_html,
@@ -74,7 +73,7 @@ class FacebookIE(InfoExtractor):
if useremail is None:
return
- login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
+ login_page_req = sanitized_Request(self._LOGIN_URL)
login_page_req.add_header('Cookie', 'locale=en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
@@ -95,7 +94,7 @@ class FacebookIE(InfoExtractor):
'timezone': '-60',
'trynum': '1',
}
- request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
+ request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
@@ -110,14 +109,14 @@ class FacebookIE(InfoExtractor):
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
'name_action_selected': 'dont_save',
}
- check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
+ check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to log in with your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning('unable to log in: %s' % compat_str(err))
+ self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
@@ -142,16 +141,20 @@ class FacebookIE(InfoExtractor):
data = dict(json.loads(m.group(1)))
params_raw = compat_urllib_parse_unquote(data['params'])
params = json.loads(params_raw)
- video_data = params['video_data'][0]
formats = []
- for quality in ['sd', 'hd']:
- src = video_data.get('%s_src' % quality)
- if src is not None:
- formats.append({
- 'format_id': quality,
- 'url': src,
- })
+ for format_id, f in params['video_data'].items():
+ if not f or not isinstance(f, list):
+ continue
+ for quality in ('sd', 'hd'):
+ for src_type in ('src', 'src_no_ratelimit'):
+ src = f[0].get('%s_%s' % (quality, src_type))
+ if src:
+ formats.append({
+ 'format_id': '%s_%s_%s' % (format_id, quality, src_type),
+ 'url': src,
+ 'preference': -10 if format_id == 'progressive' else 0,
+ })
if not formats:
raise ExtractorError('Cannot find video formats')
@@ -161,7 +164,7 @@ class FacebookIE(InfoExtractor):
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
- webpage, 'alternative title', fatal=False)
+ webpage, 'alternative title', default=None)
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
@@ -171,7 +174,5 @@ class FacebookIE(InfoExtractor):
'id': video_id,
'title': video_title,
'formats': formats,
- 'duration': int_or_none(video_data.get('video_duration')),
- 'thumbnail': video_data.get('thumbnail_src'),
'uploader': uploader,
}
diff --git a/youtube_dl/extractor/faz.py b/youtube_dl/extractor/faz.py
index cebdd0193..d9a868119 100644
--- a/youtube_dl/extractor/faz.py
+++ b/youtube_dl/extractor/faz.py
@@ -38,7 +38,7 @@ class FazIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
config_xml_url = self._search_regex(
- r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
+ r'(?:var\s+)?videoXMLURL\s*=\s*"([^"]+)', webpage, 'config xml url')
config = self._download_xml(
config_xml_url, video_id, 'Downloading config xml')
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index a406945e8..4c81271d3 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -12,6 +12,7 @@ from ..compat import (
from ..utils import (
encode_dict,
ExtractorError,
+ sanitized_Request,
)
@@ -36,8 +37,8 @@ class FC2IE(InfoExtractor):
'params': {
'username': 'ytdl@yt-dl.org',
'password': '(snip)',
- 'skip': 'requires actual password'
- }
+ },
+ 'skip': 'requires actual password',
}, {
'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
'only_matching': True,
@@ -57,7 +58,7 @@ class FC2IE(InfoExtractor):
}
login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')
@@ -66,7 +67,7 @@ class FC2IE(InfoExtractor):
return False
# this is also needed
- login_redir = compat_urllib_request.Request('http://id.fc2.com/?mode=redirect&login=done')
+ login_redir = sanitized_Request('http://id.fc2.com/?mode=redirect&login=done')
self._download_webpage(
login_redir, None, note='Login redirect', errnote='Login redirect failed')
diff --git a/youtube_dl/extractor/fczenit.py b/youtube_dl/extractor/fczenit.py
new file mode 100644
index 000000000..f1f150ef2
--- /dev/null
+++ b/youtube_dl/extractor/fczenit.py
@@ -0,0 +1,41 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class FczenitIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/gl(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://fc-zenit.ru/video/gl6785/',
+ 'md5': '458bacc24549173fe5a5aa29174a5606',
+ 'info_dict': {
+ 'id': '6785',
+ 'ext': 'mp4',
+ 'title': '«Зенит-ТВ»: как Олег Шатов играл против «Урала»',
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ video_title = self._html_search_regex(r'<div class="photoalbum__title">([^<]+)', webpage, 'title')
+
+ bitrates_raw = self._html_search_regex(r'bitrates:.*\n(.*)\]', webpage, 'video URL')
+ bitrates = re.findall(r'url:.?\'(.+?)\'.*?bitrate:.?([0-9]{3}?)', bitrates_raw)
+
+ formats = [{
+ 'url': furl,
+ 'tbr': int(tbr),
+ } for furl, tbr in bitrates]
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': video_title,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/fivemin.py b/youtube_dl/extractor/fivemin.py
index 157094e8c..2955965d9 100644
--- a/youtube_dl/extractor/fivemin.py
+++ b/youtube_dl/extractor/fivemin.py
@@ -2,11 +2,15 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
- compat_str,
compat_urllib_parse,
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
+ compat_urlparse,
)
from ..utils import (
ExtractorError,
+ parse_duration,
+ replace_extension,
)
@@ -28,6 +32,7 @@ class FiveMinIE(InfoExtractor):
'id': '518013791',
'ext': 'mp4',
'title': 'iPad Mini with Retina Display Review',
+ 'duration': 177,
},
},
{
@@ -38,9 +43,52 @@ class FiveMinIE(InfoExtractor):
'id': '518086247',
'ext': 'mp4',
'title': 'How to Make a Next-Level Fruit Salad',
+ 'duration': 184,
},
},
]
+ _ERRORS = {
+ 'ErrorVideoNotExist': 'We\'re sorry, but the video you are trying to watch does not exist.',
+ 'ErrorVideoNoLongerAvailable': 'We\'re sorry, but the video you are trying to watch is no longer available.',
+ 'ErrorVideoRejected': 'We\'re sorry, but the video you are trying to watch has been removed.',
+ 'ErrorVideoUserNotGeo': 'We\'re sorry, but the video you are trying to watch cannot be viewed from your current location.',
+ 'ErrorVideoLibraryRestriction': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.',
+ 'ErrorExposurePermission': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.',
+ }
+ _QUALITIES = {
+ 1: {
+ 'width': 640,
+ 'height': 360,
+ },
+ 2: {
+ 'width': 854,
+ 'height': 480,
+ },
+ 4: {
+ 'width': 1280,
+ 'height': 720,
+ },
+ 8: {
+ 'width': 1920,
+ 'height': 1080,
+ },
+ 16: {
+ 'width': 640,
+ 'height': 360,
+ },
+ 32: {
+ 'width': 854,
+ 'height': 480,
+ },
+ 64: {
+ 'width': 1280,
+ 'height': 720,
+ },
+ 128: {
+ 'width': 640,
+ 'height': 360,
+ },
+ }
def _real_extract(self, url):
video_id = self._match_id(url)
@@ -59,26 +107,36 @@ class FiveMinIE(InfoExtractor):
'https://syn.5min.com/handlers/SenseHandler.ashx?' + query,
video_id)
if not response['success']:
- err_msg = response['errorMessage']
- if err_msg == 'ErrorVideoUserNotGeo':
- msg = 'Video not available from your location'
- else:
- msg = 'Aol said: %s' % err_msg
- raise ExtractorError(msg, expected=True, video_id=video_id)
+ raise ExtractorError(
+ '%s said: %s' % (
+ self.IE_NAME,
+ self._ERRORS.get(response['errorMessage'], response['errorMessage'])),
+ expected=True)
info = response['binding'][0]
- second_id = compat_str(int(video_id[:-2]) + 1)
formats = []
- for quality, height in [(1, 320), (2, 480), (4, 720), (8, 1080)]:
- if any(r['ID'] == quality for r in info['Renditions']):
+ parsed_video_url = compat_urllib_parse_urlparse(compat_parse_qs(
+ compat_urllib_parse_urlparse(info['EmbededURL']).query)['videoUrl'][0])
+ for rendition in info['Renditions']:
+ if rendition['RenditionType'] == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(rendition['Url'], video_id, m3u8_id='hls'))
+ elif rendition['RenditionType'] == 'aac':
+ continue
+ else:
+ rendition_url = compat_urlparse.urlunparse(parsed_video_url._replace(
+ path=replace_extension(
+ parsed_video_url.path.replace('//', '/%s/' % rendition['ID']),
+ rendition['RenditionType'])))
+ quality = self._QUALITIES.get(rendition['ID'], {})
formats.append({
- 'format_id': compat_str(quality),
- 'url': 'http://avideos.5min.com/%s/%s/%s_%s.mp4' % (second_id[-3:], second_id, video_id, quality),
- 'height': height,
+ 'format_id': '%s-%d' % (rendition['RenditionType'], rendition['ID']),
+ 'url': rendition_url,
+ 'width': quality.get('width'),
+ 'height': quality.get('height'),
})
+ self._sort_formats(formats)
return {
'id': video_id,
'title': info['Title'],
+ 'thumbnail': info.get('ThumbURL'),
+ 'duration': parse_duration(info.get('Duration')),
'formats': formats,
}
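The rendition URL rewrite above starts from the progressive videoUrl found inside EmbededURL, injects the rendition ID into the path at the double slash, and swaps the extension for the rendition type. A step-by-step sketch with hypothetical values:

    from youtube_dl.compat import compat_urlparse
    from youtube_dl.utils import replace_extension

    parsed = compat_urlparse.urlparse(
        'http://avideos.5min.com/517/5180138//518013791.mp4')  # hypothetical videoUrl
    path = parsed.path.replace('//', '/4/')  # rendition['ID'] == 4
    path = replace_extension(path, 'flv')    # rendition['RenditionType'] == 'flv'
    rendition_url = compat_urlparse.urlunparse(parsed._replace(path=path))
    # -> 'http://avideos.5min.com/517/5180138/4/518013791.flv'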
diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py
index 190d9f9ad..5f6e65dae 100644
--- a/youtube_dl/extractor/fktv.py
+++ b/youtube_dl/extractor/fktv.py
@@ -1,13 +1,10 @@
from __future__ import unicode_literals
-import re
-import random
-import json
-
from .common import InfoExtractor
from ..utils import (
- get_element_by_id,
clean_html,
+ determine_ext,
+ js_to_json,
)
@@ -17,66 +14,38 @@ class FKTVIE(InfoExtractor):
_TEST = {
'url': 'http://fernsehkritik.tv/folge-1',
+ 'md5': '21f0b0c99bce7d5b524eb1b17b1c6d79',
'info_dict': {
- 'id': '00011',
- 'ext': 'flv',
+ 'id': '1',
+ 'ext': 'mp4',
'title': 'Folge 1 vom 10. April 2007',
- 'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
+ 'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
- episode = int(self._match_id(url))
-
- video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%s.jpg' % episode
- start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%s/Start' % episode,
- episode)
- playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
- 'playlist', flags=re.DOTALL)
- files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
-
- videos = []
- for i, _ in enumerate(files, 1):
- video_id = '%04d%d' % (episode, i)
- video_url = 'http://fernsehkritik.tv/js/directme.php?file=%s%s.flv' % (episode, '' if i == 1 else '-%d' % i)
- videos.append({
- 'ext': 'flv',
- 'id': video_id,
- 'url': video_url,
- 'title': clean_html(get_element_by_id('eptitle', start_webpage)),
- 'description': clean_html(get_element_by_id('contentlist', start_webpage)),
- 'thumbnail': video_thumbnail
- })
- return {
- '_type': 'multi_video',
- 'entries': videos,
- 'id': 'folge-%s' % episode,
- }
-
-
-class FKTVPosteckeIE(InfoExtractor):
- IE_NAME = 'fernsehkritik.tv:postecke'
- _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
- _TEST = {
- 'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
- 'md5': '262f0adbac80317412f7e57b4808e5c4',
- 'info_dict': {
- 'id': '0120',
- 'ext': 'flv',
- 'title': 'Postecke 120',
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- episode = int(mobj.group('ep'))
+ episode = self._match_id(url)
+
+ webpage = self._download_webpage(
+ 'http://fernsehkritik.tv/folge-%s/play' % episode, episode)
+ title = clean_html(self._html_search_regex(
+ '<h3>([^<]+)</h3>', webpage, 'title'))
+ thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False)
+ sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json)
+
+ formats = []
+ for source in sources:
+ furl = source.get('src')
+ if furl:
+ formats.append({
+ 'url': furl,
+ 'format_id': determine_ext(furl),
+ })
+ self._sort_formats(formats)
- server = random.randint(2, 4)
- video_id = '%04d' % episode
- video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
- video_title = 'Postecke %d' % episode
return {
- 'id': video_id,
- 'url': video_url,
- 'title': video_title,
+ 'id': episode,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': thumbnail,
}
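MEDIA is a JavaScript array literal, not strict JSON, which is why _parse_json is handed js_to_json as a transformer; a sketch of what that normalization buys (the values are hypothetical):

    import json
    from youtube_dl.utils import js_to_json

    media = "[{src: 'http://example.com/folge-1.mp4', type: 'video/mp4'}]"
    json.loads(js_to_json(media))
    # -> [{'src': 'http://example.com/folge-1.mp4', 'type': 'video/mp4'}]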
diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py
index 2fe76d661..91cd46e76 100644
--- a/youtube_dl/extractor/flickr.py
+++ b/youtube_dl/extractor/flickr.py
@@ -3,10 +3,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_request
from ..utils import (
ExtractorError,
find_xpath_attr,
+ sanitized_Request,
)
@@ -30,7 +30,7 @@ class FlickrIE(InfoExtractor):
video_id = mobj.group('id')
video_uploader_id = mobj.group('uploader_id')
webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
- req = compat_urllib_request.Request(webpage_url)
+ req = sanitized_Request(webpage_url)
req.add_header(
'User-Agent',
# it needs a more recent version
diff --git a/youtube_dl/extractor/footyroom.py b/youtube_dl/extractor/footyroom.py
index 4c7dbca40..370fd006f 100644
--- a/youtube_dl/extractor/footyroom.py
+++ b/youtube_dl/extractor/footyroom.py
@@ -13,6 +13,7 @@ class FootyRoomIE(InfoExtractor):
'title': 'Schalke 04 0 – 2 Real Madrid',
},
'playlist_count': 3,
+ 'skip': 'Video for this match is not available',
}, {
'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/',
'info_dict': {
diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py
index 3bb4f6239..fc4a5a0fb 100644
--- a/youtube_dl/extractor/fourtube.py
+++ b/youtube_dl/extractor/fourtube.py
@@ -3,12 +3,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
parse_duration,
parse_iso8601,
+ sanitized_Request,
str_to_int,
)
@@ -46,10 +44,10 @@ class FourTubeIE(InfoExtractor):
thumbnail = self._html_search_meta('thumbnailUrl', webpage)
uploader_id = self._html_search_regex(
r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">',
- webpage, 'uploader id')
+ webpage, 'uploader id', fatal=False)
uploader = self._html_search_regex(
r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">',
- webpage, 'uploader')
+ webpage, 'uploader', fatal=False)
categories_html = self._search_regex(
r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>',
@@ -68,13 +66,24 @@ class FourTubeIE(InfoExtractor):
webpage, 'like count', fatal=False))
duration = parse_duration(self._html_search_meta('duration', webpage))
- params_js = self._search_regex(
- r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
- webpage, 'initialization parameters'
- )
- params = self._parse_json('[%s]' % params_js, video_id)
- media_id = params[0]
- sources = ['%s' % p for p in params[2]]
+ media_id = self._search_regex(
+ r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=', webpage,
+ 'media id', default=None, group='id')
+ sources = [
+ quality
+ for _, quality in re.findall(r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)]
+ if not (media_id and sources):
+ player_js = self._download_webpage(
+ self._search_regex(
+ r'<script[^>]id=(["\'])playerembed\1[^>]+src=(["\'])(?P<url>.+?)\2',
+ webpage, 'player JS', group='url'),
+ video_id, 'Downloading player JS')
+ params_js = self._search_regex(
+ r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
+ player_js, 'initialization parameters')
+ params = self._parse_json('[%s]' % params_js, video_id)
+ media_id = params[0]
+ sources = ['%s' % p for p in params[2]]
token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
media_id, '+'.join(sources))
@@ -82,7 +91,7 @@ class FourTubeIE(InfoExtractor):
b'Content-Type': b'application/x-www-form-urlencoded',
b'Origin': b'http://www.4tube.com',
}
- token_req = compat_urllib_request.Request(token_url, b'{}', headers)
+ token_req = sanitized_Request(token_url, b'{}', headers)
tokens = self._download_json(token_req, video_id)
formats = [{
'url': tokens[format]['token'],
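The new primary path above reads the media id and the quality list straight off the page's quality buttons, and only falls back to downloading the player JS when they are absent. A sketch of the button scraping against a hypothetical markup snippet:

    import re

    webpage = '<button data-id="218964" data-quality="480">480p</button>'  # hypothetical
    media_id = re.search(
        r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=',
        webpage).group('id')                                    # '218964'
    sources = [q for _, q in re.findall(
        r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)]   # ['480']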
diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index 129984a5f..8e60cf60f 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -83,6 +83,14 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
if subtitle:
title += ' - %s' % subtitle
+ subtitles = {}
+ subtitles_list = [{
+ 'url': subformat['url'],
+ 'ext': subformat.get('format'),
+ } for subformat in info.get('subtitles', []) if subformat.get('url')]
+ if subtitles_list:
+ subtitles['fr'] = subtitles_list
+
return {
'id': video_id,
'title': title,
@@ -91,20 +99,27 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
+ 'subtitles': subtitles,
}
class PluzzIE(FranceTVBaseInfoExtractor):
IE_NAME = 'pluzz.francetv.fr'
- _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'
+ _VALID_URL = r'https?://(?:m\.)?pluzz\.francetv\.fr/videos/(?P<id>.+?)\.html'
# Can't use tests, videos expire in 7 days
def _real_extract(self, url):
- title = re.match(self._VALID_URL, url).group(1)
- webpage = self._download_webpage(url, title)
- video_id = self._search_regex(
- r'data-diffusion="(\d+)"', webpage, 'ID')
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+
+ video_id = self._html_search_meta(
+ 'id_video', webpage, 'video id', default=None)
+ if not video_id:
+ video_id = self._search_regex(
+ r'data-diffusion=["\'](\d+)', webpage, 'video id')
+
return self._extract_video(video_id, 'Pluzz')
@@ -120,6 +135,9 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
'title': 'Soir 3',
'upload_date': '20130826',
'timestamp': 1377548400,
+ 'subtitles': {
+ 'fr': 'mincount:2',
+ },
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
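The subtitles dict assembled above follows youtube-dl's usual shape, a language code mapping to a list of {url, ext} entries, e.g. (hypothetical URLs):

    subtitles = {
        'fr': [
            {'url': 'http://example.francetv.fr/soir3.vtt', 'ext': 'vtt'},
            {'url': 'http://example.francetv.fr/soir3.srt', 'ext': 'srt'},
        ],
    }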
diff --git a/youtube_dl/extractor/funimation.py b/youtube_dl/extractor/funimation.py
new file mode 100644
index 000000000..d1a95d87f
--- /dev/null
+++ b/youtube_dl/extractor/funimation.py
@@ -0,0 +1,193 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ clean_html,
+ determine_ext,
+ encode_dict,
+ int_or_none,
+ sanitized_Request,
+ ExtractorError,
+ urlencode_postdata
+)
+
+
+class FunimationIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?funimation\.com/shows/[^/]+/videos/(?:official|promotional)/(?P<id>[^/?#&]+)'
+
+ _NETRC_MACHINE = 'funimation'
+
+ _TESTS = [{
+ 'url': 'http://www.funimation.com/shows/air/videos/official/breeze',
+ 'info_dict': {
+ 'id': '658',
+ 'display_id': 'breeze',
+ 'ext': 'mp4',
+ 'title': 'Air - 1 - Breeze',
+ 'description': 'md5:1769f43cd5fc130ace8fd87232207892',
+ 'thumbnail': 're:https?://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.funimation.com/shows/hacksign/videos/official/role-play',
+ 'info_dict': {
+ 'id': '31128',
+ 'display_id': 'role-play',
+ 'ext': 'mp4',
+ 'title': '.hack//SIGN - 1 - Role Play',
+ 'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd',
+ 'thumbnail': 're:https?://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.funimation.com/shows/attack-on-titan-junior-high/videos/promotional/broadcast-dub-preview',
+ 'info_dict': {
+ 'id': '9635',
+ 'display_id': 'broadcast-dub-preview',
+ 'ext': 'mp4',
+ 'title': 'Attack on Titan: Junior High - Broadcast Dub Preview',
+ 'description': 'md5:f8ec49c0aff702a7832cd81b8a44f803',
+ 'thumbnail': 're:https?://.*\.(?:jpg|png)',
+ },
+ }]
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ return
+ data = urlencode_postdata(encode_dict({
+ 'email_field': username,
+ 'password_field': password,
+ }))
+ login_request = sanitized_Request('http://www.funimation.com/login', data, headers={
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
+ 'Content-Type': 'application/x-www-form-urlencoded'
+ })
+ login_page = self._download_webpage(
+ login_request, None, 'Logging in as %s' % username)
+ if any(p in login_page for p in ('funimation.com/logout', '>Log Out<')):
+ return
+ error = self._html_search_regex(
+ r'(?s)<div[^>]+id=["\']errorMessages["\'][^>]*>(.+?)</div>',
+ login_page, 'error messages', default=None)
+ if error:
+ raise ExtractorError('Unable to log in: %s' % error, expected=True)
+ raise ExtractorError('Unable to log in')
+
+ def _real_initialize(self):
+ self._login()
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ errors = []
+ formats = []
+
+ ERRORS_MAP = {
+ 'ERROR_MATURE_CONTENT_LOGGED_IN': 'matureContentLoggedIn',
+ 'ERROR_MATURE_CONTENT_LOGGED_OUT': 'matureContentLoggedOut',
+ 'ERROR_SUBSCRIPTION_LOGGED_OUT': 'subscriptionLoggedOut',
+ 'ERROR_VIDEO_EXPIRED': 'videoExpired',
+ 'ERROR_TERRITORY_UNAVAILABLE': 'territoryUnavailable',
+ 'SVODBASIC_SUBSCRIPTION_IN_PLAYER': 'basicSubscription',
+ 'SVODNON_SUBSCRIPTION_IN_PLAYER': 'nonSubscription',
+ 'ERROR_PLAYER_NOT_RESPONDING': 'playerNotResponding',
+ 'ERROR_UNABLE_TO_CONNECT_TO_CDN': 'unableToConnectToCDN',
+ 'ERROR_STREAM_NOT_FOUND': 'streamNotFound',
+ }
+
+ USER_AGENTS = (
+ # PC UA is served an m3u8 that provides some bonus lower-quality formats
+ ('pc', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'),
+ # Mobile UA allows extracting direct links and also does not fail when
+ # PC UA fails with hulu error (e.g.
+ # http://www.funimation.com/shows/hacksign/videos/official/role-play)
+ ('mobile', 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'),
+ )
+
+ for kind, user_agent in USER_AGENTS:
+ request = sanitized_Request(url)
+ request.add_header('User-Agent', user_agent)
+ webpage = self._download_webpage(
+ request, display_id, 'Downloading %s webpage' % kind)
+
+ playlist = self._parse_json(
+ self._search_regex(
+ r'var\s+playersData\s*=\s*(\[.+?\]);\n',
+ webpage, 'players data'),
+ display_id)[0]['playlist']
+
+ items = next(item['items'] for item in playlist if item.get('items'))
+ item = next(item for item in items if item.get('itemAK') == display_id)
+
+ error_messages = {}
+ video_error_messages = self._search_regex(
+ r'var\s+videoErrorMessages\s*=\s*({.+?});\n',
+ webpage, 'error messages', default=None)
+ if video_error_messages:
+ error_messages_json = self._parse_json(video_error_messages, display_id, fatal=False)
+ if error_messages_json:
+ for _, error in error_messages_json.items():
+ type_ = error.get('type')
+ description = error.get('description')
+ content = error.get('content')
+ if type_ == 'text' and description and content:
+ error_message = ERRORS_MAP.get(description)
+ if error_message:
+ error_messages[error_message] = content
+
+ for video in item.get('videoSet', []):
+ auth_token = video.get('authToken')
+ if not auth_token:
+ continue
+ funimation_id = video.get('FUNImationID') or video.get('videoId')
+ preference = 1 if video.get('languageMode') == 'dub' else 0
+ if not auth_token.startswith('?'):
+ auth_token = '?%s' % auth_token
+ for quality, height in (('sd', 480), ('hd', 720), ('hd1080', 1080)):
+ format_url = video.get('%sUrl' % quality)
+ if not format_url:
+ continue
+ if not format_url.startswith(('http', '//')):
+ errors.append(format_url)
+ continue
+ if determine_ext(format_url) == 'm3u8':
+ m3u8_formats = self._extract_m3u8_formats(
+ format_url + auth_token, display_id, 'mp4', entry_protocol='m3u8_native',
+ preference=preference, m3u8_id='%s-hls' % funimation_id, fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ else:
+ tbr = int_or_none(self._search_regex(
+ r'-(\d+)[Kk]', format_url, 'tbr', default=None))
+ formats.append({
+ 'url': format_url + auth_token,
+ 'format_id': '%s-http-%dp' % (funimation_id, height),
+ 'height': height,
+ 'tbr': tbr,
+ 'preference': preference,
+ })
+
+ if not formats and errors:
+ raise ExtractorError(
+ '%s returned error: %s'
+ % (self.IE_NAME, clean_html(error_messages.get(errors[0], errors[0]))),
+ expected=True)
+
+ self._sort_formats(formats)
+
+ title = item['title']
+ artist = item.get('artist')
+ if artist:
+ title = '%s - %s' % (artist, title)
+ description = self._og_search_description(webpage) or item.get('description')
+ thumbnail = self._og_search_thumbnail(webpage) or item.get('posterUrl')
+ video_id = item.get('itemId') or display_id
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
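Each stream URL is only usable with the per-video authToken appended as its query string, which is what the startswith('?') normalization above guarantees; a sketch with hypothetical values:

    auth_token = 'hdnea=st=1449000000~exp=1449003600'  # hypothetical token
    if not auth_token.startswith('?'):
        auth_token = '?%s' % auth_token
    format_url = 'http://wpc.funimation.com/breeze-750K.mp4' + auth_token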
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py
index f5f13689c..7f21d7410 100644
--- a/youtube_dl/extractor/funnyordie.py
+++ b/youtube_dl/extractor/funnyordie.py
@@ -45,11 +45,20 @@ class FunnyOrDieIE(InfoExtractor):
links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0)
- bitrates = self._html_search_regex(r'<source src="[^"]+/v,((?:\d+,)+)\.mp4\.csmil', webpage, 'video bitrates')
- bitrates = [int(b) for b in bitrates.rstrip(',').split(',')]
- bitrates.sort()
+ m3u8_url = self._search_regex(
+ r'<source[^>]+src=(["\'])(?P<url>.+?/master\.m3u8)\1',
+ webpage, 'm3u8 url', default=None, group='url')
formats = []
+
+ m3u8_formats = self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+
+ bitrates = [int(bitrate) for bitrate in re.findall(r'[,/]v(\d+)[,/]', m3u8_url)]
+ bitrates.sort()
+
for bitrate in bitrates:
for link in links:
formats.append({
diff --git a/youtube_dl/extractor/gameinformer.py b/youtube_dl/extractor/gameinformer.py
new file mode 100644
index 000000000..25870c131
--- /dev/null
+++ b/youtube_dl/extractor/gameinformer.py
@@ -0,0 +1,43 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import int_or_none
+
+
+class GameInformerIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>.+)\.aspx'
+ _TEST = {
+ 'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx',
+ 'info_dict': {
+ 'id': '4515472681001',
+ 'ext': 'm3u8',
+ 'title': 'Replay - Animal Crossing',
+ 'description': 'md5:2e211891b215c85d061adc7a4dd2d930',
+ 'timestamp': 1443457610,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+
+ bc_api_url = self._search_regex(r"getVideo\('([^']+)'", webpage, 'brightcove api url')
+ json_data = self._download_json(
+ bc_api_url + '&video_fields=id,name,shortDescription,publishedDate,videoStillURL,length,IOSRenditions',
+ display_id)
+
+ return {
+ 'id': compat_str(json_data['id']),
+ 'display_id': display_id,
+ 'url': json_data['IOSRenditions'][0]['url'],
+ 'title': json_data['name'],
+ 'description': json_data.get('shortDescription'),
+ 'timestamp': int_or_none(json_data.get('publishedDate'), 1000),
+ 'duration': int_or_none(json_data.get('length')),
+ }
diff --git a/youtube_dl/extractor/gametrailers.py b/youtube_dl/extractor/gametrailers.py
index a6ab795ae..c3f031d9c 100644
--- a/youtube_dl/extractor/gametrailers.py
+++ b/youtube_dl/extractor/gametrailers.py
@@ -1,19 +1,62 @@
from __future__ import unicode_literals
-from .mtv import MTVServicesInfoExtractor
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_age_limit,
+ url_basename,
+)
-class GametrailersIE(MTVServicesInfoExtractor):
- _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
+class GametrailersIE(InfoExtractor):
+ _VALID_URL = r'http://www\.gametrailers\.com/videos/view/[^/]+/(?P<id>.+)'
+
_TEST = {
- 'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
- 'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7',
+ 'url': 'http://www.gametrailers.com/videos/view/gametrailers-com/116437-Just-Cause-3-Review',
+ 'md5': 'f28c4efa0bdfaf9b760f6507955b6a6a',
'info_dict': {
- 'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d',
+ 'id': '2983958',
'ext': 'mp4',
- 'title': 'E3 2013: Debut Trailer',
- 'description': 'Faith is back! Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
+ 'display_id': '116437-Just-Cause-3-Review',
+ 'title': 'Just Cause 3 - Review',
+ 'description': 'It\'s a lot of fun to shoot at things and then watch them explode in Just Cause 3, but should there be more to the experience than that?',
},
}
- _FEED_URL = 'http://www.gametrailers.com/feeds/mrss'
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ title = self._html_search_regex(
+ r'<title>(.+?)\|', webpage, 'title').strip()
+ embed_url = self._proto_relative_url(
+ self._search_regex(
+ r'src=\'(//embed.gametrailers.com/embed/[^\']+)\'', webpage,
+ 'embed url'),
+ scheme='http:')
+ video_id = url_basename(embed_url)
+ embed_page = self._download_webpage(embed_url, video_id)
+ embed_vars_json = self._search_regex(
+ r'(?s)var embedVars = (\{.*?\})\s*</script>', embed_page,
+ 'embed vars')
+ info = self._parse_json(embed_vars_json, video_id)
+
+ formats = []
+ for media in info['media']:
+ if media['mediaPurpose'] == 'play':
+ formats.append({
+ 'url': media['uri'],
+ 'height': media['height'],
+ 'width': media['width'],
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'formats': formats,
+ 'thumbnail': info.get('thumbUri'),
+ 'description': self._og_search_description(webpage),
+ 'duration': int_or_none(info.get('videoLengthInSeconds')),
+ 'age_limit': parse_age_limit(info.get('audienceRating')),
+ }
diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py
index a6834db43..3befd3e7b 100644
--- a/youtube_dl/extractor/gdcvault.py
+++ b/youtube_dl/extractor/gdcvault.py
@@ -3,13 +3,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
remove_end,
HEADRequest,
+ sanitized_Request,
)
@@ -125,7 +123,7 @@ class GDCVaultIE(InfoExtractor):
'password': password,
}
- request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
+ request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(request, display_id, 'Logging in')
start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index ec748ed9f..c2e8f9b62 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -4,12 +4,13 @@ from __future__ import unicode_literals
import os
import re
+import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
+ compat_etree_fromstring,
compat_urllib_parse_unquote,
- compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
@@ -20,7 +21,7 @@ from ..utils import (
HEADRequest,
is_html,
orderedSet,
- parse_xml,
+ sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
@@ -29,7 +30,10 @@ from ..utils import (
url_basename,
xpath_text,
)
-from .brightcove import BrightcoveIE
+from .brightcove import (
+ BrightcoveLegacyIE,
+ BrightcoveNewIE,
+)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
@@ -49,6 +53,8 @@ from .dailymotion import DailymotionCloudIE
from .onionstudios import OnionStudiosIE
from .snagfilms import SnagFilmsEmbedIE
from .screenwavemedia import ScreenwaveMediaIE
+from .mtv import MTVServicesEmbeddedIE
+from .pladform import PladformIE
class GenericIE(InfoExtractor):
@@ -139,6 +145,7 @@ class GenericIE(InfoExtractor):
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
+ 'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
@@ -231,6 +238,22 @@ class GenericIE(InfoExtractor):
}
},
{
+ # redirect in Refresh HTTP header
+ 'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
+ 'info_dict': {
+ 'id': 'pO8h3EaFRdo',
+ 'ext': 'mp4',
+ 'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
+ 'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
+ 'upload_date': '20150917',
+ 'uploader_id': 'brtvofficial',
+ 'uploader': 'Boiler Room',
+ },
+ 'params': {
+ 'skip_download': False,
+ },
+ },
+ {
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
@@ -256,7 +279,7 @@ class GenericIE(InfoExtractor):
# it also tests brightcove videos that need to set the 'Referer' in the
# http requests
{
- 'add_ie': ['Brightcove'],
+ 'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
@@ -280,7 +303,7 @@ class GenericIE(InfoExtractor):
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
- 'add_ie': ['Brightcove'],
+ 'add_ie': ['BrightcoveLegacy'],
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
@@ -295,7 +318,7 @@ class GenericIE(InfoExtractor):
},
{
# https://github.com/rg3/youtube-dl/issues/3541
- 'add_ie': ['Brightcove'],
+ 'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
@@ -317,6 +340,7 @@ class GenericIE(InfoExtractor):
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
+ 'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
@@ -328,6 +352,7 @@ class GenericIE(InfoExtractor):
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
+ 'duration': 135.427,
},
'params': {
'skip_download': True,
@@ -801,6 +826,19 @@ class GenericIE(InfoExtractor):
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
+ # Kaltura embed protected with referrer
+ {
+ 'url': 'http://www.disney.nl/disney-channel/filmpjes/achter-de-schermen#/videoId/violetta-achter-de-schermen-ruggero',
+ 'info_dict': {
+ 'id': '1_g4fbemnq',
+ 'ext': 'mp4',
+ 'title': 'Violetta - Achter De Schermen - Ruggero',
+ 'description': 'Achter de schermen met Ruggero',
+ 'timestamp': 1435133761,
+ 'upload_date': '20150624',
+ 'uploader_id': 'echojecka',
+ },
+ },
# Eagle.Platform embed (generic URL)
{
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
@@ -925,8 +963,9 @@ class GenericIE(InfoExtractor):
'info_dict': {
'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs',
'ext': 'mp4',
- 'description': 'VIDEO: Index/Match versus VLOOKUP.',
+ 'description': 'VIDEO: INDEX/MATCH versus VLOOKUP.',
'title': 'This is what separates the Excel masters from the wannabes',
+ 'duration': 191.933,
},
'params': {
# m3u8 downloads
@@ -1012,6 +1051,31 @@ class GenericIE(InfoExtractor):
'ext': 'mp4',
'title': 'cinemasnob',
},
+ },
+ # BrightcoveInPageEmbed embed
+ {
+ 'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
+ 'info_dict': {
+ 'id': '4238694884001',
+ 'ext': 'flv',
+ 'title': 'Tabletop: Dread, Last Thoughts',
+ 'description': 'Tabletop: Dread, Last Thoughts',
+ 'duration': 51690,
+ },
+ },
+ # JWPlayer with M3U8
+ {
+ 'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video',
+ 'info_dict': {
+ 'id': 'playlist',
+ 'ext': 'mp4',
+ 'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ',
+ 'uploader': 'ren.tv',
+ },
+ 'params': {
+ # m3u8 downloads
+ 'skip_download': True,
+ }
}
]
@@ -1155,7 +1219,7 @@ class GenericIE(InfoExtractor):
full_response = None
if head_response is False:
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
@@ -1184,7 +1248,7 @@ class GenericIE(InfoExtractor):
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
# Some webservers may serve compressed content of rather big size (e.g. gzipped flac)
# making it impossible to download only chunk of the file (yet we need only 512kB to
# test whether it's HTML or not). According to youtube-dl default Accept-Encoding
@@ -1219,7 +1283,7 @@ class GenericIE(InfoExtractor):
# Is it an RSS feed, a SMIL file or a XSPF playlist?
try:
- doc = parse_xml(webpage)
+ doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
@@ -1271,14 +1335,14 @@ class GenericIE(InfoExtractor):
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
- # Look for BrightCove:
- bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
+ # Look for Brightcove Legacy Studio embeds
+ bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
self.to_screen('Brightcove video detected.')
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
- 'ie_key': 'Brightcove'
+ 'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
@@ -1288,6 +1352,11 @@ class GenericIE(InfoExtractor):
'entries': entries,
}
+ # Look for Brightcove New Studio embeds
+ bc_urls = BrightcoveNewIE._extract_urls(webpage)
+ if bc_urls:
+ return _playlist_from_matches(bc_urls, ie='BrightcoveNew')
+
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
@@ -1436,7 +1505,7 @@ class GenericIE(InfoExtractor):
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
- return OoyalaIE._build_url_result(mobj.group('ec'))
+ return OoyalaIE._build_url_result(smuggle_url(mobj.group('ec'), {'domain': url}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
@@ -1444,7 +1513,7 @@ class GenericIE(InfoExtractor):
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
- embeds, getter=lambda v: OoyalaIE._url_for_embed_code(v['provider_video_id']), ie='Ooyala')
+ embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
@@ -1594,12 +1663,9 @@ class GenericIE(InfoExtractor):
return self.url_result(url, ie='Vulture')
# Look for embedded mtvservices player
- mobj = re.search(
- r'<iframe src="(?P<url>https?://media\.mtvnservices\.com/embed/[^"]+)"',
- webpage)
- if mobj is not None:
- url = unescapeHTML(mobj.group('url'))
- return self.url_result(url, ie='MTVServicesEmbedded')
+ mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
+ if mtvservices_url:
+ return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
@@ -1638,7 +1704,7 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
- r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
+ r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
@@ -1656,10 +1722,12 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
- mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_id'\s*:\s*'(?P<id>[^']+)',", webpage) or
- re.search(r'(?s)(["\'])(?:https?:)?//cdnapisec\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?\1.*?entry_id\s*:\s*(["\'])(?P<id>[^\2]+?)\2', webpage))
+ mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_?[Ii]d'\s*:\s*'(?P<id>[^']+)',", webpage) or
+ re.search(r'(?s)(?P<q1>["\'])(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?(?P=q1).*?entry_?[Ii]d\s*:\s*(?P<q2>["\'])(?P<id>.+?)(?P=q2)', webpage))
if mobj is not None:
- return self.url_result('kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), 'Kaltura')
+ return self.url_result(smuggle_url(
+ 'kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(),
+ {'source_url': url}), 'Kaltura')
# Look for Eagle.Platform embeds
mobj = re.search(
@@ -1674,10 +1742,9 @@ class GenericIE(InfoExtractor):
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
- mobj = re.search(
- r'<iframe[^>]+src="(?P<url>https?://out\.pladform\.ru/player\?.+?)"', webpage)
- if mobj is not None:
- return self.url_result(mobj.group('url'), 'Pladform')
+ pladform_url = PladformIE._extract_url(webpage)
+ if pladform_url:
+ return self.url_result(pladform_url)
# Look for Playwire embeds
mobj = re.search(
@@ -1704,7 +1771,7 @@ class GenericIE(InfoExtractor):
# Look for UDN embeds
mobj = re.search(
- r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._VALID_URL, webpage)
+ r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
@@ -1808,6 +1875,9 @@ class GenericIE(InfoExtractor):
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
+ # In Python 2, response HTTP headers are bytestrings
+ if sys.version_info < (3, 0) and isinstance(refresh_header, str):
+ refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
@@ -1821,6 +1891,7 @@ class GenericIE(InfoExtractor):
entries = []
for video_url in found:
+ video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
@@ -1832,25 +1903,24 @@ class GenericIE(InfoExtractor):
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
+ entry_info_dict = {
+ 'id': video_id,
+ 'uploader': video_uploader,
+ 'title': video_title,
+ 'age_limit': age_limit,
+ }
+
ext = determine_ext(video_url)
if ext == 'smil':
- entries.append({
- 'id': video_id,
- 'formats': self._extract_smil_formats(video_url, video_id),
- 'uploader': video_uploader,
- 'title': video_title,
- 'age_limit': age_limit,
- })
+ entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
+ elif ext == 'm3u8':
+ entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
else:
- entries.append({
- 'id': video_id,
- 'url': video_url,
- 'uploader': video_uploader,
- 'title': video_title,
- 'age_limit': age_limit,
- })
+ entry_info_dict['url'] = video_url
+
+ entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
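The Kaltura and Ooyala hunks above rely on smuggling the embedding page URL into the result URL so the target extractor can recover it later with unsmuggle_url. A minimal standalone sketch of that round trip (the real helpers live in youtube_dl/utils.py and encode the payload slightly differently; the ids below are hypothetical):

import json

try:
    from urllib.parse import quote, unquote  # Python 3
except ImportError:
    from urllib import quote, unquote  # Python 2

def smuggle(url, data):
    # Piggyback extra data on the URL fragment, as smuggle_url does
    return url + '#__youtubedl_smuggle=' + quote(json.dumps(data))

def unsmuggle(url, default=None):
    # Recover (clean_url, data); hand back the default when nothing was smuggled
    if '#__youtubedl_smuggle=' not in url:
        return url, default
    clean_url, _, payload = url.partition('#__youtubedl_smuggle=')
    return clean_url, json.loads(unquote(payload))

smugged = smuggle('kaltura:1234567:0_abcdefgh',
                  {'source_url': 'http://embedder.example.com/page'})
print(unsmuggle(smugged, {}))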
diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index 33d6432a6..c65ef6bcf 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -14,79 +14,58 @@ from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
+ str_or_none,
)
class GloboIE(InfoExtractor):
- _VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)'
+ _VALID_URL = '(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})'
_API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
_SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=17.0.0.132&resource_id=%s'
- _VIDEOID_REGEXES = [
- r'\bdata-video-id="(\d+)"',
- r'\bdata-player-videosids="(\d+)"',
- r'<div[^>]+\bid="(\d+)"',
- ]
-
_RESIGN_EXPIRATION = 86400
- _TESTS = [
- {
- 'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/',
- 'md5': '03ebf41cb7ade43581608b7d9b71fab0',
- 'info_dict': {
- 'id': '3654973',
- 'ext': 'mp4',
- 'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão',
- 'duration': 251.585,
- 'uploader': 'SporTV',
- 'uploader_id': 698,
- 'like_count': int,
- }
- },
- {
- 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
- 'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
- 'info_dict': {
- 'id': '3607726',
- 'ext': 'mp4',
- 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
- 'duration': 103.204,
- 'uploader': 'Globo.com',
- 'uploader_id': 265,
- 'like_count': int,
- }
- },
- {
- 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
- 'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
- 'info_dict': {
- 'id': '3652183',
- 'ext': 'mp4',
- 'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
- 'duration': 110.711,
- 'uploader': 'Rede Globo',
- 'uploader_id': 196,
- 'like_count': int,
- }
+ _TESTS = [{
+ 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
+ 'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
+ 'info_dict': {
+ 'id': '3607726',
+ 'ext': 'mp4',
+ 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
+ 'duration': 103.204,
+ 'uploader': 'Globo.com',
+ 'uploader_id': '265',
},
- {
- 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/',
- 'md5': 'c1defca721ce25b2354e927d3e4b3dec',
- 'info_dict': {
- 'id': '3928201',
- 'ext': 'mp4',
- 'title': 'Ator e diretor argentino, Ricado Darín fala sobre utopias e suas perdas',
- 'duration': 1472.906,
- 'uploader': 'Canal Brasil',
- 'uploader_id': 705,
- 'like_count': int,
- }
+ }, {
+ 'url': 'http://globoplay.globo.com/v/4581987/',
+ 'md5': 'f36a1ecd6a50da1577eee6dd17f67eff',
+ 'info_dict': {
+ 'id': '4581987',
+ 'ext': 'mp4',
+ 'title': 'Acidentes de trânsito estão entre as maiores causas de queda de energia em SP',
+ 'duration': 137.973,
+ 'uploader': 'Rede Globo',
+ 'uploader_id': '196',
},
- ]
-
- class MD5():
+ }, {
+ 'url': 'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://globosatplay.globo.com/globonews/v/4472924/',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html',
+ 'only_matching': True,
+ }]
+
+ class MD5:
HEX_FORMAT_LOWERCASE = 0
HEX_FORMAT_UPPERCASE = 1
BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
@@ -353,9 +332,6 @@ class GloboIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
- video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')
-
video = self._download_json(
self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
@@ -364,7 +340,7 @@ class GloboIE(InfoExtractor):
formats = []
for resource in video['resources']:
resource_id = resource.get('_id')
- if not resource_id:
+ if not resource_id or resource_id.endswith('manifest'):
continue
security = self._download_json(
@@ -393,20 +369,23 @@ class GloboIE(InfoExtractor):
resource_url = resource['url']
signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash')
if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'):
- formats.extend(self._extract_m3u8_formats(signed_url, resource_id, 'mp4'))
+ m3u8_formats = self._extract_m3u8_formats(
+ signed_url, resource_id, 'mp4', entry_protocol='m3u8_native',
+ m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
else:
formats.append({
'url': signed_url,
- 'format_id': resource_id,
- 'height': resource.get('height'),
+ 'format_id': 'http-%s' % resource_id,
+ 'height': int_or_none(resource.get('height')),
})
self._sort_formats(formats)
duration = float_or_none(video.get('duration'), 1000)
- like_count = int_or_none(video.get('likes'))
uploader = video.get('channel')
- uploader_id = video.get('channel_id')
+ uploader_id = str_or_none(video.get('channel_id'))
return {
'id': video_id,
@@ -414,6 +393,46 @@ class GloboIE(InfoExtractor):
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
- 'like_count': like_count,
'formats': formats
}
+
+
+class GloboArticleIE(InfoExtractor):
+ _VALID_URL = 'https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/]+)\.html'
+
+ _VIDEOID_REGEXES = [
+ r'\bdata-video-id=["\'](\d{7,})',
+ r'\bdata-player-videosids=["\'](\d{7,})',
+ r'\bvideosIDs\s*:\s*["\'](\d{7,})',
+ r'\bdata-id=["\'](\d{7,})',
+ r'<div[^>]+\bid=["\'](\d{7,})',
+ ]
+
+ _TESTS = [{
+ 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
+ 'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
+ 'info_dict': {
+ 'id': '3652183',
+ 'ext': 'mp4',
+ 'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
+ 'duration': 110.711,
+ 'uploader': 'Rede Globo',
+ 'uploader_id': '196',
+ }
+ }, {
+ 'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html',
+ 'only_matching': True,
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url)
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+ webpage = self._download_webpage(url, display_id)
+ video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')
+ return self.url_result('globo:%s' % video_id, 'Globo')
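The new GloboArticleIE defers to GloboIE via suitable(), the standard youtube-dl pattern for keeping a broad article-page matcher from shadowing a more specific video extractor. A self-contained sketch of the pattern (class names and domain are hypothetical):

import re

class BaseIE(object):
    @classmethod
    def suitable(cls, url):
        return re.match(cls._VALID_URL, url) is not None

class VideoIE(BaseIE):
    _VALID_URL = r'https?://media\.example\.com/v/(?P<id>\d{7,})'

class ArticleIE(BaseIE):
    _VALID_URL = r'https?://media\.example\.com/(?:[^/]+/)*(?P<id>[^/]+)\.html'

    @classmethod
    def suitable(cls, url):
        # Let the dedicated video extractor claim its own URLs first
        return False if VideoIE.suitable(url) else super(ArticleIE, cls).suitable(url)

print(ArticleIE.suitable('http://media.example.com/news/story.html'))  # True
print(ArticleIE.suitable('http://media.example.com/v/1234567'))        # False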
diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py
index fcefe54cd..731bacd67 100644
--- a/youtube_dl/extractor/googleplus.py
+++ b/youtube_dl/extractor/googleplus.py
@@ -61,7 +61,7 @@ class GooglePlusIE(InfoExtractor):
'width': int(width),
'height': int(height),
} for width, height, video_url in re.findall(
- r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
+ r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent.com.*?)"', webpage)]
self._sort_formats(formats)
return {
diff --git a/youtube_dl/extractor/gputechconf.py b/youtube_dl/extractor/gputechconf.py
new file mode 100644
index 000000000..145b55bf3
--- /dev/null
+++ b/youtube_dl/extractor/gputechconf.py
@@ -0,0 +1,55 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ xpath_element,
+ xpath_text,
+ int_or_none,
+ parse_duration,
+)
+
+
+class GPUTechConfIE(InfoExtractor):
+ _VALID_URL = r'https?://on-demand\.gputechconf\.com/gtc/2015/video/S(?P<id>\d+)\.html'
+ _TEST = {
+ 'url': 'http://on-demand.gputechconf.com/gtc/2015/video/S5156.html',
+ 'md5': 'a8862a00a0fd65b8b43acc5b8e33f798',
+ 'info_dict': {
+ 'id': '5156',
+ 'ext': 'mp4',
+ 'title': 'Coordinating More Than 3 Million CUDA Threads for Social Network Analysis',
+ 'duration': 1219,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ root_path = self._search_regex(r'var\s+rootPath\s*=\s*"([^"]+)', webpage, 'root path', 'http://evt.dispeak.com/nvidia/events/gtc15/')
+ xml_file_id = self._search_regex(r'var\s+xmlFileId\s*=\s*"([^"]+)', webpage, 'xml file id')
+
+ doc = self._download_xml('%sxml/%s.xml' % (root_path, xml_file_id), video_id)
+
+ metadata = xpath_element(doc, 'metadata')
+ http_host = xpath_text(metadata, 'httpHost', 'http host', True)
+ mbr_videos = xpath_element(metadata, 'MBRVideos')
+
+ formats = []
+ for mbr_video in mbr_videos.findall('MBRVideo'):
+ stream_name = xpath_text(mbr_video, 'streamName')
+ if stream_name:
+ formats.append({
+ 'url': 'http://%s/%s' % (http_host, stream_name.replace('mp4:', '')),
+ 'tbr': int_or_none(xpath_text(mbr_video, 'bitrate')),
+ })
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': xpath_text(metadata, 'title'),
+ 'duration': parse_duration(xpath_text(metadata, 'endTime')),
+ 'creator': xpath_text(metadata, 'speaker'),
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/groupon.py b/youtube_dl/extractor/groupon.py
index 8b9e0e2f8..63c05b6a6 100644
--- a/youtube_dl/extractor/groupon.py
+++ b/youtube_dl/extractor/groupon.py
@@ -18,6 +18,8 @@ class GrouponIE(InfoExtractor):
'id': 'tubGNycTo_9Uxg82uESj4i61EYX8nyuf',
'ext': 'mp4',
'title': 'Bikram Yoga Huntington Beach | Orange County',
+ 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+ 'duration': 44.961,
},
}],
'params': {
diff --git a/youtube_dl/extractor/hearthisat.py b/youtube_dl/extractor/hearthisat.py
index a19b31ac0..7d8698655 100644
--- a/youtube_dl/extractor/hearthisat.py
+++ b/youtube_dl/extractor/hearthisat.py
@@ -4,12 +4,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urlparse,
-)
+from ..compat import compat_urlparse
from ..utils import (
HEADRequest,
+ sanitized_Request,
str_to_int,
urlencode_postdata,
urlhandle_detect_ext,
@@ -47,7 +45,7 @@ class HearThisAtIE(InfoExtractor):
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
payload = urlencode_postdata({'tracks[]': track_id})
- req = compat_urllib_request.Request(self._PLAYLIST_URL, payload)
+ req = sanitized_Request(self._PLAYLIST_URL, payload)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
track = self._download_json(req, track_id, 'Downloading playlist')[0]
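This commit swaps compat_urllib_request.Request for sanitized_Request across many extractors; the wrapper normalizes the URL before building the request. A rough sketch of the idea, assuming a minimal sanitizer (the exact rules live in youtube_dl/utils.py):

try:
    import urllib.request as compat_urllib_request  # Python 3
except ImportError:
    import urllib2 as compat_urllib_request  # Python 2

def sanitize_url(url):
    # Illustrative only: upgrade protocol-relative URLs to something urllib accepts
    return 'http:%s' % url if url.startswith('//') else url

def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)

req = sanitized_Request('//api.example.com/playlist', b'tracks[]=1234')
req.add_header('Content-Type', 'application/x-www-form-urlencoded')
print(req.get_full_url())  # http://api.example.com/playlist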
diff --git a/youtube_dl/extractor/hostingbulk.py b/youtube_dl/extractor/hostingbulk.py
deleted file mode 100644
index a3154cfde..000000000
--- a/youtube_dl/extractor/hostingbulk.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
-from ..utils import (
- ExtractorError,
- int_or_none,
- urlencode_postdata,
-)
-
-
-class HostingBulkIE(InfoExtractor):
- _VALID_URL = r'''(?x)
- https?://(?:www\.)?hostingbulk\.com/
- (?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html'''
- _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
- _TEST = {
- 'url': 'http://hostingbulk.com/n0ulw1hv20fm.html',
- 'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f',
- 'info_dict': {
- 'id': 'n0ulw1hv20fm',
- 'ext': 'mp4',
- 'title': 'md5:5afeba33f48ec87219c269e054afd622',
- 'filesize': 6816081,
- 'thumbnail': 're:^http://.*\.jpg$',
- }
- }
-
- def _real_extract(self, url):
- video_id = self._match_id(url)
- url = 'http://hostingbulk.com/{0:}.html'.format(video_id)
-
- # Custom request with cookie to set language to English, so our file
- # deleted regex would work.
- request = compat_urllib_request.Request(
- url, headers={'Cookie': 'lang=english'})
- webpage = self._download_webpage(request, video_id)
-
- if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
- raise ExtractorError('Video %s does not exist' % video_id,
- expected=True)
-
- title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title')
- filesize = int_or_none(
- self._search_regex(
- r'<small>\((\d+)\sbytes?\)</small>',
- webpage,
- 'filesize',
- fatal=False
- )
- )
- thumbnail = self._search_regex(
- r'<img src="([^"]+)".+?class="pic"',
- webpage, 'thumbnail', fatal=False)
-
- fields = self._hidden_inputs(webpage)
-
- request = compat_urllib_request.Request(url, urlencode_postdata(fields))
- request.add_header('Content-type', 'application/x-www-form-urlencoded')
- response = self._request_webpage(request, video_id,
- 'Submiting download request')
- video_url = response.geturl()
-
- formats = [{
- 'format_id': 'sd',
- 'filesize': filesize,
- 'url': video_url,
- }]
-
- return {
- 'id': video_id,
- 'title': title,
- 'thumbnail': thumbnail,
- 'formats': formats,
- }
diff --git a/youtube_dl/extractor/hotnewhiphop.py b/youtube_dl/extractor/hotnewhiphop.py
index 651784b73..31e219945 100644
--- a/youtube_dl/extractor/hotnewhiphop.py
+++ b/youtube_dl/extractor/hotnewhiphop.py
@@ -3,13 +3,11 @@ from __future__ import unicode_literals
import base64
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
HEADRequest,
+ sanitized_Request,
)
@@ -41,7 +39,7 @@ class HotNewHipHopIE(InfoExtractor):
('mediaType', 's'),
('mediaId', video_id),
])
- r = compat_urllib_request.Request(
+ r = sanitized_Request(
'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata)
r.add_header('Content-Type', 'application/x-www-form-urlencoded')
mkd = self._download_json(
diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py
index 16677f179..e8f51e545 100644
--- a/youtube_dl/extractor/howcast.py
+++ b/youtube_dl/extractor/howcast.py
@@ -16,6 +16,7 @@ class HowcastIE(InfoExtractor):
'description': 'md5:dbe792e5f6f1489027027bf2eba188a3',
'timestamp': 1276081287,
'upload_date': '20100609',
+ 'duration': 56.823,
},
'params': {
# m3u8 download
diff --git a/youtube_dl/extractor/hypem.py b/youtube_dl/extractor/hypem.py
index aa0724a02..b3706fe6d 100644
--- a/youtube_dl/extractor/hypem.py
+++ b/youtube_dl/extractor/hypem.py
@@ -4,12 +4,10 @@ import json
import time
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -30,15 +28,12 @@ class HypemIE(InfoExtractor):
track_id = self._match_id(url)
data = {'ax': 1, 'ts': time.time()}
- data_encoded = compat_urllib_parse.urlencode(data)
- complete_url = url + "?" + data_encoded
- request = compat_urllib_request.Request(complete_url)
+ request = sanitized_Request(url + '?' + compat_urllib_parse.urlencode(data))
response, urlh = self._download_webpage_handle(
request, track_id, 'Downloading webpage with the url')
- cookie = urlh.headers.get('Set-Cookie', '')
html_tracks = self._html_search_regex(
- r'(?ms)<script type="application/json" id="displayList-data">\s*(.*?)\s*</script>',
+ r'(?ms)<script type="application/json" id="displayList-data">(.+?)</script>',
response, 'tracks')
try:
track_list = json.loads(html_tracks)
@@ -48,15 +43,14 @@ class HypemIE(InfoExtractor):
key = track['key']
track_id = track['id']
- artist = track['artist']
title = track['song']
- serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key)
- request = compat_urllib_request.Request(
- serve_url, '', {'Content-Type': 'application/json'})
- request.add_header('cookie', cookie)
+ request = sanitized_Request(
+ 'http://hypem.com/serve/source/%s/%s' % (track_id, key),
+ '', {'Content-Type': 'application/json'})
song_data = self._download_json(request, track_id, 'Downloading metadata')
- final_url = song_data["url"]
+ final_url = song_data['url']
+ artist = track.get('artist')
return {
'id': track_id,
diff --git a/youtube_dl/extractor/iconosquare.py b/youtube_dl/extractor/iconosquare.py
index 70e4c0d41..a39f422e9 100644
--- a/youtube_dl/extractor/iconosquare.py
+++ b/youtube_dl/extractor/iconosquare.py
@@ -1,7 +1,11 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+ int_or_none,
+ get_element_by_id,
+ remove_end,
+)
class IconosquareIE(InfoExtractor):
@@ -12,7 +16,7 @@ class IconosquareIE(InfoExtractor):
'info_dict': {
'id': '522207370455279102_24101272',
'ext': 'mp4',
- 'title': 'Instagram media by @aguynamedpatrick (Patrick Janelle)',
+ 'title': 'Instagram photo by @aguynamedpatrick (Patrick Janelle)',
'description': 'md5:644406a9ec27457ed7aa7a9ebcd4ce3d',
'timestamp': 1376471991,
'upload_date': '20130814',
@@ -29,8 +33,7 @@ class IconosquareIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
media = self._parse_json(
- self._search_regex(
- r'window\.media\s*=\s*({.+?});\n', webpage, 'media'),
+ get_element_by_id('mediaJson', webpage),
video_id)
formats = [{
@@ -41,9 +44,7 @@ class IconosquareIE(InfoExtractor):
} for format_id, f in media['videos'].items()]
self._sort_formats(formats)
- title = self._html_search_regex(
- r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
- webpage, 'title')
+ title = remove_end(self._og_search_title(webpage), ' - via Iconosquare')
timestamp = int_or_none(media.get('created_time') or media.get('caption', {}).get('created_time'))
description = media.get('caption', {}).get('text')
@@ -61,6 +62,14 @@ class IconosquareIE(InfoExtractor):
'height': int_or_none(t.get('height'))
} for thumbnail_id, t in media.get('images', {}).items()]
+ comments = [{
+ 'id': comment.get('id'),
+ 'text': comment['text'],
+ 'timestamp': int_or_none(comment.get('created_time')),
+ 'author': comment.get('from', {}).get('full_name'),
+ 'author_id': comment.get('from', {}).get('username'),
+ } for comment in media.get('comments', {}).get('data', []) if 'text' in comment]
+
return {
'id': video_id,
'title': title,
@@ -72,4 +81,5 @@ class IconosquareIE(InfoExtractor):
'comment_count': comment_count,
'like_count': like_count,
'formats': formats,
+ 'comments': comments,
}
diff --git a/youtube_dl/extractor/imdb.py b/youtube_dl/extractor/imdb.py
index 4bb574cf3..02e1e428e 100644
--- a/youtube_dl/extractor/imdb.py
+++ b/youtube_dl/extractor/imdb.py
@@ -4,8 +4,8 @@ import re
import json
from .common import InfoExtractor
-from ..compat import (
- compat_urlparse,
+from ..utils import (
+ qualities,
)
@@ -30,24 +30,33 @@ class ImdbIE(InfoExtractor):
descr = self._html_search_regex(
r'(?s)<span itemprop="description">(.*?)</span>',
webpage, 'description', fatal=False)
- available_formats = re.findall(
- r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
- flags=re.MULTILINE)
+ player_url = 'http://www.imdb.com/video/imdb/vi%s/imdb/single' % video_id
+ player_page = self._download_webpage(
+ player_url, video_id, 'Downloading player page')
+ # the player page contains the info for the default format, we have to
+ # fetch other pages for the rest of the formats
+ extra_formats = re.findall(r'href="(?P<url>%s.*?)".*?>(?P<name>.*?)<' % re.escape(player_url), player_page)
+ format_pages = [
+ self._download_webpage(
+ f_url, video_id, 'Downloading info for %s format' % f_name)
+ for f_url, f_name in extra_formats]
+ format_pages.append(player_page)
+
+ quality = qualities(['SD', '480p', '720p'])
formats = []
- for f_id, f_path in available_formats:
- f_path = f_path.strip()
- format_page = self._download_webpage(
- compat_urlparse.urljoin(url, f_path),
- 'Downloading info for %s format' % f_id)
+ for format_page in format_pages:
json_data = self._search_regex(
r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>',
format_page, 'json data', flags=re.DOTALL)
info = json.loads(json_data)
format_info = info['videoPlayerObject']['video']
+ f_id = format_info['ffname']
formats.append({
'format_id': f_id,
'url': format_info['videoInfoList'][0]['videoUrl'],
+ 'quality': quality(f_id),
})
+ self._sort_formats(formats)
return {
'id': video_id,
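The qualities() helper used above ranks format ids by their position in a preference list, so _sort_formats can order formats whose ids carry no numeric height. A sketch mirroring the utils helper:

def qualities(quality_ids):
    # Mirrors youtube_dl.utils.qualities: rank a format id by list position
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1  # unknown ids sort below all known ones
    return q

quality = qualities(['SD', '480p', '720p'])
formats = [{'format_id': f_id, 'quality': quality(f_id)}
           for f_id in ('720p', 'SD', '1080p')]
print(sorted(formats, key=lambda f: f['quality']))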
diff --git a/youtube_dl/extractor/infoq.py b/youtube_dl/extractor/infoq.py
index 71cfd12c5..016af2084 100644
--- a/youtube_dl/extractor/infoq.py
+++ b/youtube_dl/extractor/infoq.py
@@ -1,3 +1,5 @@
+# coding: utf-8
+
from __future__ import unicode_literals
import base64
@@ -5,8 +7,9 @@ import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
- compat_urlparse,
+ compat_parse_qs,
)
+from ..utils import determine_ext
class InfoQIE(InfoExtractor):
@@ -16,7 +19,7 @@ class InfoQIE(InfoExtractor):
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
- 'id': '12-jan-pythonthings',
+ 'id': 'A-Few-of-My-Favorite-Python-Things',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
@@ -24,40 +27,84 @@ class InfoQIE(InfoExtractor):
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
+ }, {
+ 'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery',
+ 'md5': '4918d0cca1497f2244572caf626687ef',
+ 'info_dict': {
+ 'id': 'openstack-continued-delivery',
+ 'title': 'OpenStack持续交付之路',
+ 'ext': 'flv',
+ 'description': 'md5:308d981fb28fa42f49f9568322c683ff',
+ },
}]
- def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
+ def _extract_bokecc_videos(self, webpage, video_id):
+ # TODO: bokecc.com is a Chinese video cloud platform; it deserves an
+ # independent extractor, but no other examples using bokecc are known yet
+ player_params_str = self._html_search_regex(
+ r'<script[^>]+src="http://p\.bokecc\.com/player\?([^"]+)',
+ webpage, 'player params', default=None)
- video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
- video_description = self._html_search_meta('description', webpage, 'description')
+ player_params = compat_parse_qs(player_params_str)
+
+ info_xml = self._download_xml(
+ 'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % (
+ player_params['siteid'][0], player_params['vid'][0]), video_id)
+
+ return [{
+ 'format_id': 'bokecc',
+ 'url': quality.find('./copy').attrib['playurl'],
+ 'preference': int(quality.attrib['value']),
+ } for quality in info_xml.findall('./video/quality')]
+ def _extract_rtmp_videos(self, webpage):
# The server URL is hardcoded
video_url = 'rtmpe://video.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
- r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
+ r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None)
+
real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
playpath = 'mp4:' + real_id
- video_filename = playpath.split('/')[-1]
- video_id, extension = video_filename.split('.')
-
- http_base = self._search_regex(
- r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage,
- 'HTTP base URL')
-
- formats = [{
+ return [{
'format_id': 'rtmp',
'url': video_url,
- 'ext': extension,
+ 'ext': determine_ext(playpath),
'play_path': playpath,
- }, {
+ }]
+
+ def _extract_http_videos(self, webpage):
+ http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL')
+
+ policy = self._search_regex(r'InfoQConstants.scp\s*=\s*\'([^\']+)\'', webpage, 'policy')
+ signature = self._search_regex(r'InfoQConstants.scs\s*=\s*\'([^\']+)\'', webpage, 'signature')
+ key_pair_id = self._search_regex(r'InfoQConstants.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id')
+
+ return [{
'format_id': 'http',
- 'url': compat_urlparse.urljoin(url, http_base) + real_id,
+ 'url': http_video_url,
+ 'http_headers': {
+ 'Cookie': 'CloudFront-Policy=%s; CloudFront-Signature=%s; CloudFront-Key-Pair-Id=%s' % (
+ policy, signature, key_pair_id),
+ },
}]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
+ video_description = self._html_search_meta('description', webpage, 'description')
+
+ if '/cn/' in url:
+ # for Chinese videos, an HTTP video URL exists but always fails with 403
+ formats = self._extract_bokecc_videos(webpage, video_id)
+ else:
+ formats = self._extract_rtmp_videos(webpage) + self._extract_http_videos(webpage)
+
self._sort_formats(formats)
return {
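The bokecc branch above turns each <quality> node of a playinfo XML document into a format. A runnable sketch against a made-up response shaped like what _extract_bokecc_videos expects:

import xml.etree.ElementTree as etree

# Hypothetical playinfo payload; the element layout matches the code above
sample = '''<response><video>
<quality value="1"><copy playurl="http://cdn.example.com/low.flv"/></quality>
<quality value="2"><copy playurl="http://cdn.example.com/high.flv"/></quality>
</video></response>'''

info_xml = etree.fromstring(sample)
formats = [{
    'format_id': 'bokecc',
    'url': quality.find('./copy').attrib['playurl'],
    'preference': int(quality.attrib['value']),
} for quality in info_xml.findall('./video/quality')]
print(formats)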
diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py
index 3d78f78c4..c158f2064 100644
--- a/youtube_dl/extractor/instagram.py
+++ b/youtube_dl/extractor/instagram.py
@@ -10,8 +10,8 @@ from ..utils import (
class InstagramIE(InfoExtractor):
- _VALID_URL = r'https://instagram\.com/p/(?P<id>[\da-zA-Z]+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+)'
+ _TESTS = [{
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
'md5': '0d2da106a9d2631273e192b372806516',
'info_dict': {
@@ -21,7 +21,10 @@ class InstagramIE(InfoExtractor):
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
}
- }
+ }, {
+ 'url': 'https://instagram.com/p/-Cmh1cukG2/',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py
index 821c8ec10..36baf3245 100644
--- a/youtube_dl/extractor/iprima.py
+++ b/youtube_dl/extractor/iprima.py
@@ -6,12 +6,10 @@ from random import random
from math import floor
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
ExtractorError,
remove_end,
+ sanitized_Request,
)
@@ -61,7 +59,7 @@ class IPrimaIE(InfoExtractor):
(floor(random() * 1073741824), floor(random() * 1073741824))
)
- req = compat_urllib_request.Request(player_url)
+ req = sanitized_Request(player_url)
req.add_header('Referer', url)
playerpage = self._download_webpage(req, video_id)
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
index 393e67e35..c3731a110 100644
--- a/youtube_dl/extractor/iqiyi.py
+++ b/youtube_dl/extractor/iqiyi.py
@@ -95,6 +95,10 @@ class IqiyiIE(InfoExtractor):
('10', 'h1'),
]
+ @staticmethod
+ def md5_text(text):
+ return hashlib.md5(text.encode('utf-8')).hexdigest()
+
def construct_video_urls(self, data, video_id, _uuid):
def do_xor(x, y):
a = y % 3
@@ -121,7 +125,7 @@ class IqiyiIE(InfoExtractor):
note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
)['t']
t = str(int(math.floor(int(tm) / (600.0))))
- return hashlib.md5((t + mg + x).encode('utf8')).hexdigest()
+ return self.md5_text(t + mg + x)
video_urls_dict = {}
for format_item in data['vp']['tkl'][0]['vs']:
@@ -179,20 +183,19 @@ class IqiyiIE(InfoExtractor):
def get_raw_data(self, tvid, video_id, enc_key, _uuid):
tm = str(int(time.time()))
+ tail = tm + tvid
param = {
'key': 'fvip',
- 'src': hashlib.md5(b'youtube-dl').hexdigest(),
+ 'src': self.md5_text('youtube-dl'),
'tvId': tvid,
'vid': video_id,
'vinfo': 1,
'tm': tm,
- 'enc': hashlib.md5(
- (enc_key + tm + tvid).encode('utf8')).hexdigest(),
+ 'enc': self.md5_text(enc_key + tail),
'qyid': _uuid,
'tn': random.random(),
'um': 0,
- 'authkey': hashlib.md5(
- (tm + tvid).encode('utf8')).hexdigest()
+ 'authkey': self.md5_text(self.md5_text('') + tail),
}
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
@@ -201,7 +204,9 @@ class IqiyiIE(InfoExtractor):
return raw_data
def get_enc_key(self, swf_url, video_id):
- enc_key = '3601ba290e4f4662848c710e2122007e' # last update at 2015-08-10 for Zombie
+ # TODO: automatic key extraction
+ # last update at 2015-12-18 for Zombie::bite
+ enc_key = '8b6b683780897eb8d9a48a02ccc4817d'[::-1]
return enc_key
def _real_extract(self, url):
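The iqiyi signing above boils down to repeated MD5 over a timestamp-plus-tvid tail; md5_text just centralizes the encode/hexdigest dance. A worked example (the tvid is hypothetical):

import hashlib
import time

def md5_text(text):
    return hashlib.md5(text.encode('utf-8')).hexdigest()

tm = str(int(time.time()))
tvid = '1234567'  # hypothetical video id
tail = tm + tvid
enc_key = '8b6b683780897eb8d9a48a02ccc4817d'[::-1]  # stored reversed, as in the hunk
params = {
    'enc': md5_text(enc_key + tail),
    # authkey chains an MD5 of the empty string with the same tail
    'authkey': md5_text(md5_text('') + tail),
}
print(params)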
diff --git a/youtube_dl/extractor/ivi.py b/youtube_dl/extractor/ivi.py
index e82594444..029878d24 100644
--- a/youtube_dl/extractor/ivi.py
+++ b/youtube_dl/extractor/ivi.py
@@ -5,11 +5,9 @@ import re
import json
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -78,7 +76,7 @@ class IviIE(InfoExtractor):
]
}
- request = compat_urllib_request.Request(api_url, json.dumps(data))
+ request = sanitized_Request(api_url, json.dumps(data))
video_json_page = self._download_webpage(
request, video_id, 'Downloading video JSON')
diff --git a/youtube_dl/extractor/jeuxvideo.py b/youtube_dl/extractor/jeuxvideo.py
index 1df084d87..eef7daa29 100644
--- a/youtube_dl/extractor/jeuxvideo.py
+++ b/youtube_dl/extractor/jeuxvideo.py
@@ -28,7 +28,7 @@ class JeuxVideoIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
- title = self._html_search_meta('name', webpage)
+ title = self._html_search_meta('name', webpage) or self._og_search_title(webpage)
config_url = self._html_search_regex(
r'data-src="(/contenu/medias/video.php.*?)"',
webpage, 'config URL')
diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py
index 3dca0e566..583b1a5ad 100644
--- a/youtube_dl/extractor/kaltura.py
+++ b/youtube_dl/extractor/kaltura.py
@@ -2,12 +2,18 @@
from __future__ import unicode_literals
import re
+import base64
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import (
+ compat_urllib_parse,
+ compat_urlparse,
+)
from ..utils import (
+ clean_html,
ExtractorError,
int_or_none,
+ unsmuggle_url,
)
@@ -16,7 +22,7 @@ class KalturaIE(InfoExtractor):
(?:
kaltura:(?P<partner_id_s>\d+):(?P<id_s>[0-9a-z_]+)|
https?://
- (:?(?:www|cdnapisec)\.)?kaltura\.com/
+ (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/
(?:
(?:
# flash player
@@ -121,31 +127,47 @@ class KalturaIE(InfoExtractor):
video_id, actions, note='Downloading video info JSON')
def _real_extract(self, url):
+ url, smuggled_data = unsmuggle_url(url, {})
+
mobj = re.match(self._VALID_URL, url)
partner_id = mobj.group('partner_id_s') or mobj.group('partner_id') or mobj.group('partner_id_html5')
entry_id = mobj.group('id_s') or mobj.group('id') or mobj.group('id_html5')
info, source_data = self._get_video_info(entry_id, partner_id)
- formats = [{
- 'format_id': '%(fileExt)s-%(bitrate)s' % f,
- 'ext': f['fileExt'],
- 'tbr': f['bitrate'],
- 'fps': f.get('frameRate'),
- 'filesize_approx': int_or_none(f.get('size'), invscale=1024),
- 'container': f.get('containerFormat'),
- 'vcodec': f.get('videoCodecId'),
- 'height': f.get('height'),
- 'width': f.get('width'),
- 'url': '%s/flavorId/%s' % (info['dataUrl'], f['id']),
- } for f in source_data['flavorAssets']]
+ source_url = smuggled_data.get('source_url')
+ if source_url:
+ referrer = base64.b64encode(
+ '://'.join(compat_urlparse.urlparse(source_url)[:2])
+ .encode('utf-8')).decode('utf-8')
+ else:
+ referrer = None
+
+ formats = []
+ for f in source_data['flavorAssets']:
+ video_url = '%s/flavorId/%s' % (info['dataUrl'], f['id'])
+ if referrer:
+ video_url += '?referrer=%s' % referrer
+ formats.append({
+ 'format_id': '%(fileExt)s-%(bitrate)s' % f,
+ 'ext': f.get('fileExt'),
+ 'tbr': int_or_none(f['bitrate']),
+ 'fps': int_or_none(f.get('frameRate')),
+ 'filesize_approx': int_or_none(f.get('size'), invscale=1024),
+ 'container': f.get('containerFormat'),
+ 'vcodec': f.get('videoCodecId'),
+ 'height': int_or_none(f.get('height')),
+ 'width': int_or_none(f.get('width')),
+ 'url': video_url,
+ })
+ self._check_formats(formats, entry_id)
self._sort_formats(formats)
return {
'id': entry_id,
'title': info['name'],
'formats': formats,
- 'description': info.get('description'),
+ 'description': clean_html(info.get('description')),
'thumbnail': info.get('thumbnailUrl'),
'duration': info.get('duration'),
'timestamp': info.get('createdAt'),
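The referrer smuggled in from the generic extractor is reduced to scheme://host and base64-encoded before being appended to each flavor URL. A standalone sketch (page and flavor URLs hypothetical):

import base64

try:
    import urllib.parse as compat_urlparse  # Python 3
except ImportError:
    import urlparse as compat_urlparse  # Python 2

source_url = 'http://embedder.example.com/some/page.html'
# urlparse(...)[:2] keeps only (scheme, netloc), i.e. 'http://embedder.example.com'
referrer = base64.b64encode(
    '://'.join(compat_urlparse.urlparse(source_url)[:2]).encode('utf-8')).decode('utf-8')
video_url = 'http://cdnapisec.kaltura.com/p/1234/flavorId/0_abcdefgh?referrer=' + referrer
print(video_url)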
diff --git a/youtube_dl/extractor/keek.py b/youtube_dl/extractor/keek.py
index c0956ba09..94a03d277 100644
--- a/youtube_dl/extractor/keek.py
+++ b/youtube_dl/extractor/keek.py
@@ -1,46 +1,39 @@
+# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class KeekIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)'
+ _VALID_URL = r'https?://(?:www\.)?keek\.com/keek/(?P<id>\w+)'
IE_NAME = 'keek'
_TEST = {
- 'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
- 'md5': '09c5c109067536c1cec8bac8c21fea05',
+ 'url': 'https://www.keek.com/keek/NODfbab',
+ 'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83',
'info_dict': {
'id': 'NODfbab',
'ext': 'mp4',
- 'uploader': 'youtube-dl project',
- 'uploader_id': 'ytdl',
- 'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
+ 'title': 'md5:35d42050a3ece241d5ddd7fdcc6fd896',
+ 'uploader': 'ytdl',
+ 'uploader_id': 'eGT5bab',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
- video_url = 'http://cdn.keek.com/keek/video/%s' % video_id
- thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
webpage = self._download_webpage(url, video_id)
- raw_desc = self._html_search_meta('description', webpage)
- if raw_desc:
- uploader = self._html_search_regex(
- r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False)
- uploader_id = self._html_search_regex(
- r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False)
- else:
- uploader = None
- uploader_id = None
-
return {
'id': video_id,
- 'url': video_url,
+ 'url': self._og_search_video_url(webpage),
'ext': 'mp4',
- 'title': self._og_search_title(webpage),
- 'thumbnail': thumbnail,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
+ 'title': self._og_search_description(webpage).strip(),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'uploader': self._search_regex(
+ r'data-username=(["\'])(?P<uploader>.+?)\1', webpage,
+ 'uploader', fatal=False, group='uploader'),
+ 'uploader_id': self._search_regex(
+ r'data-user-id=(["\'])(?P<uploader_id>.+?)\1', webpage,
+ 'uploader id', fatal=False, group='uploader_id'),
}
diff --git a/youtube_dl/extractor/keezmovies.py b/youtube_dl/extractor/keezmovies.py
index 82eddec51..126ca13df 100644
--- a/youtube_dl/extractor/keezmovies.py
+++ b/youtube_dl/extractor/keezmovies.py
@@ -1,12 +1,11 @@
from __future__ import unicode_literals
-import os
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse_urlparse,
- compat_urllib_request,
+from ..utils import (
+ sanitized_Request,
+ url_basename,
)
@@ -14,19 +13,20 @@ class KeezMoviesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/.+?(?P<id>[0-9]+)(?:[/?&]|$)'
_TEST = {
'url': 'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711',
- 'md5': '6e297b7e789329923fcf83abb67c9289',
+ 'md5': '1c1e75d22ffa53320f45eeb07bc4cdc0',
'info_dict': {
'id': '1214711',
'ext': 'mp4',
'title': 'Petite Asian Lady Mai Playing In Bathtub',
'age_limit': 18,
+ 'thumbnail': 're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
@@ -38,21 +38,29 @@ class KeezMoviesIE(InfoExtractor):
video_title = self._html_search_regex(
r'<h1 [^>]*>([^<]+)', webpage, 'title')
- video_url = self._html_search_regex(
- r'(?s)html5VideoPlayer = .*?src="([^"]+)"', webpage, 'video URL')
- path = compat_urllib_parse_urlparse(video_url).path
- extension = os.path.splitext(path)[1][1:]
- format = path.split('/')[4].split('_')[:2]
- format = "-".join(format)
+ flashvars = self._parse_json(self._search_regex(
+ r'var\s+flashvars\s*=\s*([^;]+);', webpage, 'flashvars'), video_id)
+
+ formats = []
+ for height in (180, 240, 480):
+ if flashvars.get('quality_%dp' % height):
+ video_url = flashvars['quality_%dp' % height]
+ a_format = {
+ 'url': video_url,
+ 'height': height,
+ 'format_id': '%dp' % height,
+ }
+ filename_parts = url_basename(video_url).split('_')
+ if len(filename_parts) >= 2 and re.match(r'\d+[Kk]', filename_parts[1]):
+ a_format['tbr'] = int(filename_parts[1][:-1])
+ formats.append(a_format)
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'title': video_title,
- 'url': video_url,
- 'ext': extension,
- 'format': format,
- 'format_id': format,
+ 'formats': formats,
'age_limit': age_limit,
+ 'thumbnail': flashvars.get('image_url')
}
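The per-height flashvars URLs carry their bitrate in the file name (e.g. ..._1500k_...), which the hunk above recovers through url_basename. A sketch with a simplified stand-in for that helper (the URL is hypothetical):

import re

try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse  # Python 2

def url_basename(url):
    # Simplified stand-in for youtube_dl.utils.url_basename
    return urlparse(url).path.rstrip('/').rpartition('/')[2]

video_url = 'http://cdn.example.com/videos/clip_1500k_480p.mp4'
filename_parts = url_basename(video_url).split('_')
tbr = None
if len(filename_parts) >= 2 and re.match(r'\d+[Kk]', filename_parts[1]):
    tbr = int(filename_parts[1][:-1])
print(tbr)  # 1500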
diff --git a/youtube_dl/extractor/kuwo.py b/youtube_dl/extractor/kuwo.py
index fa233377d..0c8ed5d07 100644
--- a/youtube_dl/extractor/kuwo.py
+++ b/youtube_dl/extractor/kuwo.py
@@ -57,6 +57,7 @@ class KuwoIE(KuwoBaseIE):
'upload_date': '20080122',
'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c'
},
+ 'skip': 'This song has been taken offline because of copyright issues',
}, {
'url': 'http://www.kuwo.cn/yinyue/6446136/',
'info_dict': {
@@ -76,9 +77,11 @@ class KuwoIE(KuwoBaseIE):
webpage = self._download_webpage(
url, song_id, note='Download song detail info',
errnote='Unable to get song detail info')
+ if '对不起,该歌曲由于版权问题已被下线,将返回网站首页' in webpage:
+ raise ExtractorError('This song has been taken offline because of copyright issues', expected=True)
song_name = self._html_search_regex(
- r'<h1[^>]+title="([^"]+)">', webpage, 'song name')
+ r'(?s)class="(?:[^"\s]+\s+)*title(?:\s+[^"\s]+)*".*?<h1[^>]+title="([^"]+)"', webpage, 'song name')
singer_name = self._html_search_regex(
r'<div[^>]+class="s_img">\s*<a[^>]+title="([^>]+)"',
webpage, 'singer name', fatal=False)
diff --git a/youtube_dl/extractor/letv.py b/youtube_dl/extractor/letv.py
index a28abb0f0..be648000e 100644
--- a/youtube_dl/extractor/letv.py
+++ b/youtube_dl/extractor/letv.py
@@ -8,14 +8,15 @@ import time
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
- compat_urllib_request,
- compat_urlparse,
+ compat_ord,
)
from ..utils import (
determine_ext,
ExtractorError,
parse_iso8601,
+ sanitized_Request,
int_or_none,
+ encode_data_uri,
)
@@ -25,15 +26,16 @@ class LetvIE(InfoExtractor):
_TESTS = [{
'url': 'http://www.letv.com/ptv/vplay/22005890.html',
- 'md5': 'cab23bd68d5a8db9be31c9a222c1e8df',
+ 'md5': 'edadcfe5406976f42f9f266057ee5e40',
'info_dict': {
'id': '22005890',
'ext': 'mp4',
'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家',
- 'timestamp': 1424747397,
- 'upload_date': '20150224',
'description': 'md5:a9cb175fd753e2962176b7beca21a47c',
- }
+ },
+ 'params': {
+ 'hls_prefer_native': True,
+ },
}, {
'url': 'http://www.letv.com/ptv/vplay/1415246.html',
'info_dict': {
@@ -42,16 +44,22 @@ class LetvIE(InfoExtractor):
'title': '美人天下01',
'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda',
},
+ 'params': {
+ 'hls_prefer_native': True,
+ },
}, {
'note': 'This video is available only in Mainland China, thus a proxy is needed',
'url': 'http://www.letv.com/ptv/vplay/1118082.html',
- 'md5': 'f80936fbe20fb2f58648e81386ff7927',
+ 'md5': '2424c74948a62e5f31988438979c5ad1',
'info_dict': {
'id': '1118082',
'ext': 'mp4',
'title': '与龙共舞 完整版',
'description': 'md5:7506a5eeb1722bb9d4068f85024e3986',
},
+ 'params': {
+ 'hls_prefer_native': True,
+ },
'skip': 'Only available in China',
}]
@@ -74,6 +82,27 @@ class LetvIE(InfoExtractor):
_loc3_ = self.ror(_loc3_, _loc2_ % 17)
return _loc3_
+ # see M3U8Encryption class in KLetvPlayer.swf
+ @staticmethod
+ def decrypt_m3u8(encrypted_data):
+ if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
+ return encrypted_data
+ encrypted_data = encrypted_data[5:]
+
+ _loc4_ = bytearray()
+ while encrypted_data:
+ b = compat_ord(encrypted_data[0])
+ _loc4_.extend([b // 16, b & 0x0f])
+ encrypted_data = encrypted_data[1:]
+ idx = len(_loc4_) - 11
+ _loc4_ = _loc4_[idx:] + _loc4_[:idx]
+ _loc7_ = bytearray()
+ while _loc4_:
+ _loc7_.append(_loc4_[0] * 16 + _loc4_[1])
+ _loc4_ = _loc4_[2:]
+
+ return bytes(_loc7_)
+
def _real_extract(self, url):
media_id = self._match_id(url)
page = self._download_webpage(url, media_id)
@@ -85,7 +114,7 @@ class LetvIE(InfoExtractor):
'tkey': self.calc_time_key(int(time.time())),
'domain': 'www.letv.com'
}
- play_json_req = compat_urllib_request.Request(
+ play_json_req = sanitized_Request(
'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
@@ -115,23 +144,28 @@ class LetvIE(InfoExtractor):
for format_id in formats:
if format_id in dispatch:
media_url = playurl['domain'][0] + dispatch[format_id][0]
-
- # Mimic what flvxz.com do
- url_parts = list(compat_urlparse.urlparse(media_url))
- qs = dict(compat_urlparse.parse_qs(url_parts[4]))
- qs.update({
- 'platid': '14',
- 'splatid': '1401',
- 'tss': 'no',
- 'retry': 1
+ media_url += '&' + compat_urllib_parse.urlencode({
+ 'm3v': 1,
+ 'format': 1,
+ 'expect': 3,
+ 'rateid': format_id,
})
- url_parts[4] = compat_urllib_parse.urlencode(qs)
- media_url = compat_urlparse.urlunparse(url_parts)
+
+ nodes_data = self._download_json(
+ media_url, media_id,
+ 'Downloading JSON metadata for format %s' % format_id)
+
+ req = self._request_webpage(
+ nodes_data['nodelist'][0]['location'], media_id,
+ note='Downloading m3u8 information for format %s' % format_id)
+
+ m3u8_data = self.decrypt_m3u8(req.read())
url_info_dict = {
- 'url': media_url,
+ 'url': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'),
'ext': determine_ext(dispatch[format_id][1]),
'format_id': format_id,
+ 'protocol': 'm3u8',
}
if format_id[-1:] == 'p':
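decrypt_m3u8 above undoes a nibble-level scramble: each payload byte is split into two 4-bit halves, the nibble stream is rotated by 11 positions, and pairs are repacked into bytes; the result is then wrapped in a data: URI via encode_data_uri so the native HLS downloader can consume it. A standalone restatement on a toy payload (the output is only meaningful for real Letv data):

def nibble_unscramble(encrypted_data):
    if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
        return encrypted_data  # no magic header: data is not scrambled
    nibbles = bytearray()
    for b in bytearray(encrypted_data[5:]):
        nibbles.extend([b // 16, b & 0x0f])  # split each byte into two nibbles
    idx = len(nibbles) - 11
    nibbles = nibbles[idx:] + nibbles[:idx]  # rotate the nibble stream by 11
    out = bytearray()
    while nibbles:
        out.append(nibbles[0] * 16 + nibbles[1])  # repack nibble pairs into bytes
        nibbles = nibbles[2:]
    return bytes(out)

print(nibble_unscramble(b'VC_01#EXTM3U'))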
diff --git a/youtube_dl/extractor/limelight.py b/youtube_dl/extractor/limelight.py
new file mode 100644
index 000000000..fb03dd527
--- /dev/null
+++ b/youtube_dl/extractor/limelight.py
@@ -0,0 +1,229 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ float_or_none,
+ int_or_none,
+)
+
+
+class LimelightBaseIE(InfoExtractor):
+ _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s'
+ _API_URL = 'http://api.video.limelight.com/rest/organizations/%s/%s/%s/%s.json'
+
+ def _call_playlist_service(self, item_id, method, fatal=True):
+ return self._download_json(
+ self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method),
+ item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal)
+
+ def _call_api(self, organization_id, item_id, method):
+ return self._download_json(
+ self._API_URL % (organization_id, self._API_PATH, item_id, method),
+ item_id, 'Downloading API %s JSON' % method)
+
+ def _extract(self, item_id, pc_method, mobile_method, meta_method):
+ pc = self._call_playlist_service(item_id, pc_method)
+ metadata = self._call_api(pc['orgId'], item_id, meta_method)
+ mobile = self._call_playlist_service(item_id, mobile_method, fatal=False)
+ return pc, mobile, metadata
+
+ def _extract_info(self, streams, mobile_urls, properties):
+ video_id = properties['media_id']
+ formats = []
+
+ for stream in streams:
+ stream_url = stream.get('url')
+ if not stream_url:
+ continue
+ if '.f4m' in stream_url:
+ formats.extend(self._extract_f4m_formats(stream_url, video_id))
+ else:
+ fmt = {
+ 'url': stream_url,
+ 'abr': float_or_none(stream.get('audioBitRate')),
+ 'vbr': float_or_none(stream.get('videoBitRate')),
+ 'fps': float_or_none(stream.get('videoFrameRate')),
+ 'width': int_or_none(stream.get('videoWidthInPixels')),
+ 'height': int_or_none(stream.get('videoHeightInPixels')),
+ 'ext': determine_ext(stream_url)
+ }
+ rtmp = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', stream_url)
+ if rtmp:
+ format_id = 'rtmp'
+ if stream.get('videoBitRate'):
+ format_id += '-%d' % int_or_none(stream['videoBitRate'])
+ fmt.update({
+ 'url': rtmp.group('url'),
+ 'play_path': rtmp.group('playpath'),
+ 'app': rtmp.group('app'),
+ 'ext': 'flv',
+ 'format_id': format_id,
+ })
+ formats.append(fmt)
+
+ for mobile_url in mobile_urls:
+ media_url = mobile_url.get('mobileUrl')
+ if not media_url:
+ continue
+ format_id = mobile_url.get('targetMediaPlatform')
+ if determine_ext(media_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', entry_protocol='m3u8_native',
+ preference=-1, m3u8_id=format_id))
+ else:
+ formats.append({
+ 'url': media_url,
+ 'format_id': format_id,
+ 'preference': -1,
+ })
+
+ self._sort_formats(formats)
+
+ title = properties['title']
+ description = properties.get('description')
+ timestamp = int_or_none(properties.get('publish_date') or properties.get('create_date'))
+ duration = float_or_none(properties.get('duration_in_milliseconds'), 1000)
+ filesize = int_or_none(properties.get('total_storage_in_bytes'))
+ categories = [properties.get('category')]
+ tags = properties.get('tags', [])
+ thumbnails = [{
+ 'url': thumbnail['url'],
+ 'width': int_or_none(thumbnail.get('width')),
+ 'height': int_or_none(thumbnail.get('height')),
+ } for thumbnail in properties.get('thumbnails', []) if thumbnail.get('url')]
+
+ subtitles = {}
+ for caption in properties.get('captions', {}):
+ lang = caption.get('language_code')
+ subtitles_url = caption.get('url')
+ if lang and subtitles_url:
+ subtitles[lang] = [{
+ 'url': subtitles_url,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'formats': formats,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'filesize': filesize,
+ 'categories': categories,
+ 'tags': tags,
+ 'thumbnails': thumbnails,
+ 'subtitles': subtitles,
+ }
+
+
+class LimelightMediaIE(LimelightBaseIE):
+ IE_NAME = 'limelight'
+ _VALID_URL = r'(?:limelight:media:|http://link\.videoplatform\.limelight\.com/media/\??\bmediaId=)(?P<id>[a-z0-9]{32})'
+ _TESTS = [{
+ 'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86',
+ 'info_dict': {
+ 'id': '3ffd040b522b4485b6d84effc750cd86',
+ 'ext': 'flv',
+ 'title': 'HaP and the HB Prince Trailer',
+ 'description': 'md5:8005b944181778e313d95c1237ddb640',
+ 'thumbnail': 're:^https?://.*\.jpeg$',
+ 'duration': 144.23,
+ 'timestamp': 1244136834,
+ 'upload_date': '20090604',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # video with subtitles
+ 'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335',
+ 'info_dict': {
+ 'id': 'a3e00274d4564ec4a9b29b9466432335',
+ 'ext': 'flv',
+ 'title': '3Play Media Overview Video',
+ 'description': '',
+ 'thumbnail': 're:^https?://.*\.jpeg$',
+ 'duration': 78.101,
+ 'timestamp': 1338929955,
+ 'upload_date': '20120605',
+ 'subtitles': 'mincount:9',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }]
+ _PLAYLIST_SERVICE_PATH = 'media'
+ _API_PATH = 'media'
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ pc, mobile, metadata = self._extract(
+ video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', 'properties')
+
+ return self._extract_info(
+ pc['playlistItems'][0].get('streams', []),
+ mobile['mediaList'][0].get('mobileUrls', []) if mobile else [],
+ metadata)
+
+
+class LimelightChannelIE(LimelightBaseIE):
+ IE_NAME = 'limelight:channel'
+ _VALID_URL = r'(?:limelight:channel:|http://link\.videoplatform\.limelight\.com/media/\??\bchannelId=)(?P<id>[a-z0-9]{32})'
+ _TEST = {
+ 'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082',
+ 'info_dict': {
+ 'id': 'ab6a524c379342f9b23642917020c082',
+ 'title': 'Javascript Sample Code',
+ },
+ 'playlist_mincount': 3,
+ }
+ _PLAYLIST_SERVICE_PATH = 'channel'
+ _API_PATH = 'channels'
+
+ def _real_extract(self, url):
+ channel_id = self._match_id(url)
+
+ pc, mobile, medias = self._extract(
+ channel_id, 'getPlaylistByChannelId',
+ 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', 'media')
+
+ entries = [
+ self._extract_info(
+ pc['playlistItems'][i].get('streams', []),
+ mobile['mediaList'][i].get('mobileUrls', []) if mobile else [],
+ medias['media_list'][i])
+ for i in range(len(medias['media_list']))]
+
+ return self.playlist_result(entries, channel_id, pc['title'])
+
+
+class LimelightChannelListIE(LimelightBaseIE):
+ IE_NAME = 'limelight:channel_list'
+ _VALID_URL = r'(?:limelight:channel_list:|http://link\.videoplatform\.limelight\.com/media/\?.*?\bchannelListId=)(?P<id>[a-z0-9]{32})'
+ _TEST = {
+ 'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b',
+ 'info_dict': {
+ 'id': '301b117890c4465c8179ede21fd92e2b',
+ 'title': 'Website - Hero Player',
+ },
+ 'playlist_mincount': 2,
+ }
+ _PLAYLIST_SERVICE_PATH = 'channel_list'
+
+ def _real_extract(self, url):
+ channel_list_id = self._match_id(url)
+
+ channel_list = self._call_playlist_service(channel_list_id, 'getMobileChannelListById')
+
+ entries = [
+ self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel')
+ for channel in channel_list['channelList']]
+
+ return self.playlist_result(entries, channel_list_id, channel_list['title'])
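
The RTMP branch in _extract_info above relies on a single regex to split a Limelight stream URL into the connection URL, the application name, and the play path that an RTMP downloader needs. A minimal standalone sketch of that split, with a made-up stream URL:

    import re

    # Hypothetical Limelight-style RTMP URL; only the shape matters here.
    stream_url = 'rtmpe://csl.delve.limelight.com/delvestream/mp4:folder/video123.mp4'

    rtmp = re.search(
        r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', stream_url)
    if rtmp:
        print(rtmp.group('url'))       # rtmpe://csl.delve.limelight.com/delvestream
        print(rtmp.group('app'))       # delvestream
        print(rtmp.group('playpath'))  # mp4:folder/video123.mp4
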
diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py
index 378117270..d4e1ae99d 100644
--- a/youtube_dl/extractor/lynda.py
+++ b/youtube_dl/extractor/lynda.py
@@ -7,12 +7,12 @@ from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
- compat_urllib_request,
)
from ..utils import (
ExtractorError,
clean_html,
int_or_none,
+ sanitized_Request,
)
@@ -25,7 +25,7 @@ class LyndaBaseIE(InfoExtractor):
self._login()
def _login(self):
- (username, password) = self._get_login_info()
+ username, password = self._get_login_info()
if username is None:
return
@@ -35,7 +35,7 @@ class LyndaBaseIE(InfoExtractor):
'remember': 'false',
'stayPut': 'false'
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
login_page = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -64,7 +64,7 @@ class LyndaBaseIE(InfoExtractor):
'remember': 'false',
'stayPut': 'false',
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8'))
login_page = self._download_webpage(
request, None,
@@ -82,6 +82,15 @@ class LyndaBaseIE(InfoExtractor):
expected=True)
raise ExtractorError('Unable to log in')
+ def _logout(self):
+ username, _ = self._get_login_info()
+ if username is None:
+ return
+
+ self._download_webpage(
+ 'http://www.lynda.com/ajax/logout.aspx', None,
+ 'Logging out', 'Unable to log out', fatal=False)
+
class LyndaIE(LyndaBaseIE):
IE_NAME = 'lynda'
@@ -108,50 +117,47 @@ class LyndaIE(LyndaBaseIE):
def _real_extract(self, url):
video_id = self._match_id(url)
- page = self._download_webpage(
+ video = self._download_json(
'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
video_id, 'Downloading video JSON')
- video_json = json.loads(page)
- if 'Status' in video_json:
+ if 'Status' in video:
raise ExtractorError(
- 'lynda returned error: %s' % video_json['Message'], expected=True)
+ 'lynda returned error: %s' % video['Message'], expected=True)
- if video_json['HasAccess'] is False:
+ if video.get('HasAccess') is False:
self.raise_login_required('Video %s is only available for members' % video_id)
- video_id = compat_str(video_json['ID'])
- duration = video_json['DurationInSeconds']
- title = video_json['Title']
+ video_id = compat_str(video.get('ID') or video_id)
+ duration = int_or_none(video.get('DurationInSeconds'))
+ title = video['Title']
formats = []
- fmts = video_json.get('Formats')
+ fmts = video.get('Formats')
if fmts:
- formats.extend([
- {
- 'url': fmt['Url'],
- 'ext': fmt['Extension'],
- 'width': fmt['Width'],
- 'height': fmt['Height'],
- 'filesize': fmt['FileSize'],
- 'format_id': str(fmt['Resolution'])
- } for fmt in fmts])
-
- prioritized_streams = video_json.get('PrioritizedStreams')
+ formats.extend([{
+ 'url': f['Url'],
+ 'ext': f.get('Extension'),
+ 'width': int_or_none(f.get('Width')),
+ 'height': int_or_none(f.get('Height')),
+ 'filesize': int_or_none(f.get('FileSize')),
+ 'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None,
+ } for f in fmts if f.get('Url')])
+
+ prioritized_streams = video.get('PrioritizedStreams')
if prioritized_streams:
- formats.extend([
- {
+ for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
+ formats.extend([{
'url': video_url,
'width': int_or_none(format_id),
- 'format_id': format_id,
- } for format_id, video_url in prioritized_streams['0'].items()
- ])
+ 'format_id': '%s-%s' % (prioritized_stream_id, format_id),
+ } for format_id, video_url in prioritized_stream.items()])
self._check_formats(formats, video_id)
self._sort_formats(formats)
- subtitles = self.extract_subtitles(video_id, page)
+ subtitles = self.extract_subtitles(video_id)
return {
'id': video_id,
@@ -182,7 +188,7 @@ class LyndaIE(LyndaBaseIE):
if srt:
return srt
- def _get_subtitles(self, video_id, webpage):
+ def _get_subtitles(self, video_id):
url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
subs = self._download_json(url, None, False)
if subs:
@@ -204,12 +210,13 @@ class LyndaCourseIE(LyndaBaseIE):
course_path = mobj.group('coursepath')
course_id = mobj.group('courseid')
- page = self._download_webpage(
+ course = self._download_json(
'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
course_id, 'Downloading course JSON')
- course_json = json.loads(page)
- if 'Status' in course_json and course_json['Status'] == 'NotFound':
+ self._logout()
+
+ if course.get('Status') == 'NotFound':
raise ExtractorError(
'Course %s does not exist' % course_id, expected=True)
@@ -219,12 +226,13 @@ class LyndaCourseIE(LyndaBaseIE):
# Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
# by single video API anymore
- for chapter in course_json['Chapters']:
- for video in chapter['Videos']:
- if video['HasAccess'] is False:
+ for chapter in course['Chapters']:
+ for video in chapter.get('Videos', []):
+ if video.get('HasAccess') is False:
unaccessible_videos += 1
continue
- videos.append(video['ID'])
+ if video.get('ID'):
+ videos.append(video['ID'])
if unaccessible_videos > 0:
self._downloader.report_warning(
@@ -237,6 +245,6 @@ class LyndaCourseIE(LyndaBaseIE):
'Lynda')
for video_id in videos]
- course_title = course_json['Title']
+ course_title = course.get('Title')
return self.playlist_result(entries, course_id, course_title)
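
The PrioritizedStreams rewrite above stops hardcoding group '0' and instead walks every stream group, prefixing each format id with its group id. A self-contained sketch of the resulting format list, assuming the payload shape implied by the old code (per group, width keyed to URL; values are placeholders):

    # Assumed shape of Lynda's PrioritizedStreams payload: group id -> {width: url}.
    prioritized_streams = {
        '0': {
            '360': 'http://example.com/video-360.mp4',
            '720': 'http://example.com/video-720.mp4',
        },
    }

    formats = []
    for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
        formats.extend([{
            'url': video_url,
            'width': int(format_id),
            'format_id': '%s-%s' % (prioritized_stream_id, format_id),
        } for format_id, video_url in prioritized_stream.items()])

    print(sorted(f['format_id'] for f in formats))  # ['0-360', '0-720']
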
diff --git a/youtube_dl/extractor/mdr.py b/youtube_dl/extractor/mdr.py
index fc7499958..88334889e 100644
--- a/youtube_dl/extractor/mdr.py
+++ b/youtube_dl/extractor/mdr.py
@@ -1,64 +1,169 @@
+# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ determine_ext,
+ int_or_none,
+ parse_duration,
+ parse_iso8601,
+ xpath_text,
+)
class MDRIE(InfoExtractor):
- _VALID_URL = r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)'
+ IE_DESC = 'MDR.DE and KiKA'
+ _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html'
- # No tests, MDR regularily deletes its videos
- _TEST = {
+ _TESTS = [{
+ # MDR regularly deletes its videos
'url': 'http://www.mdr.de/fakt/video189002.html',
'only_matching': True,
- }
+ }, {
+ # audio
+ 'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html',
+ 'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa',
+ 'info_dict': {
+ 'id': '1312272',
+ 'ext': 'mp3',
+ 'title': 'Feuilleton vom 30. Oktober 2015',
+ 'duration': 250,
+ 'uploader': 'MITTELDEUTSCHER RUNDFUNK',
+ },
+ }, {
+ 'url': 'http://www.kika.de/baumhaus/videos/video19636.html',
+ 'md5': '4930515e36b06c111213e80d1e4aad0e',
+ 'info_dict': {
+ 'id': '19636',
+ 'ext': 'mp4',
+ 'title': 'Baumhaus vom 30. Oktober 2015',
+ 'duration': 134,
+ 'uploader': 'KIKA',
+ },
+ }, {
+ 'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html',
+ 'md5': '5fe9c4dd7d71e3b238f04b8fdd588357',
+ 'info_dict': {
+ 'id': '8182',
+ 'ext': 'mp4',
+ 'title': 'Beutolomäus und der geheime Weihnachtswunsch',
+ 'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd',
+ 'timestamp': 1419047100,
+ 'upload_date': '20141220',
+ 'duration': 4628,
+ 'uploader': 'KIKA',
+ },
+ }, {
+ 'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
- m = re.match(self._VALID_URL, url)
- video_id = m.group('video_id')
- domain = m.group('domain')
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ data_url = self._search_regex(
+ r'dataURL\s*:\s*(["\'])(?P<url>/.+/(?:video|audio)[0-9]+-avCustom\.xml)\1',
+ webpage, 'data url', group='url')
- # determine title and media streams from webpage
- html = self._download_webpage(url, video_id)
+ doc = self._download_xml(
+ compat_urlparse.urljoin(url, data_url), video_id)
- title = self._html_search_regex(r'<h[12]>(.*?)</h[12]>', html, 'title')
- xmlurl = self._search_regex(
- r'dataURL:\'(/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, 'XML URL')
+ title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True)
- doc = self._download_xml(domain + xmlurl, video_id)
formats = []
- for a in doc.findall('./assets/asset'):
- url_el = a.find('./progressiveDownloadUrl')
- if url_el is None:
- continue
- abr = int(a.find('bitrateAudio').text) // 1000
- media_type = a.find('mediaType').text
- format = {
- 'abr': abr,
- 'filesize': int(a.find('fileSize').text),
- 'url': url_el.text,
- }
-
- vbr_el = a.find('bitrateVideo')
- if vbr_el is None:
- format.update({
- 'vcodec': 'none',
- 'format_id': '%s-%d' % (media_type, abr),
- })
- else:
- vbr = int(vbr_el.text) // 1000
- format.update({
- 'vbr': vbr,
- 'width': int(a.find('frameWidth').text),
- 'height': int(a.find('frameHeight').text),
- 'format_id': '%s-%d' % (media_type, vbr),
- })
- formats.append(format)
+ processed_urls = []
+ for asset in doc.findall('./assets/asset'):
+ for source in (
+ 'progressiveDownload',
+ 'dynamicHttpStreamingRedirector',
+ 'adaptiveHttpStreamingRedirector'):
+ url_el = asset.find('./%sUrl' % source)
+ if url_el is None:
+ continue
+
+ video_url = url_el.text
+ if video_url in processed_urls:
+ continue
+
+ processed_urls.append(video_url)
+
+ vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000)
+ abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000)
+
+ ext = determine_ext(url_el.text)
+ if ext == 'm3u8':
+ url_formats = self._extract_m3u8_formats(
+ video_url, video_id, 'mp4', entry_protocol='m3u8_native',
+ preference=0, m3u8_id='HLS', fatal=False)
+ elif ext == 'f4m':
+ url_formats = self._extract_f4m_formats(
+ video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id,
+ preference=0, f4m_id='HDS', fatal=False)
+ else:
+ media_type = xpath_text(asset, './mediaType', 'media type', default='MP4')
+ filesize = int_or_none(xpath_text(asset, './fileSize', 'file size'))
+
+ f = {
+ 'url': video_url,
+ 'format_id': '%s-%d' % (media_type, vbr or abr),
+ 'filesize': filesize,
+ 'abr': abr,
+ 'preference': 1,
+ }
+
+ if vbr:
+ width = int_or_none(xpath_text(asset, './frameWidth', 'width'))
+ height = int_or_none(xpath_text(asset, './frameHeight', 'height'))
+ f.update({
+ 'vbr': vbr,
+ 'width': width,
+ 'height': height,
+ })
+
+ url_formats = [f]
+
+ if not url_formats:
+ continue
+
+ if not vbr:
+ for f in url_formats:
+ abr = f.get('tbr') or abr
+ if 'tbr' in f:
+ del f['tbr']
+ f.update({
+ 'abr': abr,
+ 'vcodec': 'none',
+ })
+
+ formats.extend(url_formats)
+
self._sort_formats(formats)
+ description = xpath_text(doc, './broadcast/broadcastDescription', 'description')
+ timestamp = parse_iso8601(
+ xpath_text(
+ doc, [
+ './broadcast/broadcastDate',
+ './broadcast/broadcastStartDate',
+ './broadcast/broadcastEndDate'],
+ 'timestamp', default=None))
+ duration = parse_duration(xpath_text(doc, './duration', 'duration'))
+ uploader = xpath_text(doc, './rights', 'uploader')
+
return {
'id': video_id,
'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'uploader': uploader,
'formats': formats,
}
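
The new MDR extractor reads bitrates from the avCustom XML with xpath_text and scales them via int_or_none's scale argument, converting bits per second down to kbit/s. A standalone approximation using only the standard library (element names are the ones the extractor queries; values are made up):

    import xml.etree.ElementTree as ET

    asset = ET.fromstring(
        '<asset>'
        '<bitrateVideo>1628000</bitrateVideo>'
        '<bitrateAudio>128000</bitrateAudio>'
        '</asset>')

    def scaled_int(text, scale=1):
        # Rough stand-in for int_or_none(xpath_text(...), scale).
        return int(text) // scale if text is not None else None

    vbr = scaled_int(asset.findtext('./bitrateVideo'), 1000)
    abr = scaled_int(asset.findtext('./bitrateAudio'), 1000)
    print(vbr, abr)  # 1628 128
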
diff --git a/youtube_dl/extractor/megavideoz.py b/youtube_dl/extractor/megavideoz.py
deleted file mode 100644
index af7ff07ea..000000000
--- a/youtube_dl/extractor/megavideoz.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# encoding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- float_or_none,
- xpath_text,
-)
-
-
-class MegaVideozIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?megavideoz\.eu/video/(?P<id>[^/]+)(?:/(?P<display_id>[^/]+))?'
- _TEST = {
- 'url': 'http://megavideoz.eu/video/WM6UB919XMXH/SMPTE-Universal-Film-Leader',
- 'info_dict': {
- 'id': '48723',
- 'display_id': 'SMPTE-Universal-Film-Leader',
- 'ext': 'mp4',
- 'title': 'SMPTE Universal Film Leader',
- 'thumbnail': 're:https?://.*?\.jpg',
- 'duration': 10.93,
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- display_id = mobj.group('display_id') or video_id
-
- webpage = self._download_webpage(url, display_id)
-
- if any(p in webpage for p in ('>Video Not Found<', '>404 Error<')):
- raise ExtractorError('Video %s does not exist' % video_id, expected=True)
-
- config = self._download_xml(
- self._search_regex(
- r"var\s+cnf\s*=\s*'([^']+)'", webpage, 'cnf url'),
- display_id)
-
- video_url = xpath_text(config, './file', 'video url', fatal=True)
- title = xpath_text(config, './title', 'title', fatal=True)
- thumbnail = xpath_text(config, './image', 'thumbnail')
- duration = float_or_none(xpath_text(config, './duration', 'duration'))
- video_id = xpath_text(config, './mediaid', 'video id') or video_id
-
- return {
- 'id': video_id,
- 'display_id': display_id,
- 'url': video_url,
- 'title': title,
- 'thumbnail': thumbnail,
- 'duration': duration
- }
diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py
index 6e2e73a51..67d6271e1 100644
--- a/youtube_dl/extractor/metacafe.py
+++ b/youtube_dl/extractor/metacafe.py
@@ -7,12 +7,12 @@ from ..compat import (
compat_parse_qs,
compat_urllib_parse,
compat_urllib_parse_unquote,
- compat_urllib_request,
)
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
+ sanitized_Request,
)
@@ -117,7 +117,7 @@ class MetacafeIE(InfoExtractor):
'filters': '0',
'submit': "Continue - I'm over 18",
}
- request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+ request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self.report_age_confirmation()
self._download_webpage(request, None, False, 'Unable to confirm age')
@@ -142,7 +142,7 @@ class MetacafeIE(InfoExtractor):
return self.url_result('theplatform:%s' % ext_id, 'ThePlatform')
# Retrieve video webpage to extract further information
- req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
+ req = sanitized_Request('http://www.metacafe.com/watch/%s/' % video_id)
# AnyClip videos require the flashversion cookie so that we get the link
# to the mp4 file
@@ -154,10 +154,10 @@ class MetacafeIE(InfoExtractor):
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
video_url = None
- mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
+ mobj = re.search(r'(?m)&(?:media|video)URL=([^&]+)', webpage)
if mobj is not None:
mediaURL = compat_urllib_parse_unquote(mobj.group(1))
- video_ext = mediaURL[-3:]
+ video_ext = determine_ext(mediaURL)
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
@@ -229,7 +229,7 @@ class MetacafeIE(InfoExtractor):
age_limit = (
18
- if re.search(r'"contentRating":"restricted"', webpage)
+ if re.search(r'(?:"contentRating":|"rating",)"restricted"', webpage)
else 0)
if isinstance(video_url, list):
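
Two small but meaningful Metacafe fixes above: the flashvars regex now also accepts &videoURL=, and the extension comes from determine_ext rather than slicing the last three characters, which mangles extensions like 'webm'. A sketch of the lookup on a fabricated flashvars string:

    import re

    try:
        from urllib.parse import unquote  # Python 3
    except ImportError:
        from urllib import unquote  # Python 2

    # Fabricated flashvars blob; newer pages use videoURL instead of mediaURL.
    webpage = 'autoplay=1&videoURL=http%3A%2F%2Fexample.com%2Fclip.webm&ts=0'

    mobj = re.search(r'(?m)&(?:media|video)URL=([^&]+)', webpage)
    media_url = unquote(mobj.group(1))
    ext = media_url.rpartition('.')[2]  # simplified determine_ext
    print(media_url, ext)  # http://example.com/clip.webm webm
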
diff --git a/youtube_dl/extractor/minhateca.py b/youtube_dl/extractor/minhateca.py
index 14934b7ec..e46b23a6f 100644
--- a/youtube_dl/extractor/minhateca.py
+++ b/youtube_dl/extractor/minhateca.py
@@ -2,14 +2,12 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
int_or_none,
parse_duration,
parse_filesize,
+ sanitized_Request,
)
@@ -39,7 +37,7 @@ class MinhatecaIE(InfoExtractor):
('fileId', video_id),
('__RequestVerificationToken', token),
]
- req = compat_urllib_request.Request(
+ req = sanitized_Request(
'http://minhateca.com.br/action/License/Download',
data=compat_urllib_parse.urlencode(token_data))
req.add_header('Content-Type', 'application/x-www-form-urlencoded')
diff --git a/youtube_dl/extractor/miomio.py b/youtube_dl/extractor/miomio.py
index a784fc5fb..170ebd9eb 100644
--- a/youtube_dl/extractor/miomio.py
+++ b/youtube_dl/extractor/miomio.py
@@ -8,6 +8,7 @@ from ..utils import (
xpath_text,
int_or_none,
ExtractorError,
+ sanitized_Request,
)
@@ -51,6 +52,8 @@ class MioMioIE(InfoExtractor):
mioplayer_path = self._search_regex(
r'src="(/mioplayer/[^"]+)"', webpage, 'ref_path')
+ http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path}
+
xml_config = self._search_regex(
r'flashvars="type=(?:sina|video)&amp;(.+?)&amp;',
webpage, 'xml config')
@@ -60,14 +63,12 @@ class MioMioIE(InfoExtractor):
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)),
video_id)
- # the following xml contains the actual configuration information on the video file(s)
- vid_config = self._download_xml(
+ vid_config_request = sanitized_Request(
'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
- video_id)
+ headers=http_headers)
- http_headers = {
- 'Referer': 'http://www.miomio.tv%s' % mioplayer_path,
- }
+ # the following xml contains the actual configuration information on the video file(s)
+ vid_config = self._download_xml(vid_config_request, video_id)
if not int_or_none(xpath_text(vid_config, 'timelength')):
raise ExtractorError('Unable to load videos!', expected=True)
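
The MioMio change is mostly about ordering: the Referer header is now attached to the sina.php config request itself, presumably because that endpoint validates it, instead of being saved only for the format dicts afterwards. A minimal sketch of building such a request with the standard library:

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    mioplayer_path = '/mioplayer/'  # illustrative; really scraped from the page
    http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path}

    req = Request(
        'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?id=123',
        headers=http_headers)
    print(req.get_header('Referer'))  # http://www.miomio.tv/mioplayer/
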
diff --git a/youtube_dl/extractor/mit.py b/youtube_dl/extractor/mit.py
index f088ab9e2..29ca45778 100644
--- a/youtube_dl/extractor/mit.py
+++ b/youtube_dl/extractor/mit.py
@@ -86,7 +86,7 @@ class MITIE(TechTVMITIE):
webpage = self._download_webpage(url, page_title)
embed_url = self._search_regex(
r'<iframe .*?src="(.+?)"', webpage, 'embed url')
- return self.url_result(embed_url, ie='TechTVMIT')
+ return self.url_result(embed_url)
class OCWMITIE(InfoExtractor):
diff --git a/youtube_dl/extractor/mitele.py b/youtube_dl/extractor/mitele.py
index 852d72266..c595f2077 100644
--- a/youtube_dl/extractor/mitele.py
+++ b/youtube_dl/extractor/mitele.py
@@ -1,74 +1,89 @@
from __future__ import unicode_literals
-import json
-
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
- compat_urllib_parse_unquote,
compat_urlparse,
)
from ..utils import (
+ encode_dict,
get_element_by_attribute,
- parse_duration,
- strip_jsonp,
+ int_or_none,
)
class MiTeleIE(InfoExtractor):
- IE_NAME = 'mitele.es'
+ IE_DESC = 'mitele.es'
_VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
+ 'md5': '0ff1a13aebb35d9bc14081ff633dd324',
'info_dict': {
- 'id': '0fce117d',
- 'ext': 'mp4',
- 'title': 'Programa 144 - Tor, la web invisible',
- 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
+ 'id': '0NF1jJnxS1Wu3pHrmvFyw2',
'display_id': 'programa-144',
+ 'ext': 'flv',
+ 'title': 'Tor, la web invisible',
+ 'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
+ 'thumbnail': 're:(?i)^https?://.*\.jpg$',
'duration': 2913,
},
- 'params': {
- # m3u8 download
- 'skip_download': True,
- },
}]
def _real_extract(self, url):
- episode = self._match_id(url)
- webpage = self._download_webpage(url, episode)
- embed_data_json = self._search_regex(
- r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
- ).replace('\'', '"')
- embed_data = json.loads(embed_data_json)
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+
+ config_url = self._search_regex(
+ r'data-config\s*=\s*"([^"]+)"', webpage, 'data config url')
+ config_url = compat_urlparse.urljoin(url, config_url)
- domain = embed_data['mediaUrl']
- if not domain.startswith('http'):
- # only happens in telecinco.es videos
- domain = 'http://' + domain
- info_url = compat_urlparse.urljoin(
- domain,
- compat_urllib_parse_unquote(embed_data['flashvars']['host'])
- )
- info_el = self._download_xml(info_url, episode).find('./video/info')
+ config = self._download_json(
+ config_url, display_id, 'Downloading config JSON')
- video_link = info_el.find('videoUrl/link').text
- token_query = compat_urllib_parse.urlencode({'id': video_link})
- token_info = self._download_json(
- embed_data['flashvars']['ov_tk'] + '?' + token_query,
- episode,
- transform_source=strip_jsonp
- )
- formats = self._extract_m3u8_formats(
- token_info['tokenizedUrl'], episode, ext='mp4')
+ mmc = self._download_json(
+ config['services']['mmc'], display_id, 'Downloading mmc JSON')
+
+ formats = []
+ for location in mmc['locations']:
+ gat = self._proto_relative_url(location.get('gat'), 'http:')
+ bas = location.get('bas')
+ loc = location.get('loc')
+ ogn = location.get('ogn')
+ if None in (gat, bas, loc, ogn):
+ continue
+ token_data = {
+ 'bas': bas,
+ 'icd': loc,
+ 'ogn': ogn,
+ 'sta': '0',
+ }
+ media = self._download_json(
+ '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))),
+ display_id, 'Downloading %s JSON' % location['loc'])
+ file_ = media.get('file')
+ if not file_:
+ continue
+ formats.extend(self._extract_f4m_formats(
+ file_ + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
+ display_id, f4m_id=loc))
+
+ title = self._search_regex(
+ r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>', webpage, 'title')
+
+ video_id = self._search_regex(
+ r'data-media-id\s*=\s*"([^"]+)"', webpage,
+ 'data media id', default=None) or display_id
+ thumbnail = config.get('poster', {}).get('imageUrl')
+ duration = int_or_none(mmc.get('duration'))
return {
- 'id': embed_data['videoId'],
- 'display_id': episode,
- 'title': info_el.find('title').text,
- 'formats': formats,
+ 'id': video_id,
+ 'display_id': display_id,
+ 'title': title,
'description': get_element_by_attribute('class', 'text', webpage),
- 'thumbnail': info_el.find('thumb').text,
- 'duration': parse_duration(info_el.find('duration').text),
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
}
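
The rewritten mitele extractor asks each mmc location's gat endpoint for a tokenized media URL, passing bas/icd/ogn/sta as query parameters. A sketch of the token URL construction with placeholder values (encode_dict in the patch additionally UTF-8-encodes keys and values so Python 2's urlencode copes; plain urlencode suffices here):

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2

    gat = 'http://gat.example.com/token'  # placeholder; comes from the mmc JSON
    token_data = {
        'bas': 'bas-value',
        'icd': 'loc-value',
        'ogn': 'mitele',
        'sta': '0',
    }

    token_url = '%s/?%s' % (gat, urlencode(sorted(token_data.items())))
    print(token_url)
    # http://gat.example.com/token/?bas=bas-value&icd=loc-value&ogn=mitele&sta=0
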
diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py
index d47aeceda..c2b7ed9ab 100644
--- a/youtube_dl/extractor/mixcloud.py
+++ b/youtube_dl/extractor/mixcloud.py
@@ -64,7 +64,8 @@ class MixcloudIE(InfoExtractor):
preview_url = self._search_regex(
r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url')
- song_url = preview_url.replace('/previews/', '/c/originals/')
+ song_url = re.sub(r'audiocdn(\d+)', r'stream\1', preview_url)
+ song_url = song_url.replace('/previews/', '/c/originals/')
if not self._check_url(song_url, track_id, 'mp3'):
song_url = song_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
if not self._check_url(song_url, track_id, 'm4a'):
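
The Mixcloud fix maps the preview host onto the streaming host before swapping the path; on a hypothetical preview URL of the targeted shape:

    import re

    preview_url = 'https://audiocdn03.mixcloud.com/previews/some-track.mp3'

    song_url = re.sub(r'audiocdn(\d+)', r'stream\1', preview_url)
    song_url = song_url.replace('/previews/', '/c/originals/')
    print(song_url)  # https://stream03.mixcloud.com/c/originals/some-track.mp3
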
diff --git a/youtube_dl/extractor/moevideo.py b/youtube_dl/extractor/moevideo.py
index 5a66302f6..d930b9634 100644
--- a/youtube_dl/extractor/moevideo.py
+++ b/youtube_dl/extractor/moevideo.py
@@ -5,13 +5,11 @@ import json
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
int_or_none,
+ sanitized_Request,
)
@@ -80,7 +78,7 @@ class MoeVideoIE(InfoExtractor):
]
r_json = json.dumps(r)
post = compat_urllib_parse.urlencode({'r': r_json})
- req = compat_urllib_request.Request(self._API_URL, post)
+ req = sanitized_Request(self._API_URL, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
response = self._download_json(req, video_id)
diff --git a/youtube_dl/extractor/mofosex.py b/youtube_dl/extractor/mofosex.py
index 9bf99a54a..f8226cbb2 100644
--- a/youtube_dl/extractor/mofosex.py
+++ b/youtube_dl/extractor/mofosex.py
@@ -7,8 +7,8 @@ from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
- compat_urllib_request,
)
+from ..utils import sanitized_Request
class MofosexIE(InfoExtractor):
@@ -29,7 +29,7 @@ class MofosexIE(InfoExtractor):
video_id = mobj.group('id')
url = 'http://www.' + mobj.group('url')
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py
index 69e4bcd1a..f6bf94f2f 100644
--- a/youtube_dl/extractor/moniker.py
+++ b/youtube_dl/extractor/moniker.py
@@ -5,19 +5,17 @@ import os.path
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
remove_start,
+ sanitized_Request,
)
class MonikerIE(InfoExtractor):
IE_DESC = 'allmyvideos.net and vidspot.net'
- _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)'
+ _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?:(?:2|v)/v-)?(?P<id>[a-zA-Z0-9_-]+)'
_TESTS = [{
'url': 'http://allmyvideos.net/jih3nce3x6wn',
@@ -46,6 +44,18 @@ class MonikerIE(InfoExtractor):
}, {
'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
'only_matching': True,
+ }, {
+ 'url': 'http://vidspot.net/2/v-ywDf99',
+ 'md5': '5f8254ce12df30479428b0152fb8e7ba',
+ 'info_dict': {
+ 'id': 'ywDf99',
+ 'ext': 'mp4',
+ 'title': 'IL FAIT LE MALIN EN PORSHE CAYENNE ( mais pas pour longtemps)',
+ 'description': 'IL FAIT LE MALIN EN PORSHE CAYENNE.',
+ },
+ }, {
+ 'url': 'http://allmyvideos.net/v/v-HXZm5t',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -64,18 +74,30 @@ class MonikerIE(InfoExtractor):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error), expected=True)
- fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
- data = dict(fields)
+ builtin_url = self._search_regex(
+ r'<iframe[^>]+src=(["\'])(?P<url>.+?/builtin-.+?)\1',
+ orig_webpage, 'builtin URL', default=None, group='url')
- post = compat_urllib_parse.urlencode(data)
- headers = {
- b'Content-Type': b'application/x-www-form-urlencoded',
- }
- req = compat_urllib_request.Request(url, post, headers)
- webpage = self._download_webpage(
- req, video_id, note='Downloading video page ...')
+ if builtin_url:
+ req = sanitized_Request(builtin_url)
+ req.add_header('Referer', url)
+ webpage = self._download_webpage(req, video_id, 'Downloading builtin page')
+ title = self._og_search_title(orig_webpage).strip()
+ description = self._og_search_description(orig_webpage).strip()
+ else:
+ fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
+ data = dict(fields)
+
+ post = compat_urllib_parse.urlencode(data)
+ headers = {
+ b'Content-Type': b'application/x-www-form-urlencoded',
+ }
+ req = sanitized_Request(url, post, headers)
+ webpage = self._download_webpage(
+ req, video_id, note='Downloading video page ...')
- title = os.path.splitext(data['fname'])[0]
+ title = os.path.splitext(data['fname'])[0]
+ description = None
# Could be several links with different quality
links = re.findall(r'"file" : "?(.+?)",', webpage)
@@ -89,5 +111,6 @@ class MonikerIE(InfoExtractor):
return {
'id': video_id,
'title': title,
+ 'description': description,
'formats': formats,
}
diff --git a/youtube_dl/extractor/mooshare.py b/youtube_dl/extractor/mooshare.py
index 7603af5e2..7cc7f054f 100644
--- a/youtube_dl/extractor/mooshare.py
+++ b/youtube_dl/extractor/mooshare.py
@@ -3,12 +3,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -59,7 +57,7 @@ class MooshareIE(InfoExtractor):
'hash': hash_key,
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
diff --git a/youtube_dl/extractor/movieclips.py b/youtube_dl/extractor/movieclips.py
index 04e17d055..1564cb71f 100644
--- a/youtube_dl/extractor/movieclips.py
+++ b/youtube_dl/extractor/movieclips.py
@@ -1,80 +1,40 @@
+# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
-from ..compat import (
- compat_str,
-)
-from ..utils import (
- ExtractorError,
- clean_html,
-)
+from ..utils import sanitized_Request
class MovieClipsIE(InfoExtractor):
- _VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
+ _VALID_URL = r'https?://(?:www\.)?movieclips\.com/videos/(?P<id>[^/?#]+)'
_TEST = {
- 'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
+ 'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597?autoPlay=true&playlistId=5',
'info_dict': {
- 'id': 'Wy7ZU',
- 'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
+ 'id': 'pKIGmG83AqD9',
+ 'display_id': 'warcraft-trailer-1-561180739597',
'ext': 'mp4',
- 'title': 'My Week with Marilyn - Do You Love Me?',
- 'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
+ 'title': 'Warcraft Trailer 1',
+ 'description': 'Watch Trailer 1 from Warcraft (2016). Legendary’s WARCRAFT is a 3D epic adventure of world-colliding conflict based.',
'thumbnail': 're:^https?://.*\.jpg$',
},
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
+ 'add_ie': ['ThePlatform'],
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- display_id = mobj.group('display_id')
- show_id = display_id or video_id
-
- config = self._download_xml(
- 'http://config.movieclips.com/player/config/%s' % video_id,
- show_id, 'Downloading player config')
-
- if config.find('./country-region').text == 'false':
- raise ExtractorError(
- '%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True)
-
- properties = config.find('./video/properties')
- smil_file = properties.attrib['smil_file']
+ display_id = self._match_id(url)
- smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
- base_url = smil.find('./head/meta').attrib['base']
-
- formats = []
- for video in smil.findall('./body/switch/video'):
- vbr = int(video.attrib['system-bitrate']) / 1000
- src = video.attrib['src']
- formats.append({
- 'url': base_url,
- 'play_path': src,
- 'ext': src.split(':')[0],
- 'vbr': vbr,
- 'format_id': '%dk' % vbr,
- })
-
- self._sort_formats(formats)
-
- title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title'])
- description = clean_html(compat_str(properties.attrib['clip_description']))
- thumbnail = properties.attrib['image']
- categories = properties.attrib['clip_categories'].split(',')
+ req = sanitized_Request(url)
+ # it doesn't work if it thinks the browser is too old
+ req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/43.0 (Chrome)')
+ webpage = self._download_webpage(req, display_id)
+ theplatform_link = self._html_search_regex(r'src="(http://player\.theplatform\.com/p/.*?)"', webpage, 'theplatform link')
+ title = self._html_search_regex(r'<title[^>]*>([^>]+)-\s*\d+\s*\|\s*Movieclips\.com</title>', webpage, 'title')
+ description = self._html_search_meta('description', webpage)
return {
- 'id': video_id,
- 'display_id': display_id,
+ '_type': 'url_transparent',
+ 'url': theplatform_link,
'title': title,
+ 'display_id': display_id,
'description': description,
- 'thumbnail': thumbnail,
- 'categories': categories,
- 'formats': formats,
}
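
Instead of parsing SMIL itself, the extractor now hands off to ThePlatform through a url_transparent result: youtube-dl resolves 'url' with the matching extractor, and the fields supplied alongside it (title, display_id, description) override whatever that extractor returns. The shape, with a hypothetical player URL:

    result = {
        '_type': 'url_transparent',
        'url': 'http://player.theplatform.com/p/PROVIDER/select/GUID',  # hypothetical
        'title': 'Warcraft Trailer 1',
        'display_id': 'warcraft-trailer-1-561180739597',
        'description': 'Watch Trailer 1 from Warcraft (2016)...',
    }
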
diff --git a/youtube_dl/extractor/movshare.py b/youtube_dl/extractor/movshare.py
deleted file mode 100644
index 6101063f2..000000000
--- a/youtube_dl/extractor/movshare.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class MovShareIE(NovaMovIE):
- IE_NAME = 'movshare'
- IE_DESC = 'MovShare'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'movshare\.(?:net|sx|ag)'}
-
- _HOST = 'www.movshare.net'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>'
- _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>'
-
- _TEST = {
- 'url': 'http://www.movshare.net/video/559e28be54d96',
- 'md5': 'abd31a2132947262c50429e1d16c1bfd',
- 'info_dict': {
- 'id': '559e28be54d96',
- 'ext': 'flv',
- 'title': 'dissapeared image',
- 'description': 'optical illusion dissapeared image magic illusion',
- }
- }
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index a597714e9..d887583e6 100644
--- a/youtube_dl/extractor/mtv.py
+++ b/youtube_dl/extractor/mtv.py
@@ -5,7 +5,6 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
- compat_urllib_request,
compat_str,
)
from ..utils import (
@@ -13,6 +12,7 @@ from ..utils import (
find_xpath_attr,
fix_xml_ampersands,
HEADRequest,
+ sanitized_Request,
unescapeHTML,
url_basename,
RegexNotFoundError,
@@ -53,7 +53,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
def _extract_mobile_video_formats(self, mtvn_id):
webpage_url = self._MOBILE_TEMPLATE % mtvn_id
- req = compat_urllib_request.Request(webpage_url)
+ req = sanitized_Request(webpage_url)
# Otherwise we get a webpage that would execute some javascript
req.add_header('User-Agent', 'curl/7')
webpage = self._download_webpage(req, mtvn_id,
@@ -200,7 +200,13 @@ class MTVServicesInfoExtractor(InfoExtractor):
if mgid is None or ':' not in mgid:
mgid = self._search_regex(
[r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
- webpage, 'mgid')
+ webpage, 'mgid', default=None)
+
+ if not mgid:
+ sm4_embed = self._html_search_meta(
+ 'sm4:video:embed', webpage, 'sm4 embed', default='')
+ mgid = self._search_regex(
+ r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid')
videos_info = self._get_videos_info(mgid)
return videos_info
@@ -222,6 +228,13 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
},
}
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media\.mtvnservices\.com/embed/.+?)\1', webpage)
+ if mobj:
+ return mobj.group('url')
+
def _get_feed_url(self, uri):
video_id = self._id_from_uri(uri)
site_id = uri.replace(video_id, '')
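
The new static _extract_url helper lets other extractors (typically the generic one) pull mtvnservices embeds out of arbitrary pages. Exercising the same regex on a fabricated snippet of markup:

    import re

    webpage = ('<div><iframe width="640" frameborder="0" '
               'src="//media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906">'
               '</iframe></div>')  # markup is fabricated

    mobj = re.search(
        r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media\.mtvnservices\.com/embed/.+?)\1',
        webpage)
    if mobj:
        print(mobj.group('url'))
        # //media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906
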
diff --git a/youtube_dl/extractor/musicvault.py b/youtube_dl/extractor/musicvault.py
deleted file mode 100644
index 0e46ac7c1..000000000
--- a/youtube_dl/extractor/musicvault.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-
-
-class MusicVaultIE(InfoExtractor):
- _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html'
- _TEST = {
- 'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html',
- 'md5': '3adcbdb3dcc02d647539e53f284ba171',
- 'info_dict': {
- 'id': '1010863',
- 'ext': 'mp4',
- 'uploader_id': 'the-allman-brothers-band',
- 'title': 'Straight from the Heart',
- 'duration': 244,
- 'uploader': 'The Allman Brothers Band',
- 'thumbnail': 're:^https?://.*/thumbnail/.*',
- 'upload_date': '20131219',
- 'location': 'Capitol Theatre (Passaic, NJ)',
- 'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981',
- 'timestamp': int,
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('display_id')
- webpage = self._download_webpage(url, display_id)
-
- thumbnail = self._search_regex(
- r'<meta itemprop="thumbnail" content="([^"]+)"',
- webpage, 'thumbnail', fatal=False)
-
- data_div = self._search_regex(
- r'(?s)<div class="data">(.*?)</div>', webpage, 'data fields')
- uploader = self._html_search_regex(
- r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False)
- title = self._html_search_regex(
- r'<h2.*?>(.*?)</h2>', data_div, 'title')
- location = self._html_search_regex(
- r'<h4.*?>(.*?)</h4>', data_div, 'location', fatal=False)
-
- kaltura_id = self._search_regex(
- r'<div id="video-detail-player" data-kaltura-id="([^"]+)"',
- webpage, 'kaltura ID')
- wid = self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid')
-
- return {
- 'id': mobj.group('id'),
- '_type': 'url_transparent',
- 'url': 'kaltura:%s:%s' % (wid, kaltura_id),
- 'ie_key': 'Kaltura',
- 'display_id': display_id,
- 'uploader_id': mobj.group('uploader_id'),
- 'thumbnail': thumbnail,
- 'description': self._html_search_meta('description', webpage),
- 'location': location,
- 'title': title,
- 'uploader': uploader,
- }
diff --git a/youtube_dl/extractor/myvideo.py b/youtube_dl/extractor/myvideo.py
index c96f472a3..36ab388b2 100644
--- a/youtube_dl/extractor/myvideo.py
+++ b/youtube_dl/extractor/myvideo.py
@@ -11,10 +11,10 @@ from ..compat import (
compat_ord,
compat_urllib_parse,
compat_urllib_parse_unquote,
- compat_urllib_request,
)
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -83,7 +83,7 @@ class MyVideoIE(InfoExtractor):
mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage)
if mobj is not None:
- request = compat_urllib_request.Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
+ request = sanitized_Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '')
response = self._download_webpage(request, video_id,
'Downloading video info')
info = json.loads(base64.b64decode(response).decode('utf-8'))
diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py
index 925967753..1f5fc2145 100644
--- a/youtube_dl/extractor/naver.py
+++ b/youtube_dl/extractor/naver.py
@@ -10,7 +10,6 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
- clean_html,
)
@@ -46,11 +45,11 @@ class NaverIE(InfoExtractor):
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
- m_error = re.search(
- r'(?s)<div class="(?:nation_error|nation_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
- webpage)
- if m_error:
- raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
+ error = self._html_search_regex(
+ r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
+ webpage, 'error', default=None)
+ if error:
+ raise ExtractorError(error, expected=True)
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
diff --git a/youtube_dl/extractor/nba.py b/youtube_dl/extractor/nba.py
index 944096e1c..7c6b7841d 100644
--- a/youtube_dl/extractor/nba.py
+++ b/youtube_dl/extractor/nba.py
@@ -1,63 +1,102 @@
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
- remove_end,
parse_duration,
+ int_or_none,
+ xpath_text,
+ xpath_attr,
)
class NBAIE(InfoExtractor):
- _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$'
+ _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)?video/(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$'
_TESTS = [{
'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
- 'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
+ 'md5': '9e7729d3010a9c71506fd1248f74e4f4',
'info_dict': {
- 'id': '0021200253-okc-bkn-recap.nba',
- 'ext': 'mp4',
+ 'id': '0021200253-okc-bkn-recap',
+ 'ext': 'flv',
'title': 'Thunder vs. Nets',
'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
'duration': 181,
+ 'timestamp': 1354638466,
+ 'upload_date': '20121204',
},
}, {
'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
'only_matching': True,
}, {
- 'url': 'http://watch.nba.com/nba/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
+ 'url': 'http://watch.nba.com/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
+ 'md5': 'b2b39b81cf28615ae0c3360a3f9668c4',
'info_dict': {
- 'id': '0041400301-cle-atl-recap.nba',
+ 'id': '0041400301-cle-atl-recap',
'ext': 'mp4',
- 'title': 'NBA GAME TIME | Video: Hawks vs. Cavaliers Game 1',
+ 'title': 'Hawks vs. Cavaliers Game 1',
'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d',
'duration': 228,
- },
- 'params': {
- 'skip_download': True,
+ 'timestamp': 1432134543,
+ 'upload_date': '20150520',
}
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
-
- video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
+ path, video_id = re.match(self._VALID_URL, url).groups()
+ if path.startswith('nba/'):
+ path = path[3:]
+ video_info = self._download_xml('http://www.nba.com/%s.xml' % path, video_id)
+ video_id = xpath_text(video_info, 'slug')
+ title = xpath_text(video_info, 'headline')
+ description = xpath_text(video_info, 'description')
+ duration = parse_duration(xpath_text(video_info, 'length'))
+ timestamp = int_or_none(xpath_attr(video_info, 'dateCreated', 'uts'))
- shortened_video_id = video_id.rpartition('/')[2]
- title = remove_end(
- self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com')
+ thumbnails = []
+ for image in video_info.find('images'):
+ thumbnails.append({
+ 'id': image.attrib.get('cut'),
+ 'url': image.text,
+ 'width': int_or_none(image.attrib.get('width')),
+ 'height': int_or_none(image.attrib.get('height')),
+ })
- description = self._og_search_description(webpage)
- duration_str = self._html_search_meta(
- 'duration', webpage, 'duration', default=None)
- if not duration_str:
- duration_str = self._html_search_regex(
- r'Duration:</b>\s*(\d+:\d+)', webpage, 'duration', fatal=False)
- duration = parse_duration(duration_str)
+ formats = []
+ for video_file in video_info.findall('.//file'):
+ video_url = video_file.text
+ if video_url.startswith('/'):
+ continue
+ if video_url.endswith('.m3u8'):
+ m3u8_formats = self._extract_m3u8_formats(video_url, video_id, m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ elif video_url.endswith('.f4m'):
+ f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.4.1.1', video_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
+ else:
+ key = video_file.attrib.get('bitrate')
+ format_info = {
+ 'format_id': key,
+ 'url': video_url,
+ }
+ mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', key)
+ if mobj:
+ format_info.update({
+ 'width': int(mobj.group(1)),
+ 'height': int(mobj.group(2)),
+ 'tbr': int_or_none(mobj.group(3)),
+ })
+ formats.append(format_info)
+ self._sort_formats(formats)
return {
- 'id': shortened_video_id,
- 'url': video_url,
+ 'id': video_id,
'title': title,
'description': description,
'duration': duration,
+ 'timestamp': timestamp,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
}
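
Judging by the regex, bitrate keys on NBA's <file> nodes look like '640x360_664' (width x height, optional total bitrate); the fallback branch above parses them into format fields. The parse in isolation, on assumed key shapes:

    import re

    for key in ('640x360_664', '1280x720'):  # assumed key shapes
        mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', key)
        if mobj:
            width, height = int(mobj.group(1)), int(mobj.group(2))
            tbr = int(mobj.group(3)) if mobj.group(3) else None
            print(key, width, height, tbr)
    # 640x360_664 640 360 664
    # 1280x720 1280 720 None
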
diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py
index e683d24c4..340c922bd 100644
--- a/youtube_dl/extractor/nbc.py
+++ b/youtube_dl/extractor/nbc.py
@@ -3,14 +3,12 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_str,
- compat_HTTPError,
-)
+from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
find_xpath_attr,
lowercase_escape,
+ smuggle_url,
unescapeHTML,
)
@@ -62,12 +60,13 @@ class NBCIE(InfoExtractor):
theplatform_url = unescapeHTML(lowercase_escape(self._html_search_regex(
[
r'(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"',
+ r'<iframe[^>]+src="((?:https?:)?//player\.theplatform\.com/[^"]+)"',
r'"embedURL"\s*:\s*"([^"]+)"'
],
webpage, 'theplatform url').replace('_no_endcard', '').replace('\\/', '/')))
if theplatform_url.startswith('//'):
theplatform_url = 'http:' + theplatform_url
- return self.url_result(theplatform_url)
+ return self.url_result(smuggle_url(theplatform_url, {'source_url': url}))
class NBCSportsVPlayerIE(InfoExtractor):
@@ -187,7 +186,7 @@ class NBCNewsIE(InfoExtractor):
'title': info.find('headline').text,
'ext': 'flv',
'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text,
- 'description': compat_str(info.find('caption').text),
+ 'description': info.find('caption').text,
'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text,
}
else:
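
smuggle_url lets NBC pass the original page URL along to the ThePlatform extractor; the extra data rides in the URL fragment. A sketch of the mechanism (the real helper lives in youtube_dl.utils and may differ in detail):

    import json

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode  # Python 2

    def smuggle_url(url, data):
        # Pack extra data into the fragment; unsmuggle_url strips it back out.
        return url + '#' + urlencode({'__youtubedl_smuggle': json.dumps(data)})

    print(smuggle_url(
        'http://player.theplatform.com/p/x/y',  # hypothetical
        {'source_url': 'http://www.nbc.com/some-show/video/episode/123'}))
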
diff --git a/youtube_dl/extractor/ndr.py b/youtube_dl/extractor/ndr.py
index 79a13958b..894c51399 100644
--- a/youtube_dl/extractor/ndr.py
+++ b/youtube_dl/extractor/ndr.py
@@ -1,130 +1,387 @@
-# encoding: utf-8
+# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
+ determine_ext,
int_or_none,
+ parse_iso8601,
qualities,
- parse_duration,
)
class NDRBaseIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ display_id = next(group for group in mobj.groups() if group)
+ webpage = self._download_webpage(url, display_id)
+ return self._extract_embed(webpage, display_id)
- page = self._download_webpage(url, video_id, 'Downloading page')
- title = self._og_search_title(page).strip()
- description = self._og_search_description(page)
- if description:
- description = description.strip()
+class NDRIE(NDRBaseIE):
+ IE_NAME = 'ndr'
+ IE_DESC = 'NDR.de - Norddeutscher Rundfunk'
+ _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html'
+ _TESTS = [{
+ # httpVideo, same content id
+ 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
+ 'md5': '6515bc255dc5c5f8c85bbc38e035a659',
+ 'info_dict': {
+ 'id': 'hafengeburtstag988',
+ 'display_id': 'Party-Poette-und-Parade',
+ 'ext': 'mp4',
+ 'title': 'Party, Pötte und Parade',
+ 'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c',
+ 'uploader': 'ndrtv',
+ 'timestamp': 1431108900,
+ 'upload_date': '20150510',
+ 'duration': 3498,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpVideo, different content id
+ 'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html',
+ 'md5': '1043ff203eab307f0c51702ec49e9a71',
+ 'info_dict': {
+ 'id': 'osna272',
+ 'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch',
+ 'ext': 'mp4',
+ 'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights',
+ 'description': 'md5:32e9b800b3d2d4008103752682d5dc01',
+ 'uploader': 'ndrtv',
+ 'timestamp': 1442059200,
+ 'upload_date': '20150912',
+ 'duration': 510,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpAudio, same content id
+ 'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html',
+ 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
+ 'info_dict': {
+ 'id': 'audio51535',
+ 'display_id': 'La-Valette-entgeht-der-Hinrichtung',
+ 'ext': 'mp3',
+ 'title': 'La Valette entgeht der Hinrichtung',
+ 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
+ 'uploader': 'ndrinfo',
+ 'timestamp': 1290626100,
+ 'upload_date': '20140729',
+ 'duration': 884,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html',
+ 'only_matching': True,
+ }]
+
+ def _extract_embed(self, webpage, display_id):
+ embed_url = self._html_search_meta(
+ 'embedURL', webpage, 'embed URL', fatal=True)
+ description = self._search_regex(
+ r'<p[^>]+itemprop="description">([^<]+)</p>',
+ webpage, 'description', default=None) or self._og_search_description(webpage)
+ timestamp = parse_iso8601(
+ self._search_regex(
+ r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"',
+ webpage, 'upload date', fatal=False))
+ return {
+ '_type': 'url_transparent',
+ 'url': embed_url,
+ 'display_id': display_id,
+ 'description': description,
+ 'timestamp': timestamp,
+ }
- duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', default=None))
- if not duration:
- duration = parse_duration(self._html_search_regex(
- r'(<span class="min">\d+</span>:<span class="sec">\d+</span>)',
- page, 'duration', default=None))
- formats = []
+class NJoyIE(NDRBaseIE):
+ IE_NAME = 'njoy'
+ IE_DESC = 'N-JOY'
+ _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html'
+ _TESTS = [{
+ # httpVideo, same content id
+ 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
+ 'md5': 'cb63be60cd6f9dd75218803146d8dc67',
+ 'info_dict': {
+ 'id': 'comedycontest2480',
+ 'display_id': 'Benaissa-beim-NDR-Comedy-Contest',
+ 'ext': 'mp4',
+ 'title': 'Benaissa beim NDR Comedy Contest',
+ 'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39',
+ 'uploader': 'ndrtv',
+ 'upload_date': '20141129',
+ 'duration': 654,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpVideo, different content id
+ 'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html',
+ 'md5': '417660fffa90e6df2fda19f1b40a64d8',
+ 'info_dict': {
+ 'id': 'dockville882',
+ 'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-',
+ 'ext': 'mp4',
+ 'title': '"Ich hab noch nie" mit Felix Jaehn',
+ 'description': 'md5:85dd312d53be1b99e1f998a16452a2f3',
+ 'uploader': 'njoy',
+ 'upload_date': '20150822',
+ 'duration': 211,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html',
+ 'only_matching': True,
+ }]
+
+ def _extract_embed(self, webpage, display_id):
+ video_id = self._search_regex(
+ r'<iframe[^>]+id="pp_([\da-z]+)"', webpage, 'embed id')
+ description = self._search_regex(
+ r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>',
+ webpage, 'description', fatal=False)
+ return {
+ '_type': 'url_transparent',
+ 'ie_key': 'NDREmbedBase',
+ 'url': 'ndr:%s' % video_id,
+ 'display_id': display_id,
+ 'description': description,
+ }
+
+
+class NDREmbedBaseIE(InfoExtractor):
+ IE_NAME = 'ndr:embed:base'
+ _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)'
+ _TESTS = [{
+ 'url': 'ndr:soundcheck3366',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/soundcheck3366-ppjson.json',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id') or mobj.group('id_s')
+
+ ppjson = self._download_json(
+ 'http://www.ndr.de/%s-ppjson.json' % video_id, video_id)
- mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
- if mp3_url:
- formats.append({
- 'url': mp3_url.group('audio'),
- 'format_id': 'mp3',
- })
+ playlist = ppjson['playlist']
- thumbnail = None
+ formats = []
+ quality_key = qualities(('xs', 's', 'm', 'l', 'xl'))
- video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
- if video_url:
- thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
- if thumbnails:
- quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
- largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
- thumbnail = 'http://www.ndr.de' + largest[0]
+ for format_id, f in playlist.items():
+ src = f.get('src')
+ if not src:
+ continue
+ ext = determine_ext(src, None)
+ if ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(
+ src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds'))
+ elif ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ src, video_id, m3u8_id='hls', entry_protocol='m3u8_native'))
+ else:
+ quality = f.get('quality')
+ ff = {
+ 'url': src,
+ 'format_id': quality or format_id,
+ 'quality': quality_key(quality),
+ }
+ type_ = f.get('type')
+ if type_ and type_.split('/')[0] == 'audio':
+ ff['vcodec'] = 'none'
+ ff['ext'] = ext or 'mp3'
+ formats.append(ff)
+ self._sort_formats(formats)
- for format_id in 'lo', 'hi', 'hq':
- formats.append({
- 'url': '%s.%s.mp4' % (video_url.group('video'), format_id),
- 'format_id': format_id,
- })
+ config = playlist['config']
- if not formats:
- raise ExtractorError('No media links available for %s' % video_id)
+ live = config.get('streamType') in ('httpVideoLive', 'httpAudioLive')
+ title = config['title']
+ if live:
+ title = self._live_title(title)
+ uploader = ppjson.get('config', {}).get('branding')
+ upload_date = ppjson.get('config', {}).get('publicationDate')
+ duration = int_or_none(config.get('duration'))
+
+ thumbnails = [{
+ 'id': thumbnail.get('quality') or thumbnail_id,
+ 'url': thumbnail['src'],
+ 'preference': quality_key(thumbnail.get('quality')),
+ } for thumbnail_id, thumbnail in config.get('poster', {}).items() if thumbnail.get('src')]
return {
'id': video_id,
'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
+ 'is_live': live,
+ 'uploader': uploader if uploader != '-' else None,
+ 'upload_date': upload_date[0:8] if upload_date else None,
'duration': duration,
+ 'thumbnails': thumbnails,
'formats': formats,
}
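
quality_key above comes from the qualities helper, which turns an ordered list of labels into a sortable score; the same ordering ranks both the direct formats and the poster thumbnails. A stand-in with the behavior youtube_dl.utils.qualities is expected to have (unknown labels sort lowest):

    def qualities(quality_ids):
        # Label -> position in the ordered list; -1 for unknown labels.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality_key = qualities(('xs', 's', 'm', 'l', 'xl'))
    print(quality_key('m'), quality_key('xl'), quality_key('hd'))  # 2 4 -1
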
-class NDRIE(NDRBaseIE):
- IE_NAME = 'ndr'
- IE_DESC = 'NDR.de - Mediathek'
- _VALID_URL = r'https?://www\.ndr\.de/.+?(?P<id>\d+)\.html'
-
- _TESTS = [
- {
- 'url': 'http://www.ndr.de/fernsehen/sendungen/nordmagazin/Kartoffeltage-in-der-Lewitz,nordmagazin25866.html',
- 'md5': '5bc5f5b92c82c0f8b26cddca34f8bb2c',
- 'note': 'Video file',
- 'info_dict': {
- 'id': '25866',
- 'ext': 'mp4',
- 'title': 'Kartoffeltage in der Lewitz',
- 'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8',
- 'duration': 166,
- },
- 'skip': '404 Not found',
- },
- {
- 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
- 'md5': 'dadc003c55ae12a5d2f6bd436cd73f59',
- 'info_dict': {
- 'id': '988',
- 'ext': 'mp4',
- 'title': 'Party, Pötte und Parade',
- 'description': 'Hunderttausende feiern zwischen Speicherstadt und St. Pauli den 826. Hafengeburtstag. Die NDR Sondersendung zeigt die schönsten und spektakulärsten Bilder vom Auftakt.',
- 'duration': 3498,
- },
- },
- {
- 'url': 'http://www.ndr.de/info/audio51535.html',
- 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
- 'note': 'Audio file',
- 'info_dict': {
- 'id': '51535',
- 'ext': 'mp3',
- 'title': 'La Valette entgeht der Hinrichtung',
- 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
- 'duration': 884,
- }
- }
- ]
-
+class NDREmbedIE(NDREmbedBaseIE):
+ IE_NAME = 'ndr:embed'
+ _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html'
+ _TESTS = [{
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html',
+ 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9',
+ 'info_dict': {
+ 'id': 'ndraktuell28488',
+ 'ext': 'mp4',
+ 'title': 'Norddeutschland begrüßt Flüchtlinge',
+ 'is_live': False,
+ 'uploader': 'ndrtv',
+ 'upload_date': '20150907',
+ 'duration': 132,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html',
+ 'md5': '002085c44bae38802d94ae5802a36e78',
+ 'info_dict': {
+ 'id': 'soundcheck3366',
+ 'ext': 'mp4',
+ 'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen',
+ 'is_live': False,
+ 'uploader': 'ndr2',
+ 'upload_date': '20150912',
+ 'duration': 3554,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/info/audio51535-player.html',
+ 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
+ 'info_dict': {
+ 'id': 'audio51535',
+ 'ext': 'mp3',
+ 'title': 'La Valette entgeht der Hinrichtung',
+ 'is_live': False,
+ 'uploader': 'ndrinfo',
+ 'upload_date': '20140729',
+ 'duration': 884,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html',
+ 'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c',
+ 'info_dict': {
+ 'id': 'visite11010',
+ 'ext': 'mp4',
+ 'title': 'Visite - die ganze Sendung',
+ 'is_live': False,
+ 'uploader': 'ndrtv',
+ 'upload_date': '20150902',
+ 'duration': 3525,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpVideoLive
+ 'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html',
+ 'info_dict': {
+ 'id': 'livestream217',
+ 'ext': 'flv',
+ 'title': 're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
+ 'is_live': True,
+ 'upload_date': '20150910',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.ndr.de/fernsehen/doku952-player.html',
+ 'only_matching': True,
+ }]
-class NJoyIE(NDRBaseIE):
- IE_NAME = 'N-JOY'
- _VALID_URL = r'https?://www\.n-joy\.de/.+?(?P<id>\d+)\.html'
- _TEST = {
- 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
- 'md5': 'cb63be60cd6f9dd75218803146d8dc67',
+class NJoyEmbedIE(NDREmbedBaseIE):
+ IE_NAME = 'njoy:embed'
+ _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html'
+ _TESTS = [{
+ # httpVideo
+ 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html',
+ 'md5': '8483cbfe2320bd4d28a349d62d88bd74',
'info_dict': {
- 'id': '2480',
+ 'id': 'doku948',
'ext': 'mp4',
- 'title': 'Benaissa beim NDR Comedy Contest',
- 'description': 'Von seinem sehr "behaarten" Leben lässt sich Benaissa trotz aller Schwierigkeiten nicht unterkriegen.',
- 'duration': 654,
- }
- }
+ 'title': 'Zehn Jahre Reeperbahn Festival - die Doku',
+ 'is_live': False,
+ 'upload_date': '20150807',
+ 'duration': 1011,
+ },
+ }, {
+ # httpAudio
+ 'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html',
+ 'md5': 'd989f80f28ac954430f7b8a48197188a',
+ 'info_dict': {
+ 'id': 'stefanrichter100',
+ 'ext': 'mp3',
+ 'title': 'Interview mit einem Augenzeugen',
+ 'is_live': False,
+ 'uploader': 'njoy',
+ 'upload_date': '20150909',
+ 'duration': 140,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # httpAudioLive, no explicit ext
+ 'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html',
+ 'info_dict': {
+ 'id': 'webradioweltweit100',
+ 'ext': 'mp3',
+ 'title': 're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
+ 'is_live': True,
+ 'uploader': 'njoy',
+ 'upload_date': '20150810',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html',
+ 'only_matching': True,
+ }]
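
The rewritten NDR/N-Joy embed extraction above ranks both progressive formats and poster thumbnails through the qualities() helper seeded with ('xs', 's', 'm', 'l', 'xl'). A minimal sketch of how that helper behaves, assuming the stock youtube_dl.utils implementation (an index lookup over the given ordering, -1 for unknown ids):

    def qualities(quality_ids):
        # Return a key function mapping a quality id to its position in the
        # preferred ordering; ids not in the list (including None) rank lowest.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality_key = qualities(('xs', 's', 'm', 'l', 'xl'))
    assert quality_key('xl') > quality_key('m')  # larger variants win
    assert quality_key(None) == -1               # missing quality sorts last
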
diff --git a/youtube_dl/extractor/neteasemusic.py b/youtube_dl/extractor/neteasemusic.py
index a8e0a64ed..15eca825a 100644
--- a/youtube_dl/extractor/neteasemusic.py
+++ b/youtube_dl/extractor/neteasemusic.py
@@ -8,11 +8,11 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_request,
compat_urllib_parse,
compat_str,
compat_itertools_count,
)
+from ..utils import sanitized_Request
class NetEaseMusicBaseIE(InfoExtractor):
@@ -40,7 +40,7 @@ class NetEaseMusicBaseIE(InfoExtractor):
if not details:
continue
formats.append({
- 'url': 'http://m1.music.126.net/%s/%s.%s' %
+ 'url': 'http://m5.music.126.net/%s/%s.%s' %
(cls._encrypt(details['dfsId']), details['dfsId'],
details['extension']),
'ext': details.get('extension'),
@@ -56,7 +56,7 @@ class NetEaseMusicBaseIE(InfoExtractor):
return int(round(ms / 1000.0))
def query_api(self, endpoint, video_id, note):
- req = compat_urllib_request.Request('%s%s' % (self._API_BASE, endpoint))
+ req = sanitized_Request('%s%s' % (self._API_BASE, endpoint))
req.add_header('Referer', self._API_BASE)
return self._download_json(req, video_id, note)
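
The compat_urllib_request.Request to sanitized_Request swap above recurs through the rest of this commit. The wrapper is a thin shim that cleans the URL before building the request, so headers and POST data work exactly as before; a sketch of the pattern, where the sanitize_url body shown here is only a placeholder for the utils helper:

    from youtube_dl.compat import compat_urllib_request

    def sanitize_url(url):
        # Placeholder: the real utils helper normalizes malformed URLs
        return url.strip()

    def sanitized_Request(url, *args, **kwargs):
        # Same construction as before, with the URL cleaned up first
        return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)

    req = sanitized_Request('http://music.163.com/api/song/detail/ ')
    req.add_header('Referer', 'http://music.163.com/api/')  # call sites unchanged
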
diff --git a/youtube_dl/extractor/nextmedia.py b/youtube_dl/extractor/nextmedia.py
index c10784f6b..d1688457f 100644
--- a/youtube_dl/extractor/nextmedia.py
+++ b/youtube_dl/extractor/nextmedia.py
@@ -126,7 +126,8 @@ class AppleDailyIE(NextMediaIE):
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd',
'upload_date': '20150128',
- }
+ },
+ 'skip': 'redirect to http://www.appledaily.com.tw/animation/',
}, {
# No thumbnail
'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/',
@@ -140,10 +141,19 @@ class AppleDailyIE(NextMediaIE):
},
'expected_warnings': [
'video thumbnail',
- ]
+ ],
+ 'skip': 'redirect to http://www.appledaily.com.tw/animation/',
}, {
'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/',
- 'only_matching': True,
+ 'md5': 'eaa20e6b9df418c912d7f5dec2ba734d',
+ 'info_dict': {
+ 'id': '35770334',
+ 'ext': 'mp4',
+ 'title': '咖啡占卜測 XU裝熟指數',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748',
+ 'upload_date': '20140417',
+ },
}]
_URL_PATTERN = r'\{url: \'(.+)\'\}'
diff --git a/youtube_dl/extractor/nfb.py b/youtube_dl/extractor/nfb.py
index ea077254b..5bd15f7a7 100644
--- a/youtube_dl/extractor/nfb.py
+++ b/youtube_dl/extractor/nfb.py
@@ -1,10 +1,8 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse
+from ..utils import sanitized_Request
class NFBIE(InfoExtractor):
@@ -40,8 +38,9 @@ class NFBIE(InfoExtractor):
uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>',
page, 'director name', fatal=False)
- request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id,
- compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+ request = sanitized_Request(
+ 'https://www.nfb.ca/film/%s/player_config' % video_id,
+ compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
diff --git a/youtube_dl/extractor/nfl.py b/youtube_dl/extractor/nfl.py
index dc54634a5..200874d68 100644
--- a/youtube_dl/extractor/nfl.py
+++ b/youtube_dl/extractor/nfl.py
@@ -16,53 +16,118 @@ from ..utils import (
class NFLIE(InfoExtractor):
IE_NAME = 'nfl.com'
- _VALID_URL = r'''(?x)https?://
- (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
- (?:.+?/)*
- (?P<id>(?:[a-z0-9]{16}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
- _TESTS = [
- {
- 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
- 'md5': '394ef771ddcd1354f665b471d78ec4c6',
- 'info_dict': {
- 'id': '0ap3000000398478',
- 'ext': 'mp4',
- 'title': 'Week 3: Redskins vs. Eagles highlights',
- 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
- 'upload_date': '20140921',
- 'timestamp': 1411337580,
- 'thumbnail': 're:^https?://.*\.jpg$',
- }
+ _VALID_URL = r'''(?x)
+ https?://
+ (?P<host>
+ (?:www\.)?
+ (?:
+ (?:
+ nfl|
+ buffalobills|
+ miamidolphins|
+ patriots|
+ newyorkjets|
+ baltimoreravens|
+ bengals|
+ clevelandbrowns|
+ steelers|
+ houstontexans|
+ colts|
+ jaguars|
+ titansonline|
+ denverbroncos|
+ kcchiefs|
+ raiders|
+ chargers|
+ dallascowboys|
+ giants|
+ philadelphiaeagles|
+ redskins|
+ chicagobears|
+ detroitlions|
+ packers|
+ vikings|
+ atlantafalcons|
+ panthers|
+ neworleanssaints|
+ buccaneers|
+ azcardinals|
+ stlouisrams|
+ 49ers|
+ seahawks
+ )\.com|
+ .+?\.clubs\.nfl\.com
+ )
+ )/
+ (?:.+?/)*
+ (?P<id>[^/#?&]+)
+ '''
+ _TESTS = [{
+ 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
+ 'md5': '394ef771ddcd1354f665b471d78ec4c6',
+ 'info_dict': {
+ 'id': '0ap3000000398478',
+ 'ext': 'mp4',
+ 'title': 'Week 3: Redskins vs. Eagles highlights',
+ 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
+ 'upload_date': '20140921',
+ 'timestamp': 1411337580,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }, {
+ 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
+ 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
+ 'info_dict': {
+ 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
+ 'ext': 'mp4',
+ 'title': 'LIVE: Post Game vs. Browns',
+ 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
+ 'upload_date': '20131229',
+ 'timestamp': 1388354455,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }, {
+ 'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish',
+ 'info_dict': {
+ 'id': '0ap3000000467607',
+ 'ext': 'mp4',
+ 'title': 'Frustrations flare on the field',
+ 'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.',
+ 'timestamp': 1422850320,
+ 'upload_date': '20150202',
},
- {
- 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
- 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
- 'info_dict': {
- 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
- 'ext': 'mp4',
- 'title': 'LIVE: Post Game vs. Browns',
- 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
- 'upload_date': '20131229',
- 'timestamp': 1388354455,
- 'thumbnail': 're:^https?://.*\.jpg$',
- }
+ }, {
+ 'url': 'http://www.patriots.com/video/2015/09/18/10-days-gillette',
+ 'md5': '4c319e2f625ffd0b481b4382c6fc124c',
+ 'info_dict': {
+ 'id': 'n-238346',
+ 'ext': 'mp4',
+ 'title': '10 Days at Gillette',
+ 'description': 'md5:8cd9cd48fac16de596eadc0b24add951',
+ 'timestamp': 1442618809,
+ 'upload_date': '20150918',
},
- {
- 'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish',
- 'info_dict': {
- 'id': '0ap3000000467607',
- 'ext': 'mp4',
- 'title': 'Frustrations flare on the field',
- 'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.',
- 'timestamp': 1422850320,
- 'upload_date': '20150202',
- },
+ }, {
+ # lowercase data-contentid
+ 'url': 'http://www.steelers.com/news/article-1/Tomlin-on-Ben-getting-Vick-ready/56399c96-4160-48cf-a7ad-1d17d4a3aef7',
+ 'info_dict': {
+ 'id': '12693586-6ea9-4743-9c1c-02c59e4a5ef2',
+ 'ext': 'mp4',
+ 'title': 'Tomlin looks ahead to Ravens on a short week',
+ 'description': 'md5:32f3f7b139f43913181d5cbb24ecad75',
+ 'timestamp': 1443459651,
+ 'upload_date': '20150928',
},
- {
- 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
- 'only_matching': True,
- }
- ]
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.buffalobills.com/video/videos/Rex_Ryan_Show_World_Wide_Rex/b1dcfab2-3190-4bb1-bfc0-d6e603d6601a',
+ 'only_matching': True,
+ }]
@staticmethod
def prepend_host(host, url):
@@ -95,13 +160,14 @@ class NFLIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
config_url = NFLIE.prepend_host(host, self._search_regex(
- r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL',
- default='static/content/static/config/video/config.json'))
+ r'(?:(?:config|configURL)\s*:\s*|<nflcs:avplayer[^>]+data-config\s*=\s*)(["\'])(?P<config>.+?)\1',
+ webpage, 'config URL', default='static/content/static/config/video/config.json',
+ group='config'))
# For articles, the id in the url is not the video id
video_id = self._search_regex(
- r'contentId\s*:\s*"([^"]+)"', webpage, 'video id', default=video_id)
- config = self._download_json(config_url, video_id,
- note='Downloading player config')
+ r'(?:<nflcs:avplayer[^>]+data-content[Ii]d\s*=\s*|content[Ii]d\s*:\s*)(["\'])(?P<id>.+?)\1',
+ webpage, 'video id', default=video_id, group='id')
+ config = self._download_json(config_url, video_id, 'Downloading player config')
url_template = NFLIE.prepend_host(
host, '{contentURLTemplate:}'.format(**config))
video_data = self._download_json(
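
The new data-config/data-contentId patterns above lean on a standard youtube-dl idiom: capture the opening quote, match the payload lazily, and require the same quote again via the \1 backreference, then read the payload from a named group. A self-contained sketch with plain re (the sample markup is invented):

    import re

    # (["\']) captures whichever quote opens the attribute; \1 demands the
    # same quote to close it, so mixed quoting on the page cannot over-match.
    pattern = r'<nflcs:avplayer[^>]+data-config\s*=\s*(["\'])(?P<config>.+?)\1'

    sample = "<nflcs:avplayer data-config='static/content/static/config/video/config.json'>"
    mobj = re.search(pattern, sample)
    print(mobj.group('config'))  # -> static/content/static/config/video/config.json
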
diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py
index 279b18386..e98a5ef89 100644
--- a/youtube_dl/extractor/nhl.py
+++ b/youtube_dl/extractor/nhl.py
@@ -72,7 +72,7 @@ class NHLBaseInfoExtractor(InfoExtractor):
class NHLIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com'
- _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console)?(?:\?(?:.*?[?&])?)(?:id|hlg)=(?P<id>[-0-9a-zA-Z,]+)'
+ _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/(?:console|embed)?(?:\?(?:.*?[?&])?)(?:id|hlg|playlist)=(?P<id>[-0-9a-zA-Z,]+)'
_TESTS = [{
'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
@@ -136,6 +136,9 @@ class NHLIE(NHLBaseInfoExtractor):
'params': {
'skip_download': True, # Requires rtmpdump
}
+ }, {
+ 'url': 'http://video.nhl.com/videocenter/embed?playlist=836127',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -146,9 +149,9 @@ class NHLIE(NHLBaseInfoExtractor):
class NHLNewsIE(NHLBaseInfoExtractor):
IE_NAME = 'nhl.com:news'
IE_DESC = 'NHL news'
- _VALID_URL = r'https?://(?:www\.)?nhl\.com/ice/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
+ _VALID_URL = r'https?://(?:.+?\.)?nhl\.com/(?:ice|club)/news\.html?(?:\?(?:.*?[?&])?)id=(?P<id>[-0-9a-zA-Z]+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.nhl.com/ice/news.htm?id=750727',
'md5': '4b3d1262e177687a3009937bd9ec0be8',
'info_dict': {
@@ -159,13 +162,26 @@ class NHLNewsIE(NHLBaseInfoExtractor):
'duration': 37,
'upload_date': '20150128',
},
- }
+ }, {
+ # iframe embed
+ 'url': 'http://sabres.nhl.com/club/news.htm?id=780189',
+ 'md5': '9f663d1c006c90ac9fb82777d4294e12',
+ 'info_dict': {
+ 'id': '836127',
+ 'ext': 'mp4',
+ 'title': 'Morning Skate: OTT vs. BUF (9/23/15)',
+ 'description': "Brian Duff chats with Tyler Ennis prior to Buffalo's first preseason home game.",
+ 'duration': 93,
+ 'upload_date': '20150923',
+ },
+ }]
def _real_extract(self, url):
news_id = self._match_id(url)
webpage = self._download_webpage(url, news_id)
video_id = self._search_regex(
- [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'"],
+ [r'pVid(\d+)', r"nlid\s*:\s*'(\d+)'",
+ r'<iframe[^>]+src=["\']https?://video.*?\.nhl\.com/videocenter/embed\?.*\bplaylist=(\d+)'],
webpage, 'video id')
return self._real_extract_video(video_id)
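
When _search_regex is handed a list, patterns are tried in order and the first match wins, which is how the new iframe fallback slots in behind the legacy pVid/nlid patterns above. A self-contained sketch of the same first-match-wins loop (the page snippet is invented):

    import re

    patterns = [
        r'pVid(\d+)',
        r"nlid\s*:\s*'(\d+)'",
        r'<iframe[^>]+src=["\']https?://video.*?\.nhl\.com/videocenter/embed\?.*\bplaylist=(\d+)',
    ]

    page = '<iframe src="http://video.nhl.com/videocenter/embed?playlist=836127">'

    video_id = None
    for p in patterns:
        mobj = re.search(p, page)
        if mobj:
            video_id = mobj.group(1)  # first pattern that matches wins
            break
    print(video_id)  # -> 836127
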
diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py
index bda1cff05..586e52a4a 100644
--- a/youtube_dl/extractor/niconico.py
+++ b/youtube_dl/extractor/niconico.py
@@ -8,7 +8,6 @@ import datetime
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
- compat_urllib_request,
compat_urlparse,
)
from ..utils import (
@@ -17,6 +16,7 @@ from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
+ sanitized_Request,
xpath_text,
determine_ext,
)
@@ -102,7 +102,7 @@ class NiconicoIE(InfoExtractor):
'password': password,
}
login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'https://secure.nicovideo.jp/secure/login', login_data)
login_results = self._download_webpage(
request, None, note='Logging in', errnote='Unable to log in')
@@ -145,7 +145,7 @@ class NiconicoIE(InfoExtractor):
'k': thumb_play_key,
'v': video_id
})
- flv_info_request = compat_urllib_request.Request(
+ flv_info_request = sanitized_Request(
'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
{'Content-Type': 'application/x-www-form-urlencoded'})
flv_info_webpage = self._download_webpage(
diff --git a/youtube_dl/extractor/ninegag.py b/youtube_dl/extractor/ninegag.py
index 7f842b5c2..a06d38afd 100644
--- a/youtube_dl/extractor/ninegag.py
+++ b/youtube_dl/extractor/ninegag.py
@@ -1,7 +1,6 @@
from __future__ import unicode_literals
import re
-import json
from .common import InfoExtractor
from ..utils import str_to_int
@@ -9,61 +8,93 @@ from ..utils import str_to_int
class NineGagIE(InfoExtractor):
IE_NAME = '9gag'
- _VALID_URL = r'''(?x)^https?://(?:www\.)?9gag\.tv/
- (?:
- v/(?P<numid>[0-9]+)|
- p/(?P<id>[a-zA-Z0-9]+)/(?P<display_id>[^?#/]+)
- )
- '''
+ _VALID_URL = r'https?://(?:www\.)?9gag(?:\.com/tv|\.tv)/(?:p|embed)/(?P<id>[a-zA-Z0-9]+)(?:/(?P<display_id>[^?#/]+))?'
_TESTS = [{
- "url": "http://9gag.tv/v/1912",
- "info_dict": {
- "id": "1912",
- "ext": "mp4",
- "description": "This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)",
- "title": "\"People Are Awesome 2013\" Is Absolutely Awesome",
+ 'url': 'http://9gag.com/tv/p/Kk2X5/people-are-awesome-2013-is-absolutely-awesome',
+ 'info_dict': {
+ 'id': 'Kk2X5',
+ 'ext': 'mp4',
+ 'description': 'This 3-minute video will make you smile and then make you feel untalented and insignificant. Anyway, you should share this awesomeness. (Thanks, Dino!)',
+ 'title': '\"People Are Awesome 2013\" Is Absolutely Awesome',
'uploader_id': 'UCdEH6EjDKwtTe-sO2f0_1XA',
'uploader': 'CompilationChannel',
'upload_date': '20131110',
- "view_count": int,
- "thumbnail": "re:^https?://",
+ 'view_count': int,
},
- 'add_ie': ['Youtube']
+ 'add_ie': ['Youtube'],
}, {
- 'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar',
+ 'url': 'http://9gag.com/tv/p/aKolP3',
'info_dict': {
- 'id': 'KklwM',
+ 'id': 'aKolP3',
'ext': 'mp4',
- 'display_id': 'alternate-banned-opening-scene-of-gravity',
- "description": "While Gravity was a pretty awesome movie already, YouTuber Krishna Shenoi came up with a way to improve upon it, introducing a much better solution to Sandra Bullock's seemingly endless tumble in space. The ending is priceless.",
- 'title': "Banned Opening Scene Of \"Gravity\" That Changes The Whole Movie",
- 'uploader': 'Krishna Shenoi',
- 'upload_date': '20140401',
- 'uploader_id': 'krishnashenoi93',
+ 'title': 'This Guy Travelled 11 countries In 44 days Just To Make This Amazing Video',
+ 'description': "I just saw more in 1 minute than I've seen in 1 year. This guy's video is epic!!",
+ 'uploader_id': 'rickmereki',
+ 'uploader': 'Rick Mereki',
+ 'upload_date': '20110803',
+ 'view_count': int,
},
+ 'add_ie': ['Vimeo'],
+ }, {
+ 'url': 'http://9gag.com/tv/p/KklwM',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://9gag.tv/p/Kk2X5',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://9gag.com/tv/embed/a5Dmvl',
+ 'only_matching': True,
}]
+ _EXTERNAL_VIDEO_PROVIDER = {
+ '1': {
+ 'url': '%s',
+ 'ie_key': 'Youtube',
+ },
+ '2': {
+ 'url': 'http://player.vimeo.com/video/%s',
+ 'ie_key': 'Vimeo',
+ },
+ '3': {
+ 'url': 'http://instagram.com/p/%s',
+ 'ie_key': 'Instagram',
+ },
+ '4': {
+ 'url': 'http://vine.co/v/%s',
+ 'ie_key': 'Vine',
+ },
+ }
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('numid') or mobj.group('id')
+ video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id
webpage = self._download_webpage(url, display_id)
- post_view = json.loads(self._html_search_regex(
- r'var postView = new app\.PostView\({\s*post:\s*({.+?}),\s*posts:\s*prefetchedCurrentPost', webpage, 'post view'))
+ post_view = self._parse_json(
+ self._search_regex(
+ r'var\s+postView\s*=\s*new\s+app\.PostView\({\s*post:\s*({.+?})\s*,\s*posts:\s*prefetchedCurrentPost',
+ webpage, 'post view'),
+ display_id)
- youtube_id = post_view['videoExternalId']
+ ie_key = None
+ source_url = post_view.get('sourceUrl')
+ if not source_url:
+ external_video_id = post_view['videoExternalId']
+ external_video_provider = post_view['videoExternalProvider']
+ source_url = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['url'] % external_video_id
+ ie_key = self._EXTERNAL_VIDEO_PROVIDER[external_video_provider]['ie_key']
title = post_view['title']
- description = post_view['description']
- view_count = str_to_int(post_view['externalView'])
+ description = post_view.get('description')
+ view_count = str_to_int(post_view.get('externalView'))
thumbnail = post_view.get('thumbnail_700w') or post_view.get('ogImageUrl') or post_view.get('thumbnail_300w')
return {
'_type': 'url_transparent',
- 'url': youtube_id,
- 'ie_key': 'Youtube',
+ 'url': source_url,
+ 'ie_key': ie_key,
'id': video_id,
'display_id': display_id,
'title': title,
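
When a 9GAG post carries no direct sourceUrl, the numeric videoExternalProvider id picks a URL template and extractor key from _EXTERNAL_VIDEO_PROVIDER and the external id is formatted in (for provider '1' the id alone is handed over, since the Youtube extractor accepts bare video ids). A small standalone sketch of the lookup, with a fabricated post dict:

    EXTERNAL_VIDEO_PROVIDER = {
        '1': {'url': '%s', 'ie_key': 'Youtube'},  # id doubles as the URL
        '2': {'url': 'http://player.vimeo.com/video/%s', 'ie_key': 'Vimeo'},
    }

    post = {'videoExternalId': '76979871', 'videoExternalProvider': '2'}  # fabricated

    provider = EXTERNAL_VIDEO_PROVIDER[post['videoExternalProvider']]
    source_url = provider['url'] % post['videoExternalId']
    ie_key = provider['ie_key']
    print(source_url, ie_key)  # -> http://player.vimeo.com/video/76979871 Vimeo
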
diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py
index a53e27b27..d440313d5 100644
--- a/youtube_dl/extractor/noco.py
+++ b/youtube_dl/extractor/noco.py
@@ -9,7 +9,7 @@ from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
- compat_urllib_request,
+ compat_urlparse,
)
from ..utils import (
clean_html,
@@ -17,6 +17,7 @@ from ..utils import (
int_or_none,
float_or_none,
parse_iso8601,
+ sanitized_Request,
)
@@ -74,7 +75,7 @@ class NocoIE(InfoExtractor):
'username': username,
'password': password,
}
- request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+ request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
login = self._download_json(request, None, 'Logging in as %s' % username)
@@ -82,14 +83,21 @@ class NocoIE(InfoExtractor):
if 'erreur' in login:
raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True)
+ @staticmethod
+ def _ts():
+ return int(time.time() * 1000)
+
def _call_api(self, path, video_id, note, sub_lang=None):
- ts = compat_str(int(time.time() * 1000))
+ ts = compat_str(self._ts() + self._ts_offset)
tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest()
url = self._API_URL_TEMPLATE % (path, ts, tk)
if sub_lang:
url += self._SUB_LANG_TEMPLATE % sub_lang
- resp = self._download_json(url, video_id, note)
+ request = sanitized_Request(url)
+ request.add_header('Referer', self._referer)
+
+ resp = self._download_json(request, video_id, note)
if isinstance(resp, dict) and resp.get('error'):
self._raise_error(resp['error'], resp['description'])
@@ -102,8 +110,22 @@ class NocoIE(InfoExtractor):
expected=True)
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
+
+        # The offset between server time and local time must be calculated
+        # so that all API requests carry timestamps as close as possible to
+        # the server's own (see https://github.com/rg3/youtube-dl/issues/7864)
+ webpage = self._download_webpage(url, video_id)
+
+ player_url = self._search_regex(
+ r'(["\'])(?P<player>https?://noco\.tv/(?:[^/]+/)+NocoPlayer.+?\.swf.*?)\1',
+ webpage, 'noco player', group='player',
+ default='http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf')
+
+ qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query)
+ ts = int_or_none(qs.get('ts', [None])[0])
+ self._ts_offset = ts - self._ts() if ts else 0
+ self._referer = player_url
medias = self._call_api(
'shows/%s/medias' % video_id,
@@ -155,8 +177,8 @@ class NocoIE(InfoExtractor):
'format_id': format_id_extended,
'width': int_or_none(fmt.get('res_width')),
'height': int_or_none(fmt.get('res_lines')),
- 'abr': int_or_none(fmt.get('audiobitrate')),
- 'vbr': int_or_none(fmt.get('videobitrate')),
+ 'abr': int_or_none(fmt.get('audiobitrate'), 1000),
+ 'vbr': int_or_none(fmt.get('videobitrate'), 1000),
'filesize': int_or_none(fmt.get('filesize')),
'format_note': qualities[format_id].get('quality_name'),
'quality': qualities[format_id].get('priority'),
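
The Noco change pins API timestamps to the server clock: a ts query parameter on the player SWF URL yields _ts_offset against the local clock, and _call_api then signs a corrected millisecond timestamp with the salt shown above. A worked sketch of that signing (the server timestamp value is arbitrary):

    import hashlib
    import time

    def _ts():
        return int(time.time() * 1000)  # local clock, in milliseconds

    server_ts = 1446000000000      # sample value read from the player URL ?ts= parameter
    ts_offset = server_ts - _ts()  # how far the server clock drifts from ours

    ts = str(_ts() + ts_offset)    # timestamp corrected to server time
    tk = hashlib.md5(
        (hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')
    ).hexdigest()
    # ts and tk are interpolated into _API_URL_TEMPLATE to sign the request
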
diff --git a/youtube_dl/extractor/nosvideo.py b/youtube_dl/extractor/nosvideo.py
index f5ef856db..eab816e49 100644
--- a/youtube_dl/extractor/nosvideo.py
+++ b/youtube_dl/extractor/nosvideo.py
@@ -4,11 +4,9 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
ExtractorError,
+ sanitized_Request,
urlencode_postdata,
xpath_text,
xpath_with_ns,
@@ -41,7 +39,7 @@ class NosVideoIE(InfoExtractor):
'op': 'download1',
'method_free': 'Continue to Video',
}
- req = compat_urllib_request.Request(url, urlencode_postdata(fields))
+ req = sanitized_Request(url, urlencode_postdata(fields))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(req, video_id,
'Downloading download page')
diff --git a/youtube_dl/extractor/novamov.py b/youtube_dl/extractor/novamov.py
index 04d779890..d68c1ad79 100644
--- a/youtube_dl/extractor/novamov.py
+++ b/youtube_dl/extractor/novamov.py
@@ -3,11 +3,13 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urlparse,
-)
+from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
+ NO_DEFAULT,
+ encode_dict,
+ sanitized_Request,
+ urlencode_postdata,
)
@@ -15,15 +17,16 @@ class NovaMovIE(InfoExtractor):
IE_NAME = 'novamov'
IE_DESC = 'NovaMov'
- _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})'
+ _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video|mobile/#/videos)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})'
_VALID_URL = _VALID_URL_TEMPLATE % {'host': 'novamov\.com'}
_HOST = 'www.novamov.com'
_FILE_DELETED_REGEX = r'This file no longer exists on our servers!</h2>'
- _FILEKEY_REGEX = r'flashvars\.filekey="(?P<filekey>[^"]+)";'
+ _FILEKEY_REGEX = r'flashvars\.filekey=(?P<filekey>"?[^"]+"?);'
_TITLE_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>'
_DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>'
+ _URL_TEMPLATE = 'http://%s/video/%s'
_TEST = {
'url': 'http://www.novamov.com/video/4rurhn9x446jj',
@@ -37,20 +40,50 @@ class NovaMovIE(InfoExtractor):
'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)'
}
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- page = self._download_webpage(
- 'http://%s/video/%s' % (self._HOST, video_id), video_id, 'Downloading video page')
-
- if re.search(self._FILE_DELETED_REGEX, page) is not None:
+ def _check_existence(self, webpage, video_id):
+ if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
- filekey = self._search_regex(self._FILEKEY_REGEX, page, 'filekey')
-
- title = self._html_search_regex(self._TITLE_REGEX, page, 'title', fatal=False)
- description = self._html_search_regex(self._DESCRIPTION_REGEX, page, 'description', default='', fatal=False)
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ url = self._URL_TEMPLATE % (self._HOST, video_id)
+
+ webpage = self._download_webpage(
+ url, video_id, 'Downloading video page')
+
+ self._check_existence(webpage, video_id)
+
+ def extract_filekey(default=NO_DEFAULT):
+ filekey = self._search_regex(
+ self._FILEKEY_REGEX, webpage, 'filekey', default=default)
+ if filekey is not default and (filekey[0] != '"' or filekey[-1] != '"'):
+ return self._search_regex(
+ r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(filekey), webpage, 'filekey', default=default)
+ else:
+ return filekey
+
+ filekey = extract_filekey(default=None)
+
+ if not filekey:
+ fields = self._hidden_inputs(webpage)
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', webpage,
+ 'post url', default=url, group='url')
+ if not post_url.startswith('http'):
+ post_url = compat_urlparse.urljoin(url, post_url)
+ request = sanitized_Request(
+ post_url, urlencode_postdata(encode_dict(fields)))
+ request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ request.add_header('Referer', post_url)
+ webpage = self._download_webpage(
+ request, video_id, 'Downloading continue to the video page')
+ self._check_existence(webpage, video_id)
+
+ filekey = extract_filekey()
+
+ title = self._html_search_regex(self._TITLE_REGEX, webpage, 'title', fatal=False)
+ description = self._html_search_regex(self._DESCRIPTION_REGEX, webpage, 'description', default='', fatal=False)
api_response = self._download_webpage(
'http://%s/api/player.api.php?key=%s&file=%s' % (self._HOST, filekey, video_id), video_id,
@@ -69,3 +102,89 @@ class NovaMovIE(InfoExtractor):
'title': title,
'description': description
}
+
+
+class WholeCloudIE(NovaMovIE):
+ IE_NAME = 'wholecloud'
+ IE_DESC = 'WholeCloud'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': '(?:wholecloud\.net|movshare\.(?:net|sx|ag))'}
+
+ _HOST = 'www.wholecloud.net'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>'
+ _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>'
+
+ _TEST = {
+ 'url': 'http://www.wholecloud.net/video/559e28be54d96',
+ 'md5': 'abd31a2132947262c50429e1d16c1bfd',
+ 'info_dict': {
+ 'id': '559e28be54d96',
+ 'ext': 'flv',
+ 'title': 'dissapeared image',
+ 'description': 'optical illusion dissapeared image magic illusion',
+ }
+ }
+
+
+class NowVideoIE(NovaMovIE):
+ IE_NAME = 'nowvideo'
+ IE_DESC = 'NowVideo'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:to|ch|ec|sx|eu|at|ag|co|li)'}
+
+ _HOST = 'www.nowvideo.to'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<h4>([^<]+)</h4>'
+ _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>'
+
+ _TEST = {
+ 'url': 'http://www.nowvideo.sx/video/f1d6fce9a968b',
+ 'md5': '12c82cad4f2084881d8bc60ee29df092',
+ 'info_dict': {
+ 'id': 'f1d6fce9a968b',
+ 'ext': 'flv',
+ 'title': 'youtubedl test video BaWjenozKc',
+ 'description': 'Description',
+ },
+ }
+
+
+class VideoWeedIE(NovaMovIE):
+ IE_NAME = 'videoweed'
+ IE_DESC = 'VideoWeed'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'}
+
+ _HOST = 'www.videoweed.es'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>'
+ _URL_TEMPLATE = 'http://%s/file/%s'
+
+ _TEST = {
+ 'url': 'http://www.videoweed.es/file/b42178afbea14',
+ 'md5': 'abd31a2132947262c50429e1d16c1bfd',
+ 'info_dict': {
+ 'id': 'b42178afbea14',
+ 'ext': 'flv',
+ 'title': 'optical illusion dissapeared image magic illusion',
+ 'description': ''
+ },
+ }
+
+
+class CloudTimeIE(NovaMovIE):
+ IE_NAME = 'cloudtime'
+ IE_DESC = 'CloudTime'
+
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'cloudtime\.to'}
+
+ _HOST = 'www.cloudtime.to'
+
+ _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
+ _TITLE_REGEX = r'<div[^>]+class=["\']video_det["\'][^>]*>\s*<strong>([^<]+)</strong>'
+
+ _TEST = None
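
The relaxed _FILEKEY_REGEX now also captures a bare JavaScript identifier instead of a quoted literal, and extract_filekey dereferences the identifier with a second var lookup. A standalone sketch of the two-step resolution (the page snippet is invented):

    import re

    page = 'var fkzd="abc123";\nflashvars.filekey=fkzd;'  # invented snippet

    filekey = re.search(
        r'flashvars\.filekey=(?P<filekey>"?[^"]+"?);', page).group('filekey')
    if not (filekey.startswith('"') and filekey.endswith('"')):
        # bare identifier: dereference the JavaScript variable it names
        filekey = re.search(
            r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(filekey), page).group(1)
    print(filekey)  # -> abc123
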
diff --git a/youtube_dl/extractor/nowness.py b/youtube_dl/extractor/nowness.py
index 6b2f3f55a..446f5901c 100644
--- a/youtube_dl/extractor/nowness.py
+++ b/youtube_dl/extractor/nowness.py
@@ -1,64 +1,140 @@
# encoding: utf-8
from __future__ import unicode_literals
-import re
-
-from .brightcove import BrightcoveIE
+from .brightcove import (
+ BrightcoveLegacyIE,
+ BrightcoveNewIE,
+)
from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..compat import compat_str
+from ..utils import (
+ ExtractorError,
+ sanitized_Request,
+)
+
+
+class NownessBaseIE(InfoExtractor):
+ def _extract_url_result(self, post):
+ if post['type'] == 'video':
+ for media in post['media']:
+ if media['type'] == 'video':
+ video_id = media['content']
+ source = media['source']
+ if source == 'brightcove':
+ player_code = self._download_webpage(
+ 'http://www.nowness.com/iframe?id=%s' % video_id, video_id,
+ note='Downloading player JavaScript',
+ errnote='Unable to download player JavaScript')
+ bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code)
+ if bc_url:
+ return self.url_result(bc_url, BrightcoveLegacyIE.ie_key())
+ bc_url = BrightcoveNewIE._extract_url(player_code)
+ if bc_url:
+ return self.url_result(bc_url, BrightcoveNewIE.ie_key())
+ raise ExtractorError('Could not find player definition')
+ elif source == 'vimeo':
+ return self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
+ elif source == 'youtube':
+ return self.url_result(video_id, 'Youtube')
+ elif source == 'cinematique':
+ # youtube-dl currently doesn't support cinematique
+ # return self.url_result('http://cinematique.com/embed/%s' % video_id, 'Cinematique')
+ pass
+ def _api_request(self, url, request_path):
+ display_id = self._match_id(url)
+ request = sanitized_Request(
+ 'http://api.nowness.com/api/' + request_path % display_id,
+ headers={
+ 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us',
+ })
+ return display_id, self._download_json(request, display_id)
-class NownessIE(InfoExtractor):
- _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/[^?#]*?/(?P<id>[0-9]+)/(?P<slug>[^/]+?)(?:$|[?#])'
- _TESTS = [
- {
- 'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
- 'md5': '068bc0202558c2e391924cb8cc470676',
- 'info_dict': {
- 'id': '2520295746001',
- 'ext': 'mp4',
- 'title': 'Candor: The Art of Gesticulation',
- 'description': 'Candor: The Art of Gesticulation',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'uploader': 'Nowness',
- }
+class NownessIE(NownessBaseIE):
+ IE_NAME = 'nowness'
+ _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/(?:story|(?:series|category)/[^/]+)/(?P<id>[^/]+?)(?:$|[?#])'
+ _TESTS = [{
+ 'url': 'https://www.nowness.com/story/candor-the-art-of-gesticulation',
+ 'md5': '068bc0202558c2e391924cb8cc470676',
+ 'info_dict': {
+ 'id': '2520295746001',
+ 'ext': 'mp4',
+ 'title': 'Candor: The Art of Gesticulation',
+ 'description': 'Candor: The Art of Gesticulation',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'Nowness',
},
- {
- 'url': 'http://cn.nowness.com/day/2014/8/7/4069/kasper-bj-rke-ft-jaakko-eino-kalevi--tnr',
- 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3',
- 'info_dict': {
- 'id': '3716354522001',
- 'ext': 'mp4',
- 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
- 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'uploader': 'Nowness',
- }
+ }, {
+ 'url': 'https://cn.nowness.com/story/kasper-bjorke-ft-jaakko-eino-kalevi-tnr',
+ 'md5': 'e79cf125e387216f86b2e0a5b5c63aa3',
+ 'info_dict': {
+ 'id': '3716354522001',
+ 'ext': 'mp4',
+ 'title': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+ 'description': 'Kasper Bjørke ft. Jaakko Eino Kalevi: TNR',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader': 'Nowness',
},
- ]
+ }, {
+ # vimeo
+ 'url': 'https://www.nowness.com/series/nowness-picks/jean-luc-godard-supercut',
+ 'md5': '9a5a6a8edf806407e411296ab6bc2a49',
+ 'info_dict': {
+ 'id': '130020913',
+ 'ext': 'mp4',
+ 'title': 'Bleu, Blanc, Rouge - A Godard Supercut',
+ 'description': 'md5:f0ea5f1857dffca02dbd37875d742cec',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'upload_date': '20150607',
+ 'uploader': 'Cinema Sem Lei',
+ 'uploader_id': 'cinemasemlei',
+ },
+ }]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('slug')
+ _, post = self._api_request(url, 'post/getBySlug/%s')
+ return self._extract_url_result(post)
- webpage = self._download_webpage(url, video_id)
- player_url = self._search_regex(
- r'"([^"]+/content/issue-[0-9.]+.js)"', webpage, 'player URL')
- real_id = self._search_regex(
- r'\sdata-videoId="([0-9]+)"', webpage, 'internal video ID')
- player_code = self._download_webpage(
- player_url, video_id,
- note='Downloading player JavaScript',
- errnote='Player download failed')
- player_code = player_code.replace("'+d+'", real_id)
+class NownessPlaylistIE(NownessBaseIE):
+ IE_NAME = 'nowness:playlist'
+ _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/playlist/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'https://www.nowness.com/playlist/3286/i-guess-thats-why-they-call-it-the-blues',
+ 'info_dict': {
+ 'id': '3286',
+ },
+ 'playlist_mincount': 8,
+ }
- bc_url = BrightcoveIE._extract_brightcove_url(player_code)
- if bc_url is None:
- raise ExtractorError('Could not find player definition')
- return {
- '_type': 'url',
- 'url': bc_url,
- 'ie_key': 'Brightcove',
- }
+ def _real_extract(self, url):
+ playlist_id, playlist = self._api_request(url, 'post?PlaylistId=%s')
+ entries = [self._extract_url_result(item) for item in playlist['items']]
+ return self.playlist_result(entries, playlist_id)
+
+
+class NownessSeriesIE(NownessBaseIE):
+ IE_NAME = 'nowness:series'
+ _VALID_URL = r'https?://(?:(?:www|cn)\.)?nowness\.com/series/(?P<id>[^/]+?)(?:$|[?#])'
+ _TEST = {
+ 'url': 'https://www.nowness.com/series/60-seconds',
+ 'info_dict': {
+ 'id': '60',
+ 'title': '60 Seconds',
+ 'description': 'One-minute wisdom in a new NOWNESS series',
+ },
+ 'playlist_mincount': 4,
+ }
+
+ def _real_extract(self, url):
+ display_id, series = self._api_request(url, 'series/getBySlug/%s')
+ entries = [self._extract_url_result(post) for post in series['posts']]
+ series_title = None
+ series_description = None
+ translations = series.get('translations', [])
+ if translations:
+ series_title = translations[0].get('title') or translations[0]['seoTitle']
+ series_description = translations[0].get('seoDescription')
+ return self.playlist_result(
+ entries, compat_str(series['id']), series_title, series_description)
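
For Brightcove-sourced posts, the iframe player code is probed for a legacy embed first and only then for a new-player embed; both helpers return a URL string or None. A compact sketch of that fallback chain, assuming the brightcove.py helpers used above behave that way:

    from youtube_dl.extractor.brightcove import BrightcoveLegacyIE, BrightcoveNewIE

    def resolve_brightcove(player_code):
        # Legacy embeds are still the common case, so they are probed first
        bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code)
        if bc_url:
            return bc_url, BrightcoveLegacyIE.ie_key()
        bc_url = BrightcoveNewIE._extract_url(player_code)
        if bc_url:
            return bc_url, BrightcoveNewIE.ie_key()
        return None, None  # caller raises 'Could not find player definition'
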
diff --git a/youtube_dl/extractor/nowtv.py b/youtube_dl/extractor/nowtv.py
index c8257719f..fd107aca2 100644
--- a/youtube_dl/extractor/nowtv.py
+++ b/youtube_dl/extractor/nowtv.py
@@ -1,6 +1,8 @@
# coding: utf-8
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
@@ -13,8 +15,63 @@ from ..utils import (
)
-class NowTVIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/(?:player|preview)'
+class NowTVBaseIE(InfoExtractor):
+ _VIDEO_FIELDS = (
+ 'id', 'title', 'free', 'geoblocked', 'articleLong', 'articleShort',
+ 'broadcastStartDate', 'seoUrl', 'duration', 'files',
+ 'format.defaultImage169Format', 'format.defaultImage169Logo')
+
+ def _extract_video(self, info, display_id=None):
+ video_id = compat_str(info['id'])
+
+ files = info['files']
+ if not files:
+ if info.get('geoblocked', False):
+ raise ExtractorError(
+ 'Video %s is not available from your location due to geo restriction' % video_id,
+ expected=True)
+ if not info.get('free', True):
+ raise ExtractorError(
+ 'Video %s is not available for free' % video_id, expected=True)
+
+ formats = []
+ for item in files['items']:
+ if determine_ext(item['path']) != 'f4v':
+ continue
+ app, play_path = remove_start(item['path'], '/').split('/', 1)
+ formats.append({
+ 'url': 'rtmpe://fms.rtl.de',
+ 'app': app,
+ 'play_path': 'mp4:%s' % play_path,
+ 'ext': 'flv',
+ 'page_url': 'http://rtlnow.rtl.de',
+ 'player_url': 'http://cdn.static-fra.de/now/vodplayer.swf',
+ 'tbr': int_or_none(item.get('bitrate')),
+ })
+ self._sort_formats(formats)
+
+ title = info['title']
+ description = info.get('articleLong') or info.get('articleShort')
+ timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ')
+ duration = parse_duration(info.get('duration'))
+
+ f = info.get('format', {})
+ thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo')
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id or info.get('seoUrl'),
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ }
+
+
+class NowTVIE(NowTVBaseIE):
+ _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/(?:(?:list/[^/]+|jahr/\d{4}/\d{1,2})/)?(?P<id>[^/]+)/(?:player|preview)'
_TESTS = [{
# rtl
@@ -23,7 +80,7 @@ class NowTVIE(InfoExtractor):
'id': '203519',
'display_id': 'bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit',
'ext': 'flv',
- 'title': 'Die neuen Bauern und eine Hochzeit',
+ 'title': 'Inka Bause stellt die neuen Bauern vor',
'description': 'md5:e234e1ed6d63cf06be5c070442612e7e',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1432580700,
@@ -133,61 +190,71 @@ class NowTVIE(InfoExtractor):
}, {
'url': 'http://www.nowtv.de/rtl2/echtzeit/list/aktuell/schnelles-geld-am-ende-der-welt/player',
'only_matching': True,
+ }, {
+ 'url': 'http://www.nowtv.de/rtl2/zuhause-im-glueck/jahr/2015/11/eine-erschuetternde-diagnose/player',
+ 'only_matching': True,
}]
def _real_extract(self, url):
- display_id = self._match_id(url)
- display_id_split = display_id.split('/')
- if len(display_id) > 2:
- display_id = '/'.join((display_id_split[0], display_id_split[-1]))
+ mobj = re.match(self._VALID_URL, url)
+ display_id = '%s/%s' % (mobj.group('show_id'), mobj.group('id'))
info = self._download_json(
- 'https://api.nowtv.de/v3/movies/%s?fields=id,title,free,geoblocked,articleLong,articleShort,broadcastStartDate,seoUrl,duration,format,files' % display_id,
- display_id)
+ 'https://api.nowtv.de/v3/movies/%s?fields=%s'
+ % (display_id, ','.join(self._VIDEO_FIELDS)), display_id)
- video_id = compat_str(info['id'])
+ return self._extract_video(info, display_id)
- files = info['files']
- if not files:
- if info.get('geoblocked', False):
- raise ExtractorError(
- 'Video %s is not available from your location due to geo restriction' % video_id,
- expected=True)
- if not info.get('free', True):
- raise ExtractorError(
- 'Video %s is not available for free' % video_id, expected=True)
- formats = []
- for item in files['items']:
- if determine_ext(item['path']) != 'f4v':
- continue
- app, play_path = remove_start(item['path'], '/').split('/', 1)
- formats.append({
- 'url': 'rtmpe://fms.rtl.de',
- 'app': app,
- 'play_path': 'mp4:%s' % play_path,
- 'ext': 'flv',
- 'page_url': url,
- 'player_url': 'http://rtl-now.rtl.de/includes/nc_player.swf',
- 'tbr': int_or_none(item.get('bitrate')),
- })
- self._sort_formats(formats)
+class NowTVListIE(NowTVBaseIE):
+ _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/list/(?P<id>[^?/#&]+)$'
- title = info['title']
- description = info.get('articleLong') or info.get('articleShort')
- timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ')
- duration = parse_duration(info.get('duration'))
+ _SHOW_FIELDS = ('title', )
+ _SEASON_FIELDS = ('id', 'headline', 'seoheadline', )
- f = info.get('format', {})
- thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo')
+ _TESTS = [{
+ 'url': 'http://www.nowtv.at/rtl/stern-tv/list/aktuell',
+ 'info_dict': {
+ 'id': '17006',
+ 'title': 'stern TV - Aktuell',
+ },
+ 'playlist_count': 1,
+ }, {
+ 'url': 'http://www.nowtv.at/rtl/das-supertalent/list/free-staffel-8',
+ 'info_dict': {
+ 'id': '20716',
+ 'title': 'Das Supertalent - FREE Staffel 8',
+ },
+ 'playlist_count': 14,
+ }]
- return {
- 'id': video_id,
- 'display_id': display_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'timestamp': timestamp,
- 'duration': duration,
- 'formats': formats,
- }
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ show_id = mobj.group('show_id')
+ season_id = mobj.group('id')
+
+ fields = []
+ fields.extend(self._SHOW_FIELDS)
+ fields.extend('formatTabs.%s' % field for field in self._SEASON_FIELDS)
+ fields.extend(
+ 'formatTabs.formatTabPages.container.movies.%s' % field
+ for field in self._VIDEO_FIELDS)
+
+ list_info = self._download_json(
+ 'https://api.nowtv.de/v3/formats/seo?fields=%s&name=%s.php'
+ % (','.join(fields), show_id),
+ season_id)
+
+ season = next(
+ season for season in list_info['formatTabs']['items']
+ if season.get('seoheadline') == season_id)
+
+ title = '%s - %s' % (list_info['title'], season['headline'])
+
+ entries = []
+ for container in season['formatTabPages']['items']:
+ for info in ((container.get('container') or {}).get('movies') or {}).get('items') or []:
+ entries.append(self._extract_video(info))
+
+ return self.playlist_result(
+ entries, compat_str(season.get('id') or season_id), title)
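
NowTVListIE builds its API query by prefixing every video field with the nested path the list endpoint expects, then joining everything into one comma-separated fields parameter. A tiny sketch of the flattening (the video field list is shortened for the example):

    SHOW_FIELDS = ('title',)
    SEASON_FIELDS = ('id', 'headline', 'seoheadline')
    VIDEO_FIELDS = ('id', 'title', 'files')  # shortened for the example

    fields = []
    fields.extend(SHOW_FIELDS)
    fields.extend('formatTabs.%s' % f for f in SEASON_FIELDS)
    fields.extend(
        'formatTabs.formatTabPages.container.movies.%s' % f for f in VIDEO_FIELDS)

    print(','.join(fields))
    # title,formatTabs.id,formatTabs.headline,formatTabs.seoheadline,
    # formatTabs.formatTabPages.container.movies.id,...
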
diff --git a/youtube_dl/extractor/nowvideo.py b/youtube_dl/extractor/nowvideo.py
deleted file mode 100644
index 17baa9679..000000000
--- a/youtube_dl/extractor/nowvideo.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class NowVideoIE(NovaMovIE):
- IE_NAME = 'nowvideo'
- IE_DESC = 'NowVideo'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|ec|sx|eu|at|ag|co|li)'}
-
- _HOST = 'www.nowvideo.ch'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _FILEKEY_REGEX = r'var fkzd="([^"]+)";'
- _TITLE_REGEX = r'<h4>([^<]+)</h4>'
- _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>'
-
- _TEST = {
- 'url': 'http://www.nowvideo.ch/video/0mw0yow7b6dxa',
- 'md5': 'f8fbbc8add72bd95b7850c6a02fc8817',
- 'info_dict': {
- 'id': '0mw0yow7b6dxa',
- 'ext': 'flv',
- 'title': 'youtubedl test video _BaW_jenozKc.mp4',
- 'description': 'Description',
- }
- }
diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index d066a96db..6ff13050d 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -4,7 +4,9 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..compat import compat_urlparse
from ..utils import (
+ determine_ext,
ExtractorError,
float_or_none,
parse_duration,
@@ -47,12 +49,22 @@ class NRKIE(InfoExtractor):
'http://v8.psapi.nrk.no/mediaelement/%s' % video_id,
video_id, 'Downloading media JSON')
- if data['usageRights']['isGeoBlocked']:
- raise ExtractorError(
- 'NRK har ikke rettig-heter til å vise dette programmet utenfor Norge',
- expected=True)
+ media_url = data.get('mediaUrl')
- video_url = data['mediaUrl'] + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81'
+ if not media_url:
+ if data['usageRights']['isGeoBlocked']:
+ raise ExtractorError(
+ 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
+ expected=True)
+
+ if determine_ext(media_url) == 'f4m':
+ formats = self._extract_f4m_formats(
+ media_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id, f4m_id='hds')
+ else:
+ formats = [{
+ 'url': media_url,
+ 'ext': 'flv',
+ }]
duration = parse_duration(data.get('duration'))
@@ -66,12 +78,11 @@ class NRKIE(InfoExtractor):
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'flv',
'title': data['title'],
'description': data['description'],
'duration': duration,
'thumbnail': thumbnail,
+ 'formats': formats,
}
@@ -196,20 +207,6 @@ class NRKTVIE(InfoExtractor):
}
]
- def _debug_print(self, txt):
- if self._downloader.params.get('verbose', False):
- self.to_screen('[debug] %s' % txt)
-
- def _get_subtitles(self, subtitlesurl, video_id, baseurl):
- url = "%s%s" % (baseurl, subtitlesurl)
- self._debug_print('%s: Subtitle url: %s' % (video_id, url))
- captions = self._download_xml(
- url, video_id, 'Downloading subtitles')
- lang = captions.get('lang', 'no')
- return {lang: [
- {'ext': 'ttml', 'url': url},
- ]}
-
def _extract_f4m(self, manifest_url, video_id):
return self._extract_f4m_formats(
manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id, f4m_id='hds')
@@ -218,7 +215,7 @@ class NRKTVIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
part_id = mobj.group('part_id')
- baseurl = mobj.group('baseurl')
+ base_url = mobj.group('baseurl')
webpage = self._download_webpage(url, video_id)
@@ -278,11 +275,14 @@ class NRKTVIE(InfoExtractor):
self._sort_formats(formats)
subtitles_url = self._html_search_regex(
- r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"',
- webpage, 'subtitle URL', default=None)
- subtitles = None
+ r'data-subtitlesurl\s*=\s*(["\'])(?P<url>.+?)\1',
+ webpage, 'subtitle URL', default=None, group='url')
+ subtitles = {}
if subtitles_url:
- subtitles = self.extract_subtitles(subtitles_url, video_id, baseurl)
+ subtitles['no'] = [{
+ 'ext': 'ttml',
+ 'url': compat_urlparse.urljoin(base_url, subtitles_url),
+ }]
return {
'id': video_id,
diff --git a/youtube_dl/extractor/nuvid.py b/youtube_dl/extractor/nuvid.py
index 57928f2ae..9fa7cefad 100644
--- a/youtube_dl/extractor/nuvid.py
+++ b/youtube_dl/extractor/nuvid.py
@@ -3,11 +3,9 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
parse_duration,
+ sanitized_Request,
unified_strdate,
)
@@ -33,7 +31,7 @@ class NuvidIE(InfoExtractor):
formats = []
for dwnld_speed, format_id in [(0, '3gp'), (5, 'mp4')]:
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'http://m.nuvid.com/play/%s' % video_id)
request.add_header('Cookie', 'skip_download_page=1; dwnld_speed=%d; adv_show=1' % dwnld_speed)
webpage = self._download_webpage(
diff --git a/youtube_dl/extractor/odnoklassniki.py b/youtube_dl/extractor/odnoklassniki.py
index 66520c2c5..184c7a323 100644
--- a/youtube_dl/extractor/odnoklassniki.py
+++ b/youtube_dl/extractor/odnoklassniki.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
+ ExtractorError,
unified_strdate,
int_or_none,
qualities,
@@ -12,7 +13,7 @@ from ..utils import (
class OdnoklassnikiIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
+ _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
_TESTS = [{
# metadata in JSON
'url': 'http://ok.ru/video/20079905452',
@@ -28,6 +29,7 @@ class OdnoklassnikiIE(InfoExtractor):
'like_count': int,
'age_limit': 0,
},
+ 'skip': 'Video has been blocked',
}, {
# metadataUrl
'url': 'http://ok.ru/video/63567059965189-0',
@@ -64,6 +66,9 @@ class OdnoklassnikiIE(InfoExtractor):
}, {
'url': 'http://www.ok.ru/video/20648036891',
'only_matching': True,
+ }, {
+ 'url': 'http://www.ok.ru/videoembed/20648036891',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -72,6 +77,12 @@ class OdnoklassnikiIE(InfoExtractor):
webpage = self._download_webpage(
'http://ok.ru/video/%s' % video_id, video_id)
+ error = self._search_regex(
+ r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<',
+ webpage, 'error', default=None)
+ if error:
+ raise ExtractorError(error, expected=True)
+
player = self._parse_json(
unescapeHTML(self._search_regex(
r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id,
diff --git a/youtube_dl/extractor/ooyala.py b/youtube_dl/extractor/ooyala.py
index a262a9f6d..8603fd692 100644
--- a/youtube_dl/extractor/ooyala.py
+++ b/youtube_dl/extractor/ooyala.py
@@ -1,108 +1,78 @@
from __future__ import unicode_literals
import re
-import json
import base64
from .common import InfoExtractor
from ..utils import (
- unescapeHTML,
- ExtractorError,
- determine_ext,
int_or_none,
+ float_or_none,
+ ExtractorError,
+ unsmuggle_url,
)
+from ..compat import compat_urllib_parse
class OoyalaBaseIE(InfoExtractor):
- def _extract_result(self, info, more_info):
- embedCode = info['embedCode']
- video_url = info.get('ipad_url') or info['url']
-
- if determine_ext(video_url) == 'm3u8':
- formats = self._extract_m3u8_formats(video_url, embedCode, ext='mp4')
- else:
- formats = [{
- 'url': video_url,
- 'ext': 'mp4',
- }]
-
- return {
- 'id': embedCode,
- 'title': unescapeHTML(info['title']),
- 'formats': formats,
- 'description': unescapeHTML(more_info['description']),
- 'thumbnail': more_info['promo'],
+ def _extract(self, content_tree_url, video_id, domain='example.org'):
+ content_tree = self._download_json(content_tree_url, video_id)['content_tree']
+ metadata = content_tree[list(content_tree)[0]]
+ embed_code = metadata['embed_code']
+ pcode = metadata.get('asset_pcode') or embed_code
+ video_info = {
+ 'id': embed_code,
+ 'title': metadata['title'],
+ 'description': metadata.get('description'),
+ 'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'),
+ 'duration': float_or_none(metadata.get('duration'), 1000),
}
- def _extract(self, player_url, video_id):
- player = self._download_webpage(player_url, video_id)
- mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="',
- player, 'mobile player url')
- # Looks like some videos are only available for particular devices
- # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0
- # is only available for ipad)
- # Working around with fetching URLs for all the devices found starting with 'unknown'
- # until we succeed or eventually fail for each device.
- devices = re.findall(r'device\s*=\s*"([^"]+)";', player)
- devices.remove('unknown')
- devices.insert(0, 'unknown')
- for device in devices:
- mobile_player = self._download_webpage(
- '%s&device=%s' % (mobile_url, device), video_id,
- 'Downloading mobile player JS for %s device' % device)
- videos_info = self._search_regex(
- r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
- mobile_player, 'info', fatal=False, default=None)
- if videos_info:
- break
-
- if not videos_info:
- formats = []
+ urls = []
+ formats = []
+ for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'):
auth_data = self._download_json(
- 'http://player.ooyala.com/sas/player_api/v1/authorization/embed_code/%s/%s?domain=www.example.org&supportedFormats=mp4,webm' % (video_id, video_id),
- video_id)
-
- cur_auth_data = auth_data['authorization_data'][video_id]
-
- for stream in cur_auth_data['streams']:
- formats.append({
- 'url': base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8'),
- 'ext': stream.get('delivery_type'),
- 'format': stream.get('video_codec'),
- 'format_id': stream.get('profile'),
- 'width': int_or_none(stream.get('width')),
- 'height': int_or_none(stream.get('height')),
- 'abr': int_or_none(stream.get('audio_bitrate')),
- 'vbr': int_or_none(stream.get('video_bitrate')),
- })
- if formats:
- return {
- 'id': video_id,
- 'formats': formats,
- 'title': 'Ooyala video',
- }
-
- if not cur_auth_data['authorized']:
- raise ExtractorError(cur_auth_data['message'], expected=True)
-
- if not videos_info:
- raise ExtractorError('Unable to extract info')
- videos_info = videos_info.replace('\\"', '"')
- videos_more_info = self._search_regex(
- r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"')
- videos_info = json.loads(videos_info)
- videos_more_info = json.loads(videos_more_info)
-
- if videos_more_info.get('lineup'):
- videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])]
- return {
- '_type': 'playlist',
- 'id': video_id,
- 'title': unescapeHTML(videos_more_info['title']),
- 'entries': videos,
- }
- else:
- return self._extract_result(videos_info[0], videos_more_info)
+ 'http://player.ooyala.com/sas/player_api/v1/authorization/embed_code/%s/%s?' % (pcode, embed_code) + compat_urllib_parse.urlencode({'domain': domain, 'supportedFormats': supported_format}),
+ video_id, 'Downloading %s JSON' % supported_format)
+
+ cur_auth_data = auth_data['authorization_data'][embed_code]
+
+ if cur_auth_data['authorized']:
+ for stream in cur_auth_data['streams']:
+ url = base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8')
+ if url in urls:
+ continue
+ urls.append(url)
+ delivery_type = stream['delivery_type']
+ if delivery_type == 'hls' or '.m3u8' in url:
+ m3u8_formats = self._extract_m3u8_formats(url, embed_code, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ elif delivery_type == 'hds' or '.f4m' in url:
+ f4m_formats = self._extract_f4m_formats(url, embed_code, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
+ elif '.smil' in url:
+ smil_formats = self._extract_smil_formats(url, embed_code, fatal=False)
+ if smil_formats:
+ formats.extend(smil_formats)
+ else:
+ formats.append({
+ 'url': url,
+ 'ext': stream.get('delivery_type'),
+ 'vcodec': stream.get('video_codec'),
+ 'format_id': delivery_type,
+ 'width': int_or_none(stream.get('width')),
+ 'height': int_or_none(stream.get('height')),
+ 'abr': int_or_none(stream.get('audio_bitrate')),
+ 'vbr': int_or_none(stream.get('video_bitrate')),
+ 'fps': float_or_none(stream.get('framerate')),
+ })
+ else:
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, cur_auth_data['message']), expected=True)
+ self._sort_formats(formats)
+
+ video_info['formats'] = formats
+ return video_info
class OoyalaIE(OoyalaBaseIE):
@@ -117,6 +87,7 @@ class OoyalaIE(OoyalaBaseIE):
'ext': 'mp4',
'title': 'Explaining Data Recovery from Hard Drives and SSDs',
'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
+ 'duration': 853.386,
},
}, {
# Only available for ipad
@@ -125,7 +96,7 @@ class OoyalaIE(OoyalaBaseIE):
'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'ext': 'mp4',
'title': 'Simulation Overview - Levels of Simulation',
- 'description': '',
+ 'duration': 194.948,
},
},
{
@@ -136,7 +107,8 @@ class OoyalaIE(OoyalaBaseIE):
'info_dict': {
'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'ext': 'mp4',
- 'title': 'Ooyala video',
+ 'title': 'Divide Tool Path.mp4',
+ 'duration': 204.405,
}
}
]
@@ -151,9 +123,11 @@ class OoyalaIE(OoyalaBaseIE):
ie=cls.ie_key())
def _real_extract(self, url):
+ url, smuggled_data = unsmuggle_url(url, {})
embed_code = self._match_id(url)
- player_url = 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code
- return self._extract(player_url, embed_code)
+ domain = smuggled_data.get('domain')
+ content_tree_url = 'http://player.ooyala.com/player_api/v1/content_tree/embed_code/%s/%s' % (embed_code, embed_code)
+ return self._extract(content_tree_url, embed_code, domain)
class OoyalaExternalIE(OoyalaBaseIE):
@@ -170,7 +144,7 @@ class OoyalaExternalIE(OoyalaBaseIE):
.*?&pcode=
)
(?P<pcode>.+?)
- (&|$)
+ (?:&|$)
'''
_TEST = {
@@ -179,7 +153,7 @@ class OoyalaExternalIE(OoyalaBaseIE):
'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
'ext': 'mp4',
'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
- 'description': '',
+ 'duration': 1302000,
},
'params': {
# m3u8 download
@@ -188,9 +162,6 @@ class OoyalaExternalIE(OoyalaBaseIE):
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- partner_id = mobj.group('partner_id')
- video_id = mobj.group('id')
- pcode = mobj.group('pcode')
- player_url = 'http://player.ooyala.com/player.js?externalId=%s:%s&pcode=%s' % (partner_id, video_id, pcode)
- return self._extract(player_url, video_id)
+ partner_id, video_id, pcode = re.match(self._VALID_URL, url).groups()
+ content_tree_url = 'http://player.ooyala.com/player_api/v1/content_tree/external_id/%s/%s:%s' % (pcode, partner_id, video_id)
+ return self._extract(content_tree_url, video_id)
diff --git a/youtube_dl/extractor/openfilm.py b/youtube_dl/extractor/openfilm.py
deleted file mode 100644
index d2ceedd01..000000000
--- a/youtube_dl/extractor/openfilm.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from __future__ import unicode_literals
-
-import json
-
-from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
-from ..utils import (
- parse_iso8601,
- parse_age_limit,
- int_or_none,
-)
-
-
-class OpenFilmIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)openfilm\.com/videos/(?P<id>.+)'
- _TEST = {
- 'url': 'http://www.openfilm.com/videos/human-resources-remastered',
- 'md5': '42bcd88c2f3ec13b65edf0f8ad1cac37',
- 'info_dict': {
- 'id': '32736',
- 'display_id': 'human-resources-remastered',
- 'ext': 'mp4',
- 'title': 'Human Resources (Remastered)',
- 'description': 'Social Engineering in the 20th Century.',
- 'thumbnail': 're:^https?://.*\.jpg$',
- 'duration': 7164,
- 'timestamp': 1334756988,
- 'upload_date': '20120418',
- 'uploader_id': '41117',
- 'view_count': int,
- 'age_limit': 0,
- },
- }
-
- def _real_extract(self, url):
- display_id = self._match_id(url)
-
- webpage = self._download_webpage(url, display_id)
-
- player = compat_urllib_parse_unquote_plus(
- self._og_search_video_url(webpage))
-
- video = json.loads(self._search_regex(
- r'\bp=({.+?})(?:&|$)', player, 'video JSON'))
-
- video_url = '%s1.mp4' % video['location']
- video_id = video.get('video_id')
- display_id = video.get('alias') or display_id
- title = video.get('title')
- description = video.get('description')
- thumbnail = video.get('main_thumb')
- duration = int_or_none(video.get('duration'))
- timestamp = parse_iso8601(video.get('dt_published'), ' ')
- uploader_id = video.get('user_id')
- view_count = int_or_none(video.get('views_count'))
- age_limit = parse_age_limit(video.get('age_limit'))
-
- return {
- 'id': video_id,
- 'display_id': display_id,
- 'url': video_url,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'timestamp': timestamp,
- 'uploader_id': uploader_id,
- 'view_count': view_count,
- 'age_limit': age_limit,
- }
diff --git a/youtube_dl/extractor/patreon.py b/youtube_dl/extractor/patreon.py
index 6cdc2638b..ec8876c28 100644
--- a/youtube_dl/extractor/patreon.py
+++ b/youtube_dl/extractor/patreon.py
@@ -2,9 +2,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import (
- js_to_json,
-)
+from ..utils import js_to_json
class PatreonIE(InfoExtractor):
@@ -65,7 +63,7 @@ class PatreonIE(InfoExtractor):
'password': password,
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'https://www.patreon.com/processLogin',
compat_urllib_parse.urlencode(login_form).encode('utf-8')
)
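This hunk is one of many in the commit that swap compat_urllib_request.Request for sanitized_Request. In youtube-dl's utils the helper simply runs the URL through a sanitizer before building the request; a rough sketch of the idea, with a simplified escaping rule standing in for the real sanitize_url (illustration only, not the exact utils.py code):

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    def sanitize_url(url):
        # Simplified stand-in: escape spaces that some servers reject raw.
        return url.replace(' ', '%20')

    def sanitized_Request(url, *args, **kwargs):
        # Same call signature as urllib's Request, but with a cleaned URL.
        return Request(sanitize_url(url), *args, **kwargs)

    req = sanitized_Request('http://example.com/some video.mp4')
    print(req.get_full_url())  # http://example.com/some%20video.mp4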
diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py
index 683c81de3..744e4a09a 100644
--- a/youtube_dl/extractor/pbs.py
+++ b/youtube_dl/extractor/pbs.py
@@ -8,22 +8,188 @@ from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
+ strip_jsonp,
unified_strdate,
US_RATINGS,
)
class PBSIE(InfoExtractor):
+ _STATIONS = (
+ (r'(?:video|www)\.pbs\.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/
+ (r'video\.aptv\.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/
+ (r'video\.gpb\.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/
+ (r'video\.mpbonline\.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org
+ (r'video\.wnpt\.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org
+ (r'video\.wfsu\.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/
+ (r'video\.wsre\.org', 'WSRE (WSRE)'), # http://www.wsre.org
+ (r'video\.wtcitv\.org', 'WTCI (WTCI)'), # http://www.wtcitv.org
+ (r'video\.pba\.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/
+ (r'video\.alaskapublic\.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm
+ # (r'kuac\.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/
+ # (r'ktoo\.org', '360 North (KTOO)'), # http://www.ktoo.org/
+ # (r'azpm\.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/
+ (r'video\.azpbs\.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org
+ (r'portal\.knme\.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/
+ (r'video\.vegaspbs\.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/
+ (r'watch\.aetn\.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/
+ (r'video\.ket\.org', 'KET (WKLE)'), # http://www.ket.org/
+ (r'video\.wkno\.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/
+ (r'video\.lpb\.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/
+ (r'videos\.oeta\.tv', 'OETA (KETA)'), # http://www.oeta.tv
+ (r'video\.optv\.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/
+ (r'watch\.wsiu\.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/
+ (r'video\.keet\.org', 'KEET TV (KEET)'), # http://www.keet.org
+ (r'pbs\.kixe\.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/
+ (r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/
+ (r'video\.kqed\.org', 'KQED (KQED)'), # http://www.kqed.org
+ (r'vids\.kvie\.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org
+ (r'video\.pbssocal\.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/
+ (r'video\.valleypbs\.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/
+ (r'video\.cptv\.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org
+ (r'watch\.knpb\.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/
+ (r'video\.soptv\.org', 'SOPTV (KSYS)'), # http://www.soptv.org
+ # (r'klcs\.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org
+ # (r'krcb\.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org
+ # (r'kvcr\.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org
+ (r'video\.rmpbs\.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org
+ (r'video\.kenw\.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org
+ (r'video\.kued\.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org
+ (r'video\.wyomingpbs\.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org
+ (r'video\.cpt12\.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # http://www.cpt12.org/
+ (r'video\.kbyueleven\.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/
+ (r'video\.thirteen\.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org
+ (r'video\.wgbh\.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org
+ (r'video\.wgby\.org', 'WGBY (WGBY)'), # http://www.wgby.org
+ (r'watch\.njtvonline\.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/
+ # (r'ripbs\.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/
+ (r'watch\.wliw\.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/
+ (r'video\.mpt\.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org
+ (r'watch\.weta\.org', 'WETA Television and Radio (WETA)'), # http://www.weta.org
+ (r'video\.whyy\.org', 'WHYY (WHYY)'), # http://www.whyy.org
+ (r'video\.wlvt\.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/
+ (r'video\.wvpt\.net', 'WVPT - Your Source for PBS and More! (WVPT)'), # http://www.wvpt.net
+ (r'video\.whut\.org', 'Howard University Television (WHUT)'), # http://www.whut.org
+ (r'video\.wedu\.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org
+ (r'video\.wgcu\.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/
+ # (r'wjct\.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org
+ (r'video\.wpbt2\.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org
+ (r'video\.wucftv\.org', 'WUCF TV (WUCF)'), # http://wucftv.org
+ (r'video\.wuft\.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org
+ (r'watch\.wxel\.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/
+ (r'video\.wlrn\.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/
+ (r'video\.wusf\.usf\.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/
+ (r'video\.scetv\.org', 'ETV (WRLK)'), # http://www.scetv.org
+ (r'video\.unctv\.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/
+ # (r'pbsguam\.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/
+ (r'video\.pbshawaii\.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/
+ (r'video\.idahoptv\.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org
+ (r'video\.ksps\.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/
+ (r'watch\.opb\.org', 'OPB (KOPB)'), # http://www.opb.org
+ (r'watch\.nwptv\.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org
+ (r'video\.will\.illinois\.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/
+ (r'video\.networkknowledge\.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv
+ (r'video\.wttw\.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/
+ # (r'wtvp\.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/
+ (r'video\.iptv\.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/
+ (r'video\.ninenet\.org', 'Nine Network (KETC)'), # http://www.ninenet.org
+ (r'video\.wfwa\.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/
+ (r'video\.wfyi\.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org
+ (r'video\.mptv\.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org
+ (r'video\.wnin\.org', 'WNIN (WNIN)'), # http://www.wnin.org/
+ (r'video\.wnit\.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/
+ (r'video\.wpt\.org', 'WPT (WPNE)'), # http://www.wpt.org/
+ (r'video\.wvut\.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/
+ (r'video\.weiu\.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net
+ (r'video\.wqpt\.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org
+ (r'video\.wycc\.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org
+ # (r'lakeshorepublicmedia\.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/
+ (r'video\.wipb\.org', 'WIPB-TV (WIPB)'), # http://wipb.org
+ (r'video\.indianapublicmedia\.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/
+ (r'watch\.cetconnect\.org', 'CET (WCET)'), # http://www.cetconnect.org
+ (r'video\.thinktv\.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org
+ (r'video\.wbgu\.org', 'WBGU-TV (WBGU)'), # http://wbgu.org
+ (r'video\.wgvu\.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/
+ (r'video\.netnebraska\.org', 'NET1 (KUON)'), # http://netnebraska.org
+ (r'video\.pioneer\.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org
+ (r'watch\.sdpb\.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org
+ (r'video\.tpt\.org', 'TPT (KTCA)'), # http://www.tpt.org
+ (r'watch\.ksmq\.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/
+ (r'watch\.kpts\.org', 'KPTS/Channel 8 (KPTS)'), # http://www.kpts.org/
+ (r'watch\.ktwu\.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org
+ # (r'shptv\.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org
+ # (r'kcpt\.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/
+ # (r'blueridgepbs\.org', 'Blue Ridge PBS (WBRA)'), # http://www.blueridgepbs.org/
+ (r'watch\.easttennesseepbs\.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org
+ (r'video\.wcte\.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org
+ (r'video\.wljt\.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/
+ (r'video\.wosu\.org', 'WOSU TV (WOSU)'), # http://wosu.org/
+ (r'video\.woub\.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5
+ (r'video\.wvpublic\.org', 'WVPB (WVPB)'), # http://wvpublic.org/
+ (r'video\.wkyupbs\.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org
+ # (r'wyes\.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org
+ (r'video\.kera\.org', 'KERA 13 (KERA)'), # http://www.kera.org/
+ (r'video\.mpbn\.net', 'MPBN (WCBB)'), # http://www.mpbn.net/
+ (r'video\.mountainlake\.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/
+ (r'video\.nhptv\.org', 'NHPTV (WENH)'), # http://nhptv.org/
+ (r'video\.vpt\.org', 'Vermont PBS (WETK)'), # http://www.vpt.org
+ (r'video\.witf\.org', 'witf (WITF)'), # http://www.witf.org
+ (r'watch\.wqed\.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/
+ (r'video\.wmht\.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/
+ (r'video\.deltabroadcasting\.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org
+ (r'video\.dptv\.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/
+ (r'video\.wcmu\.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org
+ (r'video\.wkar\.org', 'WKAR-TV (WKAR)'), # http://wkar.org/
+ (r'wnmuvideo\.nmu\.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu
+ (r'video\.wdse\.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/
+ (r'video\.wgte\.org', 'WGTE TV (WGTE)'), # http://www.wgte.org
+ (r'video\.lptv\.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org
+ # (r'prairiepublic\.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/
+ (r'video\.kmos\.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/
+ (r'watch\.montanapbs\.org', 'MontanaPBS (KUSM)'), # http://montanapbs.org
+ (r'video\.krwg\.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org
+ (r'video\.kacvtv\.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/
+ (r'video\.kcostv\.org', 'KCOS/Channel 13 (KCOS)'), # www.kcostv.org
+ (r'video\.wcny\.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org
+ (r'video\.wned\.org', 'WNED (WNED)'), # http://www.wned.org/
+ (r'watch\.wpbstv\.org', 'WPBS (WPBS)'), # http://www.wpbstv.org
+ (r'video\.wskg\.org', 'WSKG Public TV (WSKG)'), # http://wskg.org
+ (r'video\.wxxi\.org', 'WXXI (WXXI)'), # http://wxxi.org
+ (r'video\.wpsu\.org', 'WPSU (WPSU)'), # http://www.wpsu.org
+ # (r'wqln\.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org
+ (r'on-demand\.wvia\.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/
+ (r'video\.wtvi\.org', 'WTVI (WTVI)'), # http://www.wtvi.org/
+ # (r'whro\.org', 'WHRO (WHRO)'), # http://whro.org
+ (r'video\.westernreservepublicmedia\.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/
+ (r'video\.ideastream\.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/
+ (r'video\.kcts9\.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/
+ (r'video\.basinpbs\.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org
+ (r'video\.houstonpbs\.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/
+ # (r'tamu\.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu
+ # (r'kedt\.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org
+ (r'video\.klrn\.org', 'KLRN (KLRN)'), # http://www.klrn.org
+ (r'video\.klru\.tv', 'KLRU (KLRU)'), # http://www.klru.org
+ # (r'kmbh\.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org
+ # (r'knct\.org', 'KNCT (KNCT)'), # http://www.knct.org
+ # (r'ktxt\.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org
+ (r'video\.wtjx\.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/
+ (r'video\.ideastations\.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/
+ (r'video\.kbtc\.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org
+ )
+
+ IE_NAME = 'pbs'
+ IE_DESC = 'Public Broadcasting Service (PBS) and member stations: %s' % ', '.join(list(zip(*_STATIONS))[1])
+
_VALID_URL = r'''(?x)https?://
(?:
# Direct video URL
- video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
+ (?:%s)/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
# Article with embedded player (or direct video)
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
# Player
- video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
+ (?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
)
- '''
+ ''' % '|'.join(list(zip(*_STATIONS))[0])
_TESTS = [
{
@@ -108,12 +274,12 @@ class PBSIE(InfoExtractor):
{
'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/',
'info_dict': {
- 'id': '2280706814',
+ 'id': '2276541483',
'display_id': 'player',
'ext': 'mp4',
- 'title': 'American Experience - Death and the Civil War',
+ 'title': 'American Experience - Death and the Civil War, Chapter 1',
'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.',
- 'duration': 6705,
+ 'duration': 682,
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
@@ -134,8 +300,57 @@ class PBSIE(InfoExtractor):
'params': {
'skip_download': True, # requires ffmpeg
},
+ 'skip': 'Expired',
+ },
+ {
+ # Video embedded in iframe containing angle brackets as an attribute's value (e.g.
+ # "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see
+ # https://github.com/rg3/youtube-dl/issues/7059)
+ 'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/',
+ 'info_dict': {
+ 'id': '2365546844',
+ 'display_id': 'a-chefs-life-season-3-episode-5-prickly-business',
+ 'ext': 'mp4',
+ 'title': "A Chef's Life - Season 3, Ep. 5: Prickly Business",
+ 'description': 'md5:61db2ddf27c9912f09c241014b118ed1',
+ 'duration': 1480,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
+ },
+ {
+ # Frontline video embedded via flp2012.js
+ 'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists',
+ 'info_dict': {
+ 'id': '2070868960',
+ 'display_id': 'the-atomic-artists',
+ 'ext': 'mp4',
+ 'title': 'FRONTLINE - The Atomic Artists',
+ 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
+ 'duration': 723,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
+ },
+ {
+ 'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://watch.knpb.org/video/2365616055/',
+ 'only_matching': True,
}
]
+ _ERRORS = {
+ 101: 'We\'re sorry, but this video is not yet available.',
+ 403: 'We\'re sorry, but this video is not available in your region due to rights restrictions.',
+ 404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.',
+ 410: 'This video has expired and is no longer available for online streaming.',
+ }
def _extract_webpage(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -158,6 +373,7 @@ class PBSIE(InfoExtractor):
MEDIA_ID_REGEXES = [
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
r'class="coveplayerid">([^<]+)<', # coveplayer
+ r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
]
@@ -166,9 +382,30 @@ class PBSIE(InfoExtractor):
if media_id:
return media_id, presumptive_id, upload_date
- url = self._search_regex(
- r'<iframe\s+[^>]*\s+src=["\']([^\'"]+partnerplayer[^\'"]+)["\']',
- webpage, 'player URL')
+ # Frontline video embedded via flp
+ video_id = self._search_regex(
+ r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None)
+ if video_id:
+ # prg_id calculation is reverse engineered from
+ # http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js
+ prg_id = self._search_regex(
+ r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid')[7:]
+ if 'q' in prg_id:
+ prg_id = prg_id.split('q')[1]
+ prg_id = int(prg_id, 16)
+ getdir = self._download_json(
+ 'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id,
+ presumptive_id, 'Downloading getdir JSON',
+ transform_source=strip_jsonp)
+ return getdir['mid'], presumptive_id, upload_date
+
+ for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage):
+ url = self._search_regex(
+ r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe,
+ 'player URL', default=None, group='url')
+ if url:
+ break
+
mobj = re.match(self._VALID_URL, url)
player_id = mobj.group('player_id')
@@ -196,7 +433,7 @@ class PBSIE(InfoExtractor):
return self.playlist_result(entries, display_id)
info = self._download_json(
- 'http://video.pbs.org/videoInfo/%s?format=json&type=partner' % video_id,
+ 'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id,
display_id)
formats = []
@@ -213,13 +450,11 @@ class PBSIE(InfoExtractor):
'Downloading %s video url info' % encoding_name)
if redirect_info['status'] == 'error':
- if redirect_info['http_code'] == 403:
- message = (
- 'The video is not available in your region due to '
- 'right restrictions')
- else:
- message = redirect_info['message']
- raise ExtractorError(message, expected=True)
+ raise ExtractorError(
+ '%s said: %s' % (
+ self.IE_NAME,
+ self._ERRORS.get(redirect_info['http_code'], redirect_info['message'])),
+ expected=True)
format_url = redirect_info.get('url')
if not format_url:
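The Frontline flp branch added above derives a numeric package id from the page's videoid: the first seven characters are dropped, anything up to a literal 'q' is stripped, and the remainder is parsed as hexadecimal before being interpolated into the getdir JSON URL. A worked sketch with a made-up videoid (the algorithm mirrors the diff; the input value is hypothetical):

    def frontline_prg_id(videoid):
        # Drop the 7-character prefix, keep what follows an optional 'q',
        # and read the rest as a hex number (reverse engineered from flp2012.js).
        prg_id = videoid[7:]
        if 'q' in prg_id:
            prg_id = prg_id.split('q')[1]
        return int(prg_id, 16)

    # Hypothetical videoid: 7-character prefix 'frol02s' followed by hex '4dc'.
    print(frontline_prg_id('frol02s4dc'))  # 1244
    # -> getdir JSON would be fetched from .../getdir/getdir1244.json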
diff --git a/youtube_dl/extractor/periscope.py b/youtube_dl/extractor/periscope.py
index 8ad936758..63cc764bb 100644
--- a/youtube_dl/extractor/periscope.py
+++ b/youtube_dl/extractor/periscope.py
@@ -2,17 +2,14 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
from ..utils import parse_iso8601
class PeriscopeIE(InfoExtractor):
IE_DESC = 'Periscope'
- _VALID_URL = r'https?://(?:www\.)?periscope\.tv/w/(?P<id>[^/?#]+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?periscope\.tv/[^/]+/(?P<id>[^/?#]+)'
+ # Live example URLs can be found at http://onperiscope.com/
+ _TESTS = [{
'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==',
'md5': '65b57957972e503fcbbaeed8f4fa04ca',
'info_dict': {
@@ -25,11 +22,18 @@ class PeriscopeIE(InfoExtractor):
'uploader_id': '1465763',
},
'skip': 'Expires in 24 hours',
- }
+ }, {
+ 'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://www.periscope.tv/bastaakanoggano/1OdKrlkZZjOJX',
+ 'only_matching': True,
+ }]
- def _call_api(self, method, token):
+ def _call_api(self, method, value):
+ attribute = 'token' if len(value) > 13 else 'broadcast_id'
return self._download_json(
- 'https://api.periscope.tv/api/v2/%s?token=%s' % (method, token), token)
+ 'https://api.periscope.tv/api/v2/%s?%s=%s' % (method, attribute, value), value)
def _real_extract(self, url):
token = self._match_id(url)
@@ -76,24 +80,3 @@ class PeriscopeIE(InfoExtractor):
'thumbnails': thumbnails,
'formats': formats,
}
-
-
-class QuickscopeIE(InfoExtractor):
- IE_DESC = 'Quick Scope'
- _VALID_URL = r'https?://watchonperiscope\.com/broadcast/(?P<id>\d+)'
- _TEST = {
- 'url': 'https://watchonperiscope.com/broadcast/56180087',
- 'only_matching': True,
- }
-
- def _real_extract(self, url):
- broadcast_id = self._match_id(url)
- request = compat_urllib_request.Request(
- 'https://watchonperiscope.com/api/accessChannel', compat_urllib_parse.urlencode({
- 'broadcast_id': broadcast_id,
- 'entry_ticket': '',
- 'from_push': 'false',
- 'uses_sessions': 'true',
- }).encode('utf-8'))
- return self.url_result(
- self._download_json(request, broadcast_id)['share_url'], 'Periscope')
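_call_api now accepts either a session token or a broadcast id and chooses the query parameter by length: Periscope tokens are long opaque strings, while broadcast ids such as '1ZkKzPbMVggJv' are 13 characters or fewer. A small sketch of the resulting URLs (the threshold mirrors the diff; the method name is illustrative):

    def api_url(method, value):
        # Values longer than 13 characters are treated as tokens,
        # shorter ones as broadcast ids -- the heuristic from _call_api.
        attribute = 'token' if len(value) > 13 else 'broadcast_id'
        return 'https://api.periscope.tv/api/v2/%s?%s=%s' % (method, attribute, value)

    print(api_url('getBroadcastPublic', '1ZkKzPbMVggJv'))
    # https://api.periscope.tv/api/v2/getBroadcastPublic?broadcast_id=1ZkKzPbMVggJv
    print(api_url('getBroadcastPublic', 'aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD35'))
    # https://api.periscope.tv/api/v2/getBroadcastPublic?token=aJUQnjY3...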
diff --git a/youtube_dl/extractor/pladform.py b/youtube_dl/extractor/pladform.py
index 551c8c9f0..bc559d1df 100644
--- a/youtube_dl/extractor/pladform.py
+++ b/youtube_dl/extractor/pladform.py
@@ -1,6 +1,8 @@
# coding: utf-8
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
ExtractorError,
@@ -44,6 +46,13 @@ class PladformIE(InfoExtractor):
'only_matching': True,
}]
+ @staticmethod
+ def _extract_url(webpage):
+ mobj = re.search(
+ r'<iframe[^>]+src="(?P<url>(?:https?:)?//out\.pladform\.ru/player\?.+?)"', webpage)
+ if mobj:
+ return mobj.group('url')
+
def _real_extract(self, url):
video_id = self._match_id(url)
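The new static _extract_url makes the Pladform iframe detectable from other extractors (typically the generic one) without instantiating PladformIE. A self-contained sketch of the match against a synthetic page (the iframe markup below is made up for the demonstration):

    import re

    def extract_pladform_url(webpage):
        # Same pattern as PladformIE._extract_url in the hunk above.
        mobj = re.search(
            r'<iframe[^>]+src="(?P<url>(?:https?:)?//out\.pladform\.ru/player\?.+?)"',
            webpage)
        if mobj:
            return mobj.group('url')

    page = '<iframe src="//out.pladform.ru/player?pl=12345&videoid=100183293"></iframe>'
    print(extract_pladform_url(page))
    # //out.pladform.ru/player?pl=12345&videoid=100183293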
diff --git a/youtube_dl/extractor/played.py b/youtube_dl/extractor/played.py
index 8a1c296dd..2856af96f 100644
--- a/youtube_dl/extractor/played.py
+++ b/youtube_dl/extractor/played.py
@@ -5,12 +5,10 @@ import re
import os.path
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -46,7 +44,7 @@ class PlayedIE(InfoExtractor):
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
- req = compat_urllib_request.Request(url, post, headers)
+ req = sanitized_Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')
diff --git a/youtube_dl/extractor/playwire.py b/youtube_dl/extractor/playwire.py
index bdc71017b..6d138ef25 100644
--- a/youtube_dl/extractor/playwire.py
+++ b/youtube_dl/extractor/playwire.py
@@ -19,7 +19,7 @@ class PlaywireIE(InfoExtractor):
'id': '3353705',
'ext': 'mp4',
'title': 'S04_RM_UCL_Rus',
- 'thumbnail': 're:^http://.*\.png$',
+ 'thumbnail': 're:^https?://.*\.png$',
'duration': 145.94,
},
}, {
diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py
index fd32836cc..55c11b3bf 100644
--- a/youtube_dl/extractor/pluralsight.py
+++ b/youtube_dl/extractor/pluralsight.py
@@ -2,28 +2,36 @@ from __future__ import unicode_literals
import re
import json
+import random
+import collections
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
- compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
parse_duration,
+ qualities,
+ sanitized_Request,
)
-class PluralsightIE(InfoExtractor):
+class PluralsightBaseIE(InfoExtractor):
+ _API_BASE = 'http://app.pluralsight.com'
+
+
+class PluralsightIE(PluralsightBaseIE):
IE_NAME = 'pluralsight'
- _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/training/player\?author=(?P<author>[^&]+)&name=(?P<name>[^&]+)(?:&mode=live)?&clip=(?P<clip>\d+)&course=(?P<course>[^&]+)'
- _LOGIN_URL = 'https://www.pluralsight.com/id/'
+ _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/training/player\?'
+ _LOGIN_URL = 'https://app.pluralsight.com/id/'
+
_NETRC_MACHINE = 'pluralsight'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.pluralsight.com/training/player?author=mike-mckeown&name=hosting-sql-server-windows-azure-iaas-m7-mgmt&mode=live&clip=3&course=hosting-sql-server-windows-azure-iaas',
'md5': '4d458cf5cf4c593788672419a8dd4cf8',
'info_dict': {
@@ -33,7 +41,14 @@ class PluralsightIE(InfoExtractor):
'duration': 338,
},
'skip': 'Requires pluralsight account credentials',
- }
+ }, {
+ 'url': 'https://app.pluralsight.com/training/player?course=angularjs-get-started&author=scott-allen&name=angularjs-get-started-m1-introduction&clip=0&mode=live',
+ 'only_matching': True,
+ }, {
+ # available without pluralsight account
+ 'url': 'http://app.pluralsight.com/training/player?author=scott-allen&name=angularjs-get-started-m1-introduction&mode=live&clip=0&course=angularjs-get-started',
+ 'only_matching': True,
+ }]
def _real_initialize(self):
self._login()
@@ -41,7 +56,7 @@ class PluralsightIE(InfoExtractor):
def _login(self):
(username, password) = self._get_login_info()
if username is None:
- self.raise_login_required('Pluralsight account is required')
+ return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
@@ -60,7 +75,7 @@ class PluralsightIE(InfoExtractor):
if not post_url.startswith('http'):
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
@@ -73,31 +88,48 @@ class PluralsightIE(InfoExtractor):
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
+ if all(p not in response for p in ('__INITIAL_STATE__', '"currentUser"')):
+ raise ExtractorError('Unable to log in')
+
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- author = mobj.group('author')
- name = mobj.group('name')
- clip_id = mobj.group('clip')
- course = mobj.group('course')
+ qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+
+ author = qs.get('author', [None])[0]
+ name = qs.get('name', [None])[0]
+ clip_id = qs.get('clip', [None])[0]
+ course = qs.get('course', [None])[0]
+
+ if any(not f for f in (author, name, clip_id, course,)):
+ raise ExtractorError('Invalid URL', expected=True)
display_id = '%s-%s' % (name, clip_id)
webpage = self._download_webpage(url, display_id)
- collection = self._parse_json(
- self._search_regex(
- r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)',
- webpage, 'modules'),
- display_id)
+ modules = self._search_regex(
+ r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)',
+ webpage, 'modules', default=None)
+
+ if modules:
+ collection = self._parse_json(modules, display_id)
+ else:
+ # Webpage may be served in a different layout (see
+ # https://github.com/rg3/youtube-dl/issues/7607)
+ collection = self._parse_json(
+ self._search_regex(
+ r'var\s+initialState\s*=\s*({.+?});\n', webpage, 'initial state'),
+ display_id)['course']['modules']
module, clip = None, None
for module_ in collection:
- if module_.get('moduleName') == name:
+ if name in (module_.get('moduleName'), module_.get('name')):
module = module_
for clip_ in module_.get('clips', []):
clip_index = clip_.get('clipIndex')
if clip_index is None:
+ clip_index = clip_.get('index')
+ if clip_index is None:
continue
if compat_str(clip_index) == clip_id:
clip = clip_
@@ -110,16 +142,49 @@ class PluralsightIE(InfoExtractor):
'low': {'width': 640, 'height': 480},
'medium': {'width': 848, 'height': 640},
'high': {'width': 1024, 'height': 768},
+ 'high-widescreen': {'width': 1280, 'height': 720},
}
+ QUALITIES_PREFERENCE = ('low', 'medium', 'high', 'high-widescreen',)
+ quality_key = qualities(QUALITIES_PREFERENCE)
+
+ AllowedQuality = collections.namedtuple('AllowedQuality', ['ext', 'qualities'])
+
ALLOWED_QUALITIES = (
- ('webm', ('high',)),
- ('mp4', ('low', 'medium', 'high',)),
+ AllowedQuality('webm', ['high', ]),
+ AllowedQuality('mp4', ['low', 'medium', 'high', ]),
)
+ # Some courses also offer widescreen resolution for high quality (see
+ # https://github.com/rg3/youtube-dl/issues/7766)
+ widescreen = bool(re.search(
+ r'courseSupportsWidescreenVideoFormats\s*:\s*true', webpage))
+ best_quality = 'high-widescreen' if widescreen else 'high'
+ if widescreen:
+ for allowed_quality in ALLOWED_QUALITIES:
+ allowed_quality.qualities.append(best_quality)
+
+ # To minimize the number of calls to the ViewClip API and reduce the
+ # probability of being throttled or banned by Pluralsight, we request
+ # only a single format unless a formats listing was explicitly requested.
+ if self._downloader.params.get('listformats', False):
+ allowed_qualities = ALLOWED_QUALITIES
+ else:
+ def guess_allowed_qualities():
+ req_format = self._downloader.params.get('format') or 'best'
+ req_format_split = req_format.split('-', 1)
+ if len(req_format_split) > 1:
+ req_ext, req_quality = req_format_split
+ for allowed_quality in ALLOWED_QUALITIES:
+ if req_ext == allowed_quality.ext and req_quality in allowed_quality.qualities:
+ return (AllowedQuality(req_ext, (req_quality, )), )
+ req_ext = 'webm' if self._downloader.params.get('prefer_free_formats') else 'mp4'
+ return (AllowedQuality(req_ext, (best_quality, )), )
+ allowed_qualities = guess_allowed_qualities()
+
formats = []
- for ext, qualities in ALLOWED_QUALITIES:
- for quality in qualities:
+ for ext, qualities_ in allowed_qualities:
+ for quality in qualities_:
f = QUALITIES[quality].copy()
clip_post = {
'a': author,
@@ -131,19 +196,31 @@ class PluralsightIE(InfoExtractor):
'mt': ext,
'q': '%dx%d' % (f['width'], f['height']),
}
- request = compat_urllib_request.Request(
- 'http://www.pluralsight.com/training/Player/ViewClip',
+ request = sanitized_Request(
+ '%s/training/Player/ViewClip' % self._API_BASE,
json.dumps(clip_post).encode('utf-8'))
request.add_header('Content-Type', 'application/json;charset=utf-8')
format_id = '%s-%s' % (ext, quality)
clip_url = self._download_webpage(
request, display_id, 'Downloading %s URL' % format_id, fatal=False)
+
+ # Pluralsight tracks multiple sequential calls to the ViewClip API and starts
+ # to return 429 HTTP errors after some time (see
+ # https://github.com/rg3/youtube-dl/pull/6989). Moreover, it may even lead
+ # to an account ban (see https://github.com/rg3/youtube-dl/issues/6842).
+ # To somewhat reduce the probability of these consequences
+ # we sleep for a random amount of time before each call to ViewClip.
+ self._sleep(
+ random.randint(2, 5), display_id,
+ '%(video_id)s: Waiting for %(timeout)s seconds to avoid throttling')
+
if not clip_url:
continue
f.update({
'url': clip_url,
'ext': ext,
'format_id': format_id,
+ 'quality': quality_key(quality),
})
formats.append(f)
self._sort_formats(formats)
@@ -163,10 +240,10 @@ class PluralsightIE(InfoExtractor):
}
-class PluralsightCourseIE(InfoExtractor):
+class PluralsightCourseIE(PluralsightBaseIE):
IE_NAME = 'pluralsight:course'
- _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/courses/(?P<id>[^/]+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/(?:library/)?courses/(?P<id>[^/]+)'
+ _TESTS = [{
# Free course from Pluralsight Starter Subscription for Microsoft TechNet
# https://offers.pluralsight.com/technet?loc=zTS3z&prod=zOTprodz&tech=zOttechz&prog=zOTprogz&type=zSOz&media=zOTmediaz&country=zUSz
'url': 'http://www.pluralsight.com/courses/hosting-sql-server-windows-azure-iaas',
@@ -176,7 +253,14 @@ class PluralsightCourseIE(InfoExtractor):
'description': 'md5:61b37e60f21c4b2f91dc621a977d0986',
},
'playlist_count': 31,
- }
+ }, {
+ # available without pluralsight account
+ 'url': 'https://www.pluralsight.com/courses/angularjs-get-started',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://app.pluralsight.com/library/courses/understanding-microsoft-azure-amazon-aws/table-of-contents',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
course_id = self._match_id(url)
@@ -184,14 +268,14 @@ class PluralsightCourseIE(InfoExtractor):
# TODO: PSM cookie
course = self._download_json(
- 'http://www.pluralsight.com/data/course/%s' % course_id,
+ '%s/data/course/%s' % (self._API_BASE, course_id),
course_id, 'Downloading course JSON')
title = course['title']
description = course.get('description') or course.get('shortDescription')
course_data = self._download_json(
- 'http://www.pluralsight.com/data/course/content/%s' % course_id,
+ '%s/data/course/content/%s' % (self._API_BASE, course_id),
course_id, 'Downloading course data JSON')
entries = []
@@ -201,7 +285,7 @@ class PluralsightCourseIE(InfoExtractor):
if not player_parameters:
continue
entries.append(self.url_result(
- 'http://www.pluralsight.com/training/player?%s' % player_parameters,
+ '%s/training/player?%s' % (self._API_BASE, player_parameters),
'Pluralsight'))
return self.playlist_result(entries, course_id, title, description)
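Unless a formats listing is requested, the player extractor above guesses a single (ext, qualities) pair from the requested format string so that only one ViewClip call is made. A standalone sketch of that guess, assuming the same AllowedQuality tuples as the diff (the 'best' fallback is simplified to a fixed quality):

    import collections

    AllowedQuality = collections.namedtuple('AllowedQuality', ['ext', 'qualities'])
    ALLOWED_QUALITIES = (
        AllowedQuality('webm', ['high']),
        AllowedQuality('mp4', ['low', 'medium', 'high']),
    )

    def guess_allowed_qualities(req_format, prefer_free_formats=False,
                                best_quality='high'):
        # '-f mp4-medium' style requests pin both container and quality...
        req_format_split = req_format.split('-', 1)
        if len(req_format_split) > 1:
            req_ext, req_quality = req_format_split
            for allowed_quality in ALLOWED_QUALITIES:
                if (req_ext == allowed_quality.ext and
                        req_quality in allowed_quality.qualities):
                    return (AllowedQuality(req_ext, (req_quality,)),)
        # ...anything else falls back to one best-quality format.
        req_ext = 'webm' if prefer_free_formats else 'mp4'
        return (AllowedQuality(req_ext, (best_quality,)),)

    print(guess_allowed_qualities('mp4-medium'))
    # (AllowedQuality(ext='mp4', qualities=('medium',)),)
    print(guess_allowed_qualities('best'))
    # (AllowedQuality(ext='mp4', qualities=('high',)),)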
diff --git a/youtube_dl/extractor/pornhd.py b/youtube_dl/extractor/pornhd.py
index dbb2c3bd9..57c78ba52 100644
--- a/youtube_dl/extractor/pornhd.py
+++ b/youtube_dl/extractor/pornhd.py
@@ -36,7 +36,8 @@ class PornHdIE(InfoExtractor):
webpage = self._download_webpage(url, display_id or video_id)
title = self._html_search_regex(
- r'<title>(.+) porn HD.+?</title>', webpage, 'title')
+ [r'<span[^>]+class=["\']video-name["\'][^>]*>([^<]+)',
+ r'<title>(.+?) - .*?[Pp]ornHD.*?</title>'], webpage, 'title')
description = self._html_search_regex(
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
view_count = int_or_none(self._html_search_regex(
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 7b0cdc41a..08275687d 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -8,10 +8,10 @@ from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlparse,
- compat_urllib_request,
)
from ..utils import (
ExtractorError,
+ sanitized_Request,
str_to_int,
)
from ..aes import (
@@ -20,7 +20,7 @@ from ..aes import (
class PornHubIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
+ _VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
_TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': '882f488fa1f0026f023f33576004a2ed',
@@ -34,6 +34,9 @@ class PornHubIE(InfoExtractor):
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'only_matching': True,
+ }, {
+ 'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
+ 'only_matching': True,
}]
@classmethod
@@ -50,7 +53,7 @@ class PornHubIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- req = compat_urllib_request.Request(
+ req = sanitized_Request(
'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
@@ -144,7 +147,8 @@ class PornHubPlaylistIE(InfoExtractor):
entries = [
self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub')
- for video_url in set(re.findall('href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage))
+ for video_url in set(re.findall(
+ r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"', webpage))
]
playlist = self._parse_json(
diff --git a/youtube_dl/extractor/pornotube.py b/youtube_dl/extractor/pornotube.py
index 34735c51e..5398e708b 100644
--- a/youtube_dl/extractor/pornotube.py
+++ b/youtube_dl/extractor/pornotube.py
@@ -3,11 +3,9 @@ from __future__ import unicode_literals
import json
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
int_or_none,
+ sanitized_Request,
)
@@ -46,7 +44,7 @@ class PornotubeIE(InfoExtractor):
'authenticationSpaceKey': originAuthenticationSpaceKey,
'credentials': 'Clip Application',
}
- token_req = compat_urllib_request.Request(
+ token_req = sanitized_Request(
'https://api.aebn.net/auth/v1/token/primal',
data=json.dumps(token_req_data).encode('utf-8'))
token_req.add_header('Content-Type', 'application/json')
@@ -56,7 +54,7 @@ class PornotubeIE(InfoExtractor):
token = token_answer['tokenKey']
# Get video URL
- delivery_req = compat_urllib_request.Request(
+ delivery_req = sanitized_Request(
'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id)
delivery_req.add_header('Authorization', token)
delivery_info = self._download_json(
@@ -64,7 +62,7 @@ class PornotubeIE(InfoExtractor):
video_url = delivery_info['mediaUrl']
# Get additional info (title etc.)
- info_req = compat_urllib_request.Request(
+ info_req = sanitized_Request(
'https://api.aebn.net/content/v1/clips/%s?expand='
'title,description,primaryImageNumber,startSecond,endSecond,'
'movie.title,movie.MovieId,movie.boxCoverFront,movie.stars,'
diff --git a/youtube_dl/extractor/primesharetv.py b/youtube_dl/extractor/primesharetv.py
index 304359dc5..85aae9576 100644
--- a/youtube_dl/extractor/primesharetv.py
+++ b/youtube_dl/extractor/primesharetv.py
@@ -1,11 +1,11 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
+from ..compat import compat_urllib_parse
+from ..utils import (
+ ExtractorError,
+ sanitized_Request,
)
-from ..utils import ExtractorError
class PrimeShareTVIE(InfoExtractor):
@@ -41,7 +41,7 @@ class PrimeShareTVIE(InfoExtractor):
webpage, 'wait time', default=7)) + 1
self._sleep(wait_time, video_id)
- req = compat_urllib_request.Request(
+ req = sanitized_Request(
url, compat_urllib_parse.urlencode(fields), headers)
video_page = self._download_webpage(
req, video_id, 'Downloading video page')
diff --git a/youtube_dl/extractor/promptfile.py b/youtube_dl/extractor/promptfile.py
index 8190ed676..d5357283a 100644
--- a/youtube_dl/extractor/promptfile.py
+++ b/youtube_dl/extractor/promptfile.py
@@ -4,13 +4,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
determine_ext,
ExtractorError,
+ sanitized_Request,
)
@@ -37,7 +35,7 @@ class PromptFileIE(InfoExtractor):
fields = self._hidden_inputs(webpage)
post = compat_urllib_parse.urlencode(fields)
- req = compat_urllib_request.Request(url, post)
+ req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(
req, video_id, 'Downloading video page')
diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index effcf1db3..baa54a3af 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -20,7 +20,7 @@ from ..utils import (
class ProSiebenSat1IE(InfoExtractor):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
- _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at)|ran\.de|fem\.com)/(?P<id>.+)'
+ _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at|ch)|ran\.de|fem\.com)/(?P<id>.+)'
_TESTS = [
{
diff --git a/youtube_dl/extractor/qqmusic.py b/youtube_dl/extractor/qqmusic.py
index 1654a641f..1ba3bbddf 100644
--- a/youtube_dl/extractor/qqmusic.py
+++ b/youtube_dl/extractor/qqmusic.py
@@ -7,11 +7,11 @@ import re
from .common import InfoExtractor
from ..utils import (
+ sanitized_Request,
strip_jsonp,
unescapeHTML,
clean_html,
)
-from ..compat import compat_urllib_request
class QQMusicIE(InfoExtractor):
@@ -25,7 +25,7 @@ class QQMusicIE(InfoExtractor):
'id': '004295Et37taLD',
'ext': 'mp3',
'title': '可惜没如果',
- 'upload_date': '20141227',
+ 'release_date': '20141227',
'creator': '林俊杰',
'description': 'md5:d327722d0361576fde558f1ac68a7065',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -38,11 +38,26 @@ class QQMusicIE(InfoExtractor):
'id': '004MsGEo3DdNxV',
'ext': 'mp3',
'title': '如果',
- 'upload_date': '20050626',
+ 'release_date': '20050626',
'creator': '李季美',
'description': 'md5:46857d5ed62bc4ba84607a805dccf437',
'thumbnail': 're:^https?://.*\.jpg$',
}
+ }, {
+ 'note': 'lyrics not in .lrc format',
+ 'url': 'http://y.qq.com/#type=song&mid=001JyApY11tIp6',
+ 'info_dict': {
+ 'id': '001JyApY11tIp6',
+ 'ext': 'mp3',
+ 'title': 'Shadows Over Transylvania',
+ 'release_date': '19970225',
+ 'creator': 'Dark Funeral',
+ 'description': 'md5:ed14d5bd7ecec19609108052c25b2c11',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
_FORMATS = {
@@ -112,15 +127,27 @@ class QQMusicIE(InfoExtractor):
self._check_formats(formats, mid)
self._sort_formats(formats)
- return {
+ actual_lrc_lyrics = ''.join(
+ line + '\n' for line in re.findall(
+ r'(?m)^(\[[0-9]{2}:[0-9]{2}(?:\.[0-9]{2,})?\][^\n]*|\[[^\]]*\])', lrc_content))
+
+ info_dict = {
'id': mid,
'formats': formats,
'title': song_name,
- 'upload_date': publish_time,
+ 'release_date': publish_time,
'creator': singer,
'description': lrc_content,
- 'thumbnail': thumbnail_url,
+ 'thumbnail': thumbnail_url
}
+ if actual_lrc_lyrics:
+ info_dict['subtitles'] = {
+ 'origin': [{
+ 'ext': 'lrc',
+ 'data': actual_lrc_lyrics,
+ }]
+ }
+ return info_dict
class QQPlaylistBaseIE(InfoExtractor):
@@ -174,7 +201,7 @@ class QQMusicSingerIE(QQPlaylistBaseIE):
singer_desc = None
if singer_id:
- req = compat_urllib_request.Request(
+ req = sanitized_Request(
'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg?utf8=1&outCharset=utf-8&format=xml&singerid=%s' % singer_id)
req.add_header(
'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html')
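The subtitles branch added to QQMusicIE keeps only the lines of lrc_content that actually look like LRC -- '[mm:ss.xx]' timestamped lyric lines or bare '[tag]' metadata lines -- so free-form prose stays in the description but is not offered as an .lrc subtitle. A worked sketch of the same regex on a synthetic sample:

    import re

    def extract_lrc(lrc_content):
        # Keep timestamped lyric lines and [tag] metadata lines, drop prose.
        return ''.join(
            line + '\n' for line in re.findall(
                r'(?m)^(\[[0-9]{2}:[0-9]{2}(?:\.[0-9]{2,})?\][^\n]*|\[[^\]]*\])',
                lrc_content))

    sample = '[ti:Example]\n[00:12.00]first line\nplain prose, not LRC\n[00:15.30]second line'
    print(extract_lrc(sample))
    # [ti:Example]
    # [00:12.00]first line
    # [00:15.30]second line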
diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
index 1631faf29..7ff1d06c4 100644
--- a/youtube_dl/extractor/rai.py
+++ b/youtube_dl/extractor/rai.py
@@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
+ compat_urlparse,
)
from ..utils import (
parse_duration,
@@ -72,6 +73,18 @@ class RaiIE(InfoExtractor):
'description': 'Primo appuntamento con "Il candidato" con Filippo Timi, alias Piero Zucca presidente!',
'uploader': 'RaiTre',
}
+ },
+ {
+ 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html',
+ 'md5': '037104d2c14132887e5e4cf114569214',
+ 'info_dict': {
+ 'id': '0c7a664b-d0f4-4b2c-8835-3f82e46f433e',
+ 'ext': 'flv',
+ 'title': 'Il pacco',
+ 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a',
+ 'uploader': 'RaiTre',
+ 'upload_date': '20141221',
+ },
}
]
@@ -90,11 +103,14 @@ class RaiIE(InfoExtractor):
relinker_url = self._extract_relinker_url(webpage)
if not relinker_url:
- iframe_path = self._search_regex(
- r'<iframe[^>]+src="/?(dl/[^"]+\?iframe\b[^"]*)"',
+ iframe_url = self._search_regex(
+ [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"',
+ r'drawMediaRaiTV\(["\'](.+?)["\']'],
webpage, 'iframe')
+ if not iframe_url.startswith('http'):
+ iframe_url = compat_urlparse.urljoin(url, iframe_url)
webpage = self._download_webpage(
- '%s/%s' % (host, iframe_path), video_id)
+ iframe_url, video_id)
relinker_url = self._extract_relinker_url(webpage)
relinker = self._download_json(
diff --git a/youtube_dl/extractor/rtbf.py b/youtube_dl/extractor/rtbf.py
index e4215d546..e42b319a3 100644
--- a/youtube_dl/extractor/rtbf.py
+++ b/youtube_dl/extractor/rtbf.py
@@ -9,8 +9,8 @@ from ..utils import (
class RTBFIE(InfoExtractor):
- _VALID_URL = r'https?://www.rtbf.be/video/[^\?]+\?id=(?P<id>\d+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?rtbf\.be/(?:video/[^?]+\?.*\bid=|ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=)(?P<id>\d+)'
+ _TESTS = [{
'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274',
'md5': '799f334ddf2c0a582ba80c44655be570',
'info_dict': {
@@ -19,7 +19,14 @@ class RTBFIE(InfoExtractor):
'title': 'Les Diables au coeur (épisode 2)',
'duration': 3099,
}
- }
+ }, {
+ # geo restricted
+ 'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858',
+ 'only_matching': True,
+ }]
_QUALITIES = [
('mobile', 'mobile'),
@@ -36,7 +43,7 @@ class RTBFIE(InfoExtractor):
data = self._parse_json(
unescapeHTML(self._search_regex(
- r'data-video="([^"]+)"', webpage, 'data video')),
+ r'data-media="([^"]+)"', webpage, 'data video')),
video_id)
if data.get('provider').lower() == 'youtube':
diff --git a/youtube_dl/extractor/rte.py b/youtube_dl/extractor/rte.py
index 04158b993..d9cfbf180 100644
--- a/youtube_dl/extractor/rte.py
+++ b/youtube_dl/extractor/rte.py
@@ -9,16 +9,16 @@ from ..utils import (
class RteIE(InfoExtractor):
- _VALID_URL = r'http?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/(?P<id>[0-9]+)/'
+ _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)'
_TEST = {
- 'url': 'http://www.rte.ie/player/de/show/10363114/',
+ 'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/',
'info_dict': {
- 'id': '10363114',
+ 'id': '10478715',
'ext': 'mp4',
- 'title': 'One News',
+ 'title': 'Watch iWitness online',
'thumbnail': 're:^https?://.*\.jpg$',
- 'description': 'The One O\'Clock News followed by Weather.',
- 'duration': 436.844,
+ 'description': 'iWitness : The spirit of Ireland, one voice and one minute at a time.',
+ 'duration': 60.046,
},
'params': {
'skip_download': 'f4m fails with --test atm'
diff --git a/youtube_dl/extractor/rtve.py b/youtube_dl/extractor/rtve.py
index 5b97d33ca..603d7bd00 100644
--- a/youtube_dl/extractor/rtve.py
+++ b/youtube_dl/extractor/rtve.py
@@ -6,11 +6,11 @@ import re
import time
from .common import InfoExtractor
-from ..compat import compat_urllib_request, compat_urlparse
from ..utils import (
ExtractorError,
float_or_none,
remove_end,
+ sanitized_Request,
std_headers,
struct_unpack,
)
@@ -102,20 +102,14 @@ class RTVEALaCartaIE(InfoExtractor):
if info['state'] == 'DESPU':
raise ExtractorError('The video is no longer available', expected=True)
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id)
- png_request = compat_urllib_request.Request(png_url)
+ png_request = sanitized_Request(png_url)
png_request.add_header('Referer', url)
png = self._download_webpage(png_request, video_id, 'Downloading url information')
video_url = _decrypt_url(png)
if not video_url.endswith('.f4m'):
- auth_url = video_url.replace(
+ video_url = video_url.replace(
'resources/', 'auth/resources/'
).replace('.net.rtve', '.multimedia.cdn.rtve')
- video_path = self._download_webpage(
- auth_url, video_id, 'Getting video url')
- # Use mvod1.akcdn instead of flash.akamaihd.multimedia.cdn to get
- # the right Content-Length header and the mp4 format
- video_url = compat_urlparse.urljoin(
- 'http://mvod1.akcdn.rtve.es/', video_path)
subtitles = None
if info.get('sbtFile') is not None:
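For non-f4m RTVE URLs, the auth URL is now derived with two plain string substitutions instead of a second HTTP round-trip through mvod1.akcdn. A short worked example (the host and path are made up; the substitutions mirror the hunk above):

    def auth_video_url(video_url):
        # Mirrors the two .replace() calls in the diff.
        return video_url.replace(
            'resources/', 'auth/resources/'
        ).replace('.net.rtve', '.multimedia.cdn.rtve')

    print(auth_video_url('http://media.net.rtve.es/resources/video.mp4'))
    # http://media.multimedia.cdn.rtve.es/auth/resources/video.mp4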
diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py
index d94dc7399..9db62adb1 100644
--- a/youtube_dl/extractor/rutube.py
+++ b/youtube_dl/extractor/rutube.py
@@ -9,7 +9,7 @@ from ..compat import (
compat_str,
)
from ..utils import (
- ExtractorError,
+ determine_ext,
unified_strdate,
)
@@ -17,9 +17,9 @@ from ..utils import (
class RutubeIE(InfoExtractor):
IE_NAME = 'rutube'
IE_DESC = 'Rutube videos'
- _VALID_URL = r'https?://rutube\.ru/video/(?P<id>[\da-z]{32})'
+ _VALID_URL = r'https?://rutube\.ru/(?:video|play/embed)/(?P<id>[\da-z]{32})'
- _TEST = {
+ _TESTS = [{
'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
'info_dict': {
'id': '3eac3b4561676c17df9132a9a1e62e3e',
@@ -36,7 +36,10 @@ class RutubeIE(InfoExtractor):
# It requires ffmpeg (m3u8 download)
'skip_download': True,
},
- }
+ }, {
+ 'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
@@ -51,10 +54,25 @@ class RutubeIE(InfoExtractor):
'http://rutube.ru/api/play/options/%s/?format=json' % video_id,
video_id, 'Downloading options JSON')
- m3u8_url = options['video_balancer'].get('m3u8')
- if m3u8_url is None:
- raise ExtractorError('Couldn\'t find m3u8 manifest url')
- formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
+ formats = []
+ for format_id, format_url in options['video_balancer'].items():
+ ext = determine_ext(format_url)
+ if ext == 'm3u8':
+ m3u8_formats = self._extract_m3u8_formats(
+ format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ elif ext == 'f4m':
+ f4m_formats = self._extract_f4m_formats(
+ format_url, video_id, f4m_id=format_id, fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
+ else:
+ formats.append({
+ 'url': format_url,
+ 'format_id': format_id,
+ })
+ self._sort_formats(formats)
return {
'id': video['id'],
@@ -74,9 +92,9 @@ class RutubeIE(InfoExtractor):
class RutubeEmbedIE(InfoExtractor):
IE_NAME = 'rutube:embed'
IE_DESC = 'Rutube embedded videos'
- _VALID_URL = 'https?://rutube\.ru/video/embed/(?P<id>[0-9]+)'
+ _VALID_URL = 'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
'info_dict': {
'id': 'a10e53b86e8f349080f718582ce4c661',
@@ -90,7 +108,10 @@ class RutubeEmbedIE(InfoExtractor):
'params': {
'skip_download': 'Requires ffmpeg',
},
- }
+ }, {
+ 'url': 'http://rutube.ru/play/embed/8083783',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
embed_id = self._match_id(url)
diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py
index c67ad25ce..e417bf661 100644
--- a/youtube_dl/extractor/ruutu.py
+++ b/youtube_dl/extractor/ruutu.py
@@ -57,16 +57,21 @@ class RuutuIE(InfoExtractor):
extract_formats(child)
elif child.tag.endswith('File'):
video_url = child.text
- if not video_url or video_url in processed_urls or 'NOT_USED' in video_url:
+ if (not video_url or video_url in processed_urls or
+ any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
return
processed_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
- video_url, video_id, 'mp4', m3u8_id='hls'))
+ m3u8_formats = self._extract_m3u8_formats(
+ video_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
elif ext == 'f4m':
- formats.extend(self._extract_f4m_formats(
- video_url, video_id, f4m_id='hds'))
+ f4m_formats = self._extract_f4m_formats(
+ video_url, video_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
else:
proto = compat_urllib_parse_urlparse(video_url).scheme
if not child.tag.startswith('HTTP') and proto != 'rtmp':
@@ -74,7 +79,7 @@ class RuutuIE(InfoExtractor):
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
- width, height = [int_or_none(x) for x in child.get('resolution', '').split('x')]
+ width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
'format_id': '%s-%s' % (proto, label if label else tbr),
'url': video_url,
diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py
index a602af692..7de7b7273 100644
--- a/youtube_dl/extractor/safari.py
+++ b/youtube_dl/extractor/safari.py
@@ -4,16 +4,14 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from .brightcove import BrightcoveIE
+from .brightcove import BrightcoveLegacyIE
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
from ..utils import (
ExtractorError,
+ sanitized_Request,
smuggle_url,
std_headers,
+ urlencode_postdata,
)
@@ -58,8 +56,8 @@ class SafariBaseIE(InfoExtractor):
'next': '',
}
- request = compat_urllib_request.Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(login_form), headers=headers)
+ request = sanitized_Request(
+ self._LOGIN_URL, urlencode_postdata(login_form), headers=headers)
login_page = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -112,11 +110,11 @@ class SafariIE(SafariBaseIE):
'%s/%s/chapter-content/%s.html' % (self._API_BASE, course_id, part),
part)
- bc_url = BrightcoveIE._extract_brightcove_url(webpage)
+ bc_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
if not bc_url:
raise ExtractorError('Could not extract Brightcove URL from %s' % url, expected=True)
- return self.url_result(smuggle_url(bc_url, {'Referer': url}), 'Brightcove')
+ return self.url_result(smuggle_url(bc_url, {'Referer': url}), 'BrightcoveLegacy')
class SafariCourseIE(SafariBaseIE):
diff --git a/youtube_dl/extractor/sandia.py b/youtube_dl/extractor/sandia.py
index 9c88167f0..759898a49 100644
--- a/youtube_dl/extractor/sandia.py
+++ b/youtube_dl/extractor/sandia.py
@@ -6,14 +6,12 @@ import json
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urlparse,
-)
+from ..compat import compat_urlparse
from ..utils import (
int_or_none,
js_to_json,
mimetype2ext,
+ sanitized_Request,
unified_strdate,
)
@@ -37,7 +35,7 @@ class SandiaIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
req.add_header('Cookie', 'MediasitePlayerCaps=ClientPlugins=4')
webpage = self._download_webpage(req, video_id)
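
sanitized_Request, which this commit substitutes for compat_urllib_request.Request across the extractors, keeps the same constructor signature but first runs the URL through youtube-dl's URL sanitizer (escaping characters plain urllib would reject), so call sites only change the constructor name. A minimal usage sketch with an illustrative endpoint:

    from youtube_dl.compat import compat_urllib_parse
    from youtube_dl.utils import sanitized_Request

    # Same shape as compat_urllib_request.Request; only the URL is cleaned.
    req = sanitized_Request(
        'http://example.com/api',
        compat_urllib_parse.urlencode({'id': '123'}).encode('utf-8'))
    req.add_header('Cookie', 'age_verified=1')
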
diff --git a/youtube_dl/extractor/senateisvp.py b/youtube_dl/extractor/senateisvp.py
index 9c53704ea..474ebb49b 100644
--- a/youtube_dl/extractor/senateisvp.py
+++ b/youtube_dl/extractor/senateisvp.py
@@ -121,9 +121,9 @@ class SenateISVPIE(InfoExtractor):
'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=',
}]
else:
- hdcore_sign = '?hdcore=3.1.0'
+ hdcore_sign = 'hdcore=3.1.0'
url_params = (domain, video_id, stream_num)
- f4m_url = '%s/z/%s_1@%s/manifest.f4m' % url_params + hdcore_sign
+ f4m_url = '%s/z/%s_1@%s/manifest.f4m?' % url_params + hdcore_sign
m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params
for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'):
# URLs without the extra param induce an 404 error
diff --git a/youtube_dl/extractor/shahid.py b/youtube_dl/extractor/shahid.py
index 6e9903d5e..f76fb12c0 100644
--- a/youtube_dl/extractor/shahid.py
+++ b/youtube_dl/extractor/shahid.py
@@ -16,7 +16,7 @@ class ShahidIE(InfoExtractor):
'url': 'https://shahid.mbc.net/ar/episode/90574/%D8%A7%D9%84%D9%85%D9%84%D9%83-%D8%B9%D8%A8%D8%AF%D8%A7%D9%84%D9%84%D9%87-%D8%A7%D9%84%D8%A5%D9%86%D8%B3%D8%A7%D9%86-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-3.html',
'info_dict': {
'id': '90574',
- 'ext': 'm3u8',
+ 'ext': 'mp4',
'title': 'الملك عبدالله الإنسان الموسم 1 كليب 3',
'description': 'الفيلم الوثائقي - الملك عبد الله الإنسان',
'duration': 2972,
@@ -81,7 +81,7 @@ class ShahidIE(InfoExtractor):
compat_urllib_parse.urlencode({
'apiKey': 'sh@hid0nlin3',
'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
- }).encode('utf-8')),
+ })),
video_id, 'Downloading video JSON')
video = video[api_vars['playerType']]
diff --git a/youtube_dl/extractor/shared.py b/youtube_dl/extractor/shared.py
index c5636e8e9..8eda3c864 100644
--- a/youtube_dl/extractor/shared.py
+++ b/youtube_dl/extractor/shared.py
@@ -3,13 +3,11 @@ from __future__ import unicode_literals
import base64
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
int_or_none,
+ sanitized_Request,
)
@@ -46,7 +44,7 @@ class SharedIE(InfoExtractor):
'Video %s does not exist' % video_id, expected=True)
download_form = self._hidden_inputs(webpage)
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
url, compat_urllib_parse.urlencode(download_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
diff --git a/youtube_dl/extractor/sharesix.py b/youtube_dl/extractor/sharesix.py
index ac3e3adf2..f1ea9bdb2 100644
--- a/youtube_dl/extractor/sharesix.py
+++ b/youtube_dl/extractor/sharesix.py
@@ -4,12 +4,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
parse_duration,
+ sanitized_Request,
)
@@ -50,7 +48,7 @@ class ShareSixIE(InfoExtractor):
'method_free': 'Free'
}
post = compat_urllib_parse.urlencode(fields)
- req = compat_urllib_request.Request(url, post)
+ req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(req, video_id,
diff --git a/youtube_dl/extractor/sina.py b/youtube_dl/extractor/sina.py
index 0891a441f..b2258a0f6 100644
--- a/youtube_dl/extractor/sina.py
+++ b/youtube_dl/extractor/sina.py
@@ -4,10 +4,8 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse
+from ..utils import sanitized_Request
class SinaIE(InfoExtractor):
@@ -61,7 +59,7 @@ class SinaIE(InfoExtractor):
if mobj.group('token') is not None:
# The video id is in the redirected url
self.to_screen('Getting video id')
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
request.get_method = lambda: 'HEAD'
(_, urlh) = self._download_webpage_handle(request, 'NA', False)
return self._real_extract(urlh.geturl())
diff --git a/youtube_dl/extractor/skynewsarabia.py b/youtube_dl/extractor/skynewsarabia.py
new file mode 100644
index 000000000..05e1b02ad
--- /dev/null
+++ b/youtube_dl/extractor/skynewsarabia.py
@@ -0,0 +1,117 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+ parse_iso8601,
+ parse_duration,
+)
+
+
+class SkyNewsArabiaBaseIE(InfoExtractor):
+ _IMAGE_BASE_URL = 'http://www.skynewsarabia.com/web/images'
+
+ def _call_api(self, path, value):
+ return self._download_json('http://api.skynewsarabia.com/web/rest/v2/%s/%s.json' % (path, value), value)
+
+ def _get_limelight_media_id(self, url):
+ return self._search_regex(r'/media/[^/]+/([a-z0-9]{32})', url, 'limelight media id')
+
+ def _get_image_url(self, image_path_template, width='1600', height='1200'):
+ return self._IMAGE_BASE_URL + image_path_template.format(width=width, height=height)
+
+ def _extract_video_info(self, video_data):
+ video_id = compat_str(video_data['id'])
+ topic = video_data.get('topicTitle')
+ return {
+ '_type': 'url_transparent',
+ 'url': 'limelight:media:%s' % self._get_limelight_media_id(video_data['videoUrl'][0]['url']),
+ 'id': video_id,
+ 'title': video_data['headline'],
+ 'description': video_data.get('summary'),
+ 'thumbnail': self._get_image_url(video_data['mediaAsset']['imageUrl']),
+ 'timestamp': parse_iso8601(video_data.get('date')),
+ 'duration': parse_duration(video_data.get('runTime')),
+ 'tags': video_data.get('tags', []),
+ 'categories': [topic] if topic else [],
+ 'webpage_url': 'http://www.skynewsarabia.com/web/video/%s' % video_id,
+ 'ie_key': 'LimelightMedia',
+ }
+
+
+class SkyNewsArabiaIE(SkyNewsArabiaBaseIE):
+ IE_NAME = 'skynewsarabia:video'
+ _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/video/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.skynewsarabia.com/web/video/794902/%D9%86%D8%B5%D9%81-%D9%85%D9%84%D9%8A%D9%88%D9%86-%D9%85%D8%B5%D8%A8%D8%A7%D8%AD-%D8%B4%D8%AC%D8%B1%D8%A9-%D9%83%D8%B1%D9%8A%D8%B3%D9%85%D8%A7%D8%B3',
+ 'info_dict': {
+ 'id': '794902',
+ 'ext': 'flv',
+ 'title': 'نصف مليون مصباح على شجرة كريسماس',
+ 'description': 'md5:22f1b27f0850eeb10c7e59b1f16eb7c6',
+ 'upload_date': '20151128',
+ 'timestamp': 1448697198,
+ 'duration': 2119,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ video_data = self._call_api('video', video_id)
+ return self._extract_video_info(video_data)
+
+
+class SkyNewsArabiaArticleIE(SkyNewsArabiaBaseIE):
+ IE_NAME = 'skynewsarabia:article'
+ _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/article/(?P<id>[0-9]+)'
+ _TESTS = [{
+ 'url': 'http://www.skynewsarabia.com/web/article/794549/%D8%A7%D9%94%D8%AD%D8%AF%D8%A7%D8%AB-%D8%A7%D9%84%D8%B4%D8%B1%D9%82-%D8%A7%D9%84%D8%A7%D9%94%D9%88%D8%B3%D8%B7-%D8%AE%D8%B1%D9%8A%D8%B7%D8%A9-%D8%A7%D9%84%D8%A7%D9%94%D9%84%D8%B9%D8%A7%D8%A8-%D8%A7%D9%84%D8%B0%D9%83%D9%8A%D8%A9',
+ 'info_dict': {
+ 'id': '794549',
+ 'ext': 'flv',
+ 'title': 'بالفيديو.. ألعاب ذكية تحاكي واقع المنطقة',
+ 'description': 'md5:0c373d29919a851e080ee4edd0c5d97f',
+ 'upload_date': '20151126',
+ 'timestamp': 1448559336,
+ 'duration': 281.6,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.skynewsarabia.com/web/article/794844/%D8%A7%D8%B3%D8%AA%D9%87%D8%AF%D8%A7%D9%81-%D9%82%D9%88%D8%A7%D8%B1%D8%A8-%D8%A7%D9%94%D8%B3%D9%84%D8%AD%D8%A9-%D9%84%D9%85%D9%8A%D9%84%D9%8A%D8%B4%D9%8A%D8%A7%D8%AA-%D8%A7%D9%84%D8%AD%D9%88%D8%AB%D9%8A-%D9%88%D8%B5%D8%A7%D9%84%D8%AD',
+ 'info_dict': {
+ 'id': '794844',
+ 'title': 'إحباط تهريب أسلحة لميليشيات الحوثي وصالح بجنوب اليمن',
+ 'description': 'md5:5c927b8b2e805796e7f693538d96fc7e',
+ },
+ 'playlist_mincount': 2,
+ }]
+
+ def _real_extract(self, url):
+ article_id = self._match_id(url)
+ article_data = self._call_api('article', article_id)
+ media_asset = article_data['mediaAsset']
+ if media_asset['type'] == 'VIDEO':
+ topic = article_data.get('topicTitle')
+ return {
+ '_type': 'url_transparent',
+ 'url': 'limelight:media:%s' % self._get_limelight_media_id(media_asset['videoUrl'][0]['url']),
+ 'id': article_id,
+ 'title': article_data['headline'],
+ 'description': article_data.get('summary'),
+ 'thumbnail': self._get_image_url(media_asset['imageUrl']),
+ 'timestamp': parse_iso8601(article_data.get('date')),
+ 'tags': article_data.get('tags', []),
+ 'categories': [topic] if topic else [],
+ 'webpage_url': url,
+ 'ie_key': 'LimelightMedia',
+ }
+ entries = [self._extract_video_info(item) for item in article_data.get('inlineItems', []) if item['type'] == 'VIDEO']
+ return self.playlist_result(entries, article_id, article_data['headline'], article_data.get('summary'))
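
Note that neither extractor downloads media itself: the results are url_transparent pointers at the LimelightMedia extractor, which resolves the actual formats while the Sky News Arabia metadata set here (title, timestamp, thumbnail, and so on) overrides whatever Limelight reports. Reduced to its skeleton, the delegation looks like this (field values illustrative):

    def limelight_delegate(media_id, video_id, title, **extra):
        # '_type': 'url_transparent' hands format extraction to the extractor
        # named by ie_key; fields supplied here override that extractor's output.
        info = {
            '_type': 'url_transparent',
            'ie_key': 'LimelightMedia',
            'url': 'limelight:media:%s' % media_id,
            'id': video_id,
            'title': title,
        }
        info.update(extra)
        return info
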
diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py
index 35a81ee87..30210c8a3 100644
--- a/youtube_dl/extractor/smotri.py
+++ b/youtube_dl/extractor/smotri.py
@@ -7,13 +7,11 @@ import hashlib
import uuid
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
int_or_none,
+ sanitized_Request,
unified_strdate,
)
@@ -176,7 +174,7 @@ class SmotriIE(InfoExtractor):
if video_password:
video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
@@ -339,7 +337,7 @@ class SmotriBroadcastIE(InfoExtractor):
'password': password,
}
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
broadcast_page = self._download_webpage(
diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index ba2d5e19b..ea8fc258d 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -6,11 +6,11 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_request,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -96,7 +96,7 @@ class SohuIE(InfoExtractor):
else:
base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
- req = compat_urllib_request.Request(base_data_url + vid_id)
+ req = sanitized_Request(base_data_url + vid_id)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
if cn_verification_proxy:
@@ -158,6 +158,7 @@ class SohuIE(InfoExtractor):
'file': clips_url[i],
'new': su[i],
'prod': 'flash',
+ 'rb': 1,
}
if cdnId is not None:
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index ed5dcc0d3..02e64e094 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -4,13 +4,17 @@ from __future__ import unicode_literals
import re
import itertools
-from .common import InfoExtractor
+from .common import (
+ InfoExtractor,
+ SearchInfoExtractor
+)
from ..compat import (
compat_str,
compat_urlparse,
compat_urllib_parse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
int_or_none,
unified_strdate,
@@ -113,7 +117,7 @@ class SoundcloudIE(InfoExtractor):
},
]
- _CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'
+ _CLIENT_ID = '02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea'
_IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf'
def report_resolve(self, video_id):
@@ -469,3 +473,60 @@ class SoundcloudPlaylistIE(SoundcloudIE):
'description': data.get('description'),
'entries': entries,
}
+
+
+class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
+ IE_NAME = 'soundcloud:search'
+ IE_DESC = 'Soundcloud search'
+ _MAX_RESULTS = float('inf')
+ _TESTS = [{
+ 'url': 'scsearch15:post-avant jazzcore',
+ 'info_dict': {
+ 'title': 'post-avant jazzcore',
+ },
+ 'playlist_count': 15,
+ }]
+
+ _SEARCH_KEY = 'scsearch'
+ _MAX_RESULTS_PER_PAGE = 200
+ _DEFAULT_RESULTS_PER_PAGE = 50
+ _API_V2_BASE = 'https://api-v2.soundcloud.com'
+
+ def _get_collection(self, endpoint, collection_id, **query):
+ limit = min(
+ query.get('limit', self._DEFAULT_RESULTS_PER_PAGE),
+ self._MAX_RESULTS_PER_PAGE)
+ query['limit'] = limit
+ query['client_id'] = self._CLIENT_ID
+ query['linked_partitioning'] = '1'
+ query['offset'] = 0
+ data = compat_urllib_parse.urlencode(encode_dict(query))
+ next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data)
+
+ collected_results = 0
+
+ for i in itertools.count(1):
+ response = self._download_json(
+ next_url, collection_id, 'Downloading page {0}'.format(i),
+ 'Unable to download API page')
+
+ collection = response.get('collection', [])
+ if not collection:
+ break
+
+ collection = list(filter(bool, collection))
+ collected_results += len(collection)
+
+ for item in collection:
+ yield self.url_result(item['uri'], SoundcloudIE.ie_key())
+
+ if not collection or collected_results >= limit:
+ break
+
+ next_url = response.get('next_href')
+ if not next_url:
+ break
+
+ def _get_n_results(self, query, n):
+ tracks = self._get_collection('/search/tracks', query, limit=n, q=query)
+ return self.playlist_result(tracks, playlist_title=query)
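
The new search extractor rides api-v2's linked_partitioning protocol: each JSON page carries a collection plus a next_href cursor, and _get_collection is a generator that stops on an empty page, a missing cursor, or once enough entries have been yielded, so scsearchN: queries fetch no more pages than needed. A hedged usage sketch (options and query illustrative):

    from youtube_dl import YoutubeDL

    # Equivalent to: youtube-dl "scsearch5:post-avant jazzcore"
    with YoutubeDL({'quiet': True}) as ydl:
        result = ydl.extract_info('scsearch5:post-avant jazzcore', download=False)
        print(result['title'], len(result['entries']))
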
diff --git a/youtube_dl/extractor/space.py b/youtube_dl/extractor/space.py
index c2d0d36a6..ebb5d6ec0 100644
--- a/youtube_dl/extractor/space.py
+++ b/youtube_dl/extractor/space.py
@@ -3,14 +3,14 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from .brightcove import BrightcoveIE
+from .brightcove import BrightcoveLegacyIE
from ..utils import RegexNotFoundError, ExtractorError
class SpaceIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|m)\.)?space\.com/\d+-(?P<title>[^/\.\?]*?)-video\.html'
_TEST = {
- 'add_ie': ['Brightcove'],
+ 'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.space.com/23373-huge-martian-landforms-detail-revealed-by-european-probe-video.html',
'info_dict': {
'id': '2780937028001',
@@ -31,8 +31,8 @@ class SpaceIE(InfoExtractor):
brightcove_url = self._og_search_video_url(webpage)
except RegexNotFoundError:
# Other videos work fine with the info from the object
- brightcove_url = BrightcoveIE._extract_brightcove_url(webpage)
+ brightcove_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
if brightcove_url is None:
raise ExtractorError(
'The webpage does not contain a video', expected=True)
- return self.url_result(brightcove_url, BrightcoveIE.ie_key())
+ return self.url_result(brightcove_url, BrightcoveLegacyIE.ie_key())
diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py
index 9e8fb35b2..692fd78e8 100644
--- a/youtube_dl/extractor/spankwire.py
+++ b/youtube_dl/extractor/spankwire.py
@@ -6,9 +6,9 @@ from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
- compat_urllib_request,
)
from ..utils import (
+ sanitized_Request,
str_to_int,
unified_strdate,
)
@@ -51,7 +51,7 @@ class SpankwireIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- req = compat_urllib_request.Request('http://www.' + mobj.group('url'))
+ req = sanitized_Request('http://www.' + mobj.group('url'))
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
index 5bd3c0087..39a7aaf9d 100644
--- a/youtube_dl/extractor/spiegel.py
+++ b/youtube_dl/extractor/spiegel.py
@@ -58,7 +58,8 @@ class SpiegelIE(InfoExtractor):
description = self._html_search_meta('description', webpage, 'description')
base_url = self._search_regex(
- r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL')
+ [r'server\s*:\s*(["\'])(?P<url>.+?)\1', r'var\s+server\s*=\s*"(?P<url>[^"]+)\"'],
+ webpage, 'server URL', group='url')
xml_url = base_url + video_id + '.xml'
idoc = self._download_xml(xml_url, video_id)
diff --git a/youtube_dl/extractor/spiegeltv.py b/youtube_dl/extractor/spiegeltv.py
index 27f4033c5..034bd47ff 100644
--- a/youtube_dl/extractor/spiegeltv.py
+++ b/youtube_dl/extractor/spiegeltv.py
@@ -77,17 +77,21 @@ class SpiegeltvIE(InfoExtractor):
'rtmp_live': True,
})
elif determine_ext(endpoint) == 'm3u8':
- m3u8_formats = self._extract_m3u8_formats(
- endpoint.replace('[video]', play_path),
- video_id, 'm4v',
- preference=1, # Prefer hls since it allows to workaround georestriction
- m3u8_id='hls', fatal=False)
- if m3u8_formats is not False:
- formats.extend(m3u8_formats)
+ formats.append({
+ 'url': endpoint.replace('[video]', play_path),
+ 'ext': 'm4v',
+ 'format_id': 'hls', # Prefer HLS since it allows working around geo-restriction
+ 'protocol': 'm3u8',
+ 'preference': 1,
+ 'http_headers': {
+ 'Accept-Encoding': 'deflate', # gzip causes trouble on the server side
+ },
+ })
else:
formats.append({
'url': endpoint,
})
+ self._check_formats(formats, video_id)
thumbnails = []
for image in media_json['images']:
diff --git a/youtube_dl/extractor/sportdeutschland.py b/youtube_dl/extractor/sportdeutschland.py
index 7ec6c613f..ebb75f059 100644
--- a/youtube_dl/extractor/sportdeutschland.py
+++ b/youtube_dl/extractor/sportdeutschland.py
@@ -4,11 +4,9 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
parse_iso8601,
+ sanitized_Request,
)
@@ -54,7 +52,7 @@ class SportDeutschlandIE(InfoExtractor):
api_url = 'http://proxy.vidibusdynamic.net/sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % (
sport_id, video_id)
- req = compat_urllib_request.Request(api_url, headers={
+ req = sanitized_Request(api_url, headers={
'Accept': 'application/vnd.vidibus.v2.html+json',
'Referer': url,
})
diff --git a/youtube_dl/extractor/srf.py b/youtube_dl/extractor/srf.py
index 77eec0bc7..16e1bf2d6 100644
--- a/youtube_dl/extractor/srf.py
+++ b/youtube_dl/extractor/srf.py
@@ -11,7 +11,7 @@ from ..utils import (
class SrfIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/tv/[^/]+/video/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})'
+ _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/(?:tv|radio)/[^/]+/(?P<media_type>video|audio)/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})'
_TESTS = [{
'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'md5': '4cd93523723beff51bb4bee974ee238d',
@@ -36,6 +36,20 @@ class SrfIE(InfoExtractor):
'timestamp': 1373493600,
},
}, {
+ 'url': 'http://www.srf.ch/play/radio/hoerspielarchiv-srf-musikwelle/audio/saegel-ohni-wind-von-jakob-stebler?id=415bf3d3-6429-4de7-968d-95866e37cfbc',
+ 'info_dict': {
+ 'id': '415bf3d3-6429-4de7-968d-95866e37cfbc',
+ 'display_id': 'saegel-ohni-wind-von-jakob-stebler',
+ 'ext': 'mp3',
+ 'upload_date': '20080518',
+ 'title': '«Sägel ohni Wind» von Jakob Stebler',
+ 'timestamp': 1211112000,
+ },
+ 'params': {
+ 'skip_download': True, # requires rtmpdump
+ },
+ }, {
'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'only_matching': True,
}, {
@@ -44,11 +58,13 @@ class SrfIE(InfoExtractor):
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
- display_id = re.match(self._VALID_URL, url).group('display_id') or video_id
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ media_type = mobj.group('media_type')
+ display_id = mobj.group('display_id') or video_id
video_data = self._download_xml(
- 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/video/play/%s.xml' % video_id,
+ 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/%s/play/%s.xml' % (media_type, video_id),
display_id)
title = xpath_text(
@@ -64,7 +80,7 @@ class SrfIE(InfoExtractor):
for url_node in item.findall('url'):
quality = url_node.attrib['quality']
full_url = url_node.text
- original_ext = determine_ext(full_url)
+ original_ext = determine_ext(full_url).lower()
format_id = '%s-%s' % (quality, item.attrib['protocol'])
if original_ext == 'f4m':
formats.extend(self._extract_f4m_formats(
diff --git a/youtube_dl/extractor/stitcher.py b/youtube_dl/extractor/stitcher.py
new file mode 100644
index 000000000..d5c852f52
--- /dev/null
+++ b/youtube_dl/extractor/stitcher.py
@@ -0,0 +1,81 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ int_or_none,
+ js_to_json,
+ unescapeHTML,
+)
+
+
+class StitcherIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
+ _TESTS = [{
+ 'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
+ 'md5': '391dd4e021e6edeb7b8e68fbf2e9e940',
+ 'info_dict': {
+ 'id': '40789481',
+ 'ext': 'mp3',
+ 'title': 'Machine Learning Mastery and Cancer Clusters',
+ 'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3',
+ 'duration': 1604,
+ 'thumbnail': 're:^https?://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
+ 'info_dict': {
+ 'id': '40846275',
+ 'display_id': 'the-rare-hourlong-comedy-plus',
+ 'ext': 'mp3',
+ 'title': "The CW's 'Crazy Ex-Girlfriend'",
+ 'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17',
+ 'duration': 2235,
+ 'thumbnail': 're:^https?://.*\.jpg',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # escaped title
+ 'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ audio_id = mobj.group('id')
+ display_id = mobj.group('display_id') or audio_id
+
+ webpage = self._download_webpage(url, display_id)
+
+ episode = self._parse_json(
+ js_to_json(self._search_regex(
+ r'(?s)var\s+stitcher\s*=\s*({.+?});\n', webpage, 'episode config')),
+ display_id)['config']['episode']
+
+ title = unescapeHTML(episode['title'])
+ formats = [{
+ 'url': episode[episode_key],
+ 'ext': determine_ext(episode[episode_key]) or 'mp3',
+ 'vcodec': 'none',
+ } for episode_key in ('episodeURL',) if episode.get(episode_key)]
+ description = self._search_regex(
+ r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False)
+ duration = int_or_none(episode.get('duration'))
+ thumbnail = episode.get('episodeImage')
+
+ return {
+ 'id': audio_id,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
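
Stitcher keeps its episode metadata in an inline 'var stitcher = {...}' script assignment rather than behind a JSON endpoint, hence the js_to_json pass: it rewrites a JavaScript object literal (single quotes, bare keys) into strict JSON before _parse_json. The helper in isolation:

    import json
    from youtube_dl.utils import js_to_json

    raw = "{title: 'Machine Learning Mastery and Cancer Clusters', duration: 1604}"
    episode = json.loads(js_to_json(raw))
    # parses to a dict with 'title' (str) and 'duration' (int) keys
    print(episode['title'], episode['duration'])
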
diff --git a/youtube_dl/extractor/streamcloud.py b/youtube_dl/extractor/streamcloud.py
index d4e134015..77841b946 100644
--- a/youtube_dl/extractor/streamcloud.py
+++ b/youtube_dl/extractor/streamcloud.py
@@ -4,10 +4,8 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
+from ..utils import sanitized_Request
class StreamcloudIE(InfoExtractor):
@@ -43,7 +41,7 @@ class StreamcloudIE(InfoExtractor):
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
- req = compat_urllib_request.Request(url, post, headers)
+ req = sanitized_Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')
diff --git a/youtube_dl/extractor/streamcz.py b/youtube_dl/extractor/streamcz.py
index e92b93285..d3d2b7eb7 100644
--- a/youtube_dl/extractor/streamcz.py
+++ b/youtube_dl/extractor/streamcz.py
@@ -5,11 +5,9 @@ import hashlib
import time
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
int_or_none,
+ sanitized_Request,
)
@@ -54,7 +52,7 @@ class StreamCZIE(InfoExtractor):
video_id = self._match_id(url)
api_path = '/episode/%s' % video_id
- req = compat_urllib_request.Request(self._API_URL + api_path)
+ req = sanitized_Request(self._API_URL + api_path)
req.add_header('Api-Password', _get_api_key(api_path))
data = self._download_json(req, video_id)
diff --git a/youtube_dl/extractor/tapely.py b/youtube_dl/extractor/tapely.py
index f1f43d0a7..ed560bd24 100644
--- a/youtube_dl/extractor/tapely.py
+++ b/youtube_dl/extractor/tapely.py
@@ -4,19 +4,17 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
clean_html,
ExtractorError,
float_or_none,
parse_iso8601,
+ sanitized_Request,
)
class TapelyIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
+ _VALID_URL = r'https?://(?:www\.)?(?:tape\.ly|tapely\.com)/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
_API_URL = 'http://tape.ly/showtape?id={0:}'
_S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}'
_SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}'
@@ -42,6 +40,10 @@ class TapelyIE(InfoExtractor):
'ext': 'm4a',
},
},
+ {
+ 'url': 'https://tapely.com/my-grief-as-told-by-water',
+ 'only_matching': True,
+ },
]
def _real_extract(self, url):
@@ -49,7 +51,7 @@ class TapelyIE(InfoExtractor):
display_id = mobj.group('id')
playlist_url = self._API_URL.format(display_id)
- request = compat_urllib_request.Request(playlist_url)
+ request = sanitized_Request(playlist_url)
request.add_header('X-Requested-With', 'XMLHttpRequest')
request.add_header('Accept', 'application/json')
request.add_header('Referer', url)
diff --git a/youtube_dl/extractor/teachingchannel.py b/youtube_dl/extractor/teachingchannel.py
index 117afa9bf..e0477382c 100644
--- a/youtube_dl/extractor/teachingchannel.py
+++ b/youtube_dl/extractor/teachingchannel.py
@@ -16,6 +16,7 @@ class TeachingChannelIE(InfoExtractor):
'ext': 'mp4',
'title': 'A History of Teaming',
'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
+ 'duration': 422.255,
},
'params': {
# m3u8 download
diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py
index ae94f055c..2c8e9b941 100644
--- a/youtube_dl/extractor/telecinco.py
+++ b/youtube_dl/extractor/telecinco.py
@@ -1,24 +1,51 @@
# coding: utf-8
from __future__ import unicode_literals
-from .mitele import MiTeleIE
+import json
+from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_parse_unquote,
+ compat_urlparse,
+)
+from ..utils import (
+ get_element_by_attribute,
+ parse_duration,
+ strip_jsonp,
+)
-class TelecincoIE(MiTeleIE):
- IE_NAME = 'telecinco.es'
- _VALID_URL = r'https?://www\.telecinco\.es/(?:[^/]+/)+(?P<id>.+?)\.html'
+
+class TelecincoIE(InfoExtractor):
+ IE_DESC = 'telecinco.es, cuatro.com and mediaset.es'
+ _VALID_URL = r'https?://www\.(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html'
_TESTS = [{
'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
+ 'md5': '5cbef3ad5ef17bf0d21570332d140729',
'info_dict': {
'id': 'MDSVID20141015_0058',
'ext': 'mp4',
'title': 'Con Martín Berasategui, hacer un bacalao al ...',
'duration': 662,
},
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ }, {
+ 'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html',
+ 'md5': '0a5b9f3cc8b074f50a0578f823a12694',
+ 'info_dict': {
+ 'id': 'MDSVID20150916_0128',
+ 'ext': 'mp4',
+ 'title': '¿Quién es este ex futbolista con el que hablan ...',
+ 'duration': 79,
+ },
+ }, {
+ 'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html',
+ 'md5': 'ad1bfaaba922dd4a295724b05b68f86a',
+ 'info_dict': {
+ 'id': 'MDSVID20150513_0220',
+ 'ext': 'mp4',
+ 'title': '#DOYLACARA. Con la trata no hay trato',
+ 'duration': 50,
},
}, {
'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html',
@@ -27,3 +54,41 @@ class TelecincoIE(MiTeleIE):
'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html',
'only_matching': True,
}]
+
+ def _real_extract(self, url):
+ episode = self._match_id(url)
+ webpage = self._download_webpage(url, episode)
+ embed_data_json = self._search_regex(
+ r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
+ ).replace('\'', '"')
+ embed_data = json.loads(embed_data_json)
+
+ domain = embed_data['mediaUrl']
+ if not domain.startswith('http'):
+ # only happens in telecinco.es videos
+ domain = 'http://' + domain
+ info_url = compat_urlparse.urljoin(
+ domain,
+ compat_urllib_parse_unquote(embed_data['flashvars']['host'])
+ )
+ info_el = self._download_xml(info_url, episode).find('./video/info')
+
+ video_link = info_el.find('videoUrl/link').text
+ token_query = compat_urllib_parse.urlencode({'id': video_link})
+ token_info = self._download_json(
+ embed_data['flashvars']['ov_tk'] + '?' + token_query,
+ episode,
+ transform_source=strip_jsonp
+ )
+ formats = self._extract_m3u8_formats(
+ token_info['tokenizedUrl'], episode, ext='mp4', entry_protocol='m3u8_native')
+
+ return {
+ 'id': embed_data['videoId'],
+ 'display_id': episode,
+ 'title': info_el.find('title').text,
+ 'formats': formats,
+ 'description': get_element_by_attribute('class', 'text', webpage),
+ 'thumbnail': info_el.find('thumb').text,
+ 'duration': parse_duration(info_el.find('duration').text),
+ }
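
The standalone extractor now reimplements the flow it used to inherit from MiTeleIE: read MSV.embedData from the page, fetch the ./video/info XML from mediaUrl, exchange the video link for a tokenized URL at the ov_tk endpoint (a JSONP service, hence strip_jsonp), and finally expand the tokenized m3u8. The JSONP step in isolation (callback name and payload illustrative):

    import json
    from youtube_dl.utils import strip_jsonp

    # The token endpoint wraps its JSON payload in a callback;
    # strip_jsonp peels the wrapper so it parses as plain JSON.
    raw = 'mediaToken({"tokenizedUrl": "http://example.com/master.m3u8"});'
    print(json.loads(strip_jsonp(raw))['tokenizedUrl'])
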
diff --git a/youtube_dl/extractor/tf1.py b/youtube_dl/extractor/tf1.py
index 3a68eaa80..6890021cf 100644
--- a/youtube_dl/extractor/tf1.py
+++ b/youtube_dl/extractor/tf1.py
@@ -6,7 +6,7 @@ from .common import InfoExtractor
class TF1IE(InfoExtractor):
"""TF1 uses the wat.tv player."""
- _VALID_URL = r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/.*?-(?P<id>\d+)(?:-\d+)?\.html'
+ _VALID_URL = r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/(?:[^/]+/)*(?P<id>.+?)\.html'
_TESTS = [{
'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html',
'info_dict': {
@@ -22,7 +22,7 @@ class TF1IE(InfoExtractor):
}, {
'url': 'http://www.tfou.fr/chuggington/videos/le-grand-mysterioso-chuggington-7085291-739.html',
'info_dict': {
- 'id': '12043945',
+ 'id': 'le-grand-mysterioso-chuggington-7085291-739',
'ext': 'mp4',
'title': 'Le grand Mystérioso - Chuggington',
'description': 'Le grand Mystérioso - Emery rêve qu\'un article lui soit consacré dans le journal.',
@@ -32,22 +32,24 @@ class TF1IE(InfoExtractor):
# Sometimes wat serves the whole file with the --test option
'skip_download': True,
},
+ 'skip': 'HTTP Error 410: Gone',
}, {
'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
'only_matching': True,
}, {
'url': 'http://lci.tf1.fr/sept-a-huit/videos/sept-a-huit-du-24-mai-2015-8611550.html',
'only_matching': True,
+ }, {
+ 'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
+ 'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- embed_url = self._html_search_regex(
- r'["\'](https?://www.wat.tv/embedframe/.*?)["\']', webpage, 'embed url')
- embed_page = self._download_webpage(embed_url, video_id,
- 'Downloading embed player page')
- wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')
+ wat_id = self._html_search_regex(
+ r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1',
+ webpage, 'wat id', group='id')
wat_info = self._download_json(
'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id)
return self.url_result(wat_info['media']['url'], 'Wat')
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index 25edc3100..0bf6726b5 100644
--- a/youtube_dl/extractor/theplatform.py
+++ b/youtube_dl/extractor/theplatform.py
@@ -16,11 +16,12 @@ from ..compat import (
from ..utils import (
determine_ext,
ExtractorError,
- xpath_with_ns,
- unsmuggle_url,
+ float_or_none,
int_or_none,
+ sanitized_Request,
+ unsmuggle_url,
url_basename,
- float_or_none,
+ xpath_with_ns,
)
default_ns = 'http://www.w3.org/2005/SMIL21/Language'
@@ -139,6 +140,11 @@ class ThePlatformIE(ThePlatformBaseIE):
'upload_date': '20150701',
'categories': ['Today/Shows/Orange Room', 'Today/Sections/Money', 'Today/Topics/Tech', "Today/Topics/Editor's picks"],
},
+ }, {
+ # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1
+ # geo-restricted (US), HLS encrypted with AES-128
+ 'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781',
+ 'only_matching': True,
}]
@staticmethod
@@ -182,8 +188,12 @@ class ThePlatformIE(ThePlatformBaseIE):
# Seems there's no pattern for the script filename we are interested
# in, so try them one by one
for script in reversed(scripts):
- feed_script = self._download_webpage(script, video_id, 'Downloading feed script')
- feed_id = self._search_regex(r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None)
+ feed_script = self._download_webpage(
+ self._proto_relative_url(script, 'http:'),
+ video_id, 'Downloading feed script')
+ feed_id = self._search_regex(
+ r'defaultFeedId\s*:\s*"([^"]+)"', feed_script,
+ 'default feed id', default=None)
if feed_id is not None:
break
if feed_id is None:
@@ -193,6 +203,20 @@ class ThePlatformIE(ThePlatformBaseIE):
if smuggled_data.get('force_smil_url', False):
smil_url = url
+ # Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385)
+ elif '/guid/' in url:
+ headers = {}
+ source_url = smuggled_data.get('source_url')
+ if source_url:
+ headers['Referer'] = source_url
+ request = sanitized_Request(url, headers=headers)
+ webpage = self._download_webpage(request, video_id)
+ smil_url = self._search_regex(
+ r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml',
+ webpage, 'smil url', group='url')
+ path = self._search_regex(
+ r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path')
+ smil_url += ('?' if '?' not in smil_url else '&') + 'formats=m3u,mpeg4&format=SMIL'
elif mobj.group('config'):
config_url = url + '&form=json'
config_url = config_url.replace('swf/', 'config/')
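
The new guid branch resolves the page itself: it passes the smuggled source_url through as Referer, scrapes the application/smil+xml link, and appends formats=m3u,mpeg4&format=SMIL with whichever query separator the SMIL URL still needs (the parenthesized conditional above; without the parentheses, Python's operator precedence would glue the parameters onto the '&' branch only). The same append can be done with urlparse, as a hedged helper:

    try:
        from urllib.parse import urlparse, urlunparse  # Python 3
    except ImportError:
        from urlparse import urlparse, urlunparse  # Python 2

    def append_query(url, extra):
        # Choose the separator from the parsed query, then reassemble.
        parsed = urlparse(url)
        query = (parsed.query + '&' + extra) if parsed.query else extra
        return urlunparse(parsed._replace(query=query))

    print(append_query('http://link.theplatform.com/s/a/b', 'formats=m3u,mpeg4&format=SMIL'))
    # http://link.theplatform.com/s/a/b?formats=m3u,mpeg4&format=SMIL
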
diff --git a/youtube_dl/extractor/tlc.py b/youtube_dl/extractor/tlc.py
index 13263614c..d6d038a8d 100644
--- a/youtube_dl/extractor/tlc.py
+++ b/youtube_dl/extractor/tlc.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from .brightcove import BrightcoveIE
+from .brightcove import BrightcoveLegacyIE
from .discovery import DiscoveryIE
from ..compat import compat_urlparse
@@ -66,6 +66,6 @@ class TlcDeIE(InfoExtractor):
return {
'_type': 'url',
- 'url': BrightcoveIE._extract_brightcove_url(iframe),
- 'ie': BrightcoveIE.ie_key(),
+ 'url': BrightcoveLegacyIE._extract_brightcove_url(iframe),
+ 'ie': BrightcoveLegacyIE.ie_key(),
}
diff --git a/youtube_dl/extractor/toggle.py b/youtube_dl/extractor/toggle.py
new file mode 100644
index 000000000..a47239952
--- /dev/null
+++ b/youtube_dl/extractor/toggle.py
@@ -0,0 +1,194 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ ExtractorError,
+ float_or_none,
+ int_or_none,
+ parse_iso8601,
+ sanitized_Request,
+)
+
+
+class ToggleIE(InfoExtractor):
+ IE_NAME = 'toggle'
+ _VALID_URL = r'https?://video\.toggle\.sg/(?:en|zh)/(?:series|clips|movies)/(?:[^/]+/)+(?P<id>[0-9]+)'
+ _TESTS = [{
+ 'url': 'http://video.toggle.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115',
+ 'info_dict': {
+ 'id': '343115',
+ 'ext': 'mp4',
+ 'title': 'Lion Moms Premiere',
+ 'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b',
+ 'upload_date': '20150910',
+ 'timestamp': 1441858274,
+ },
+ 'params': {
+ 'skip_download': 'm3u8 download',
+ }
+ }, {
+ 'note': 'DRM-protected video',
+ 'url': 'http://video.toggle.sg/en/movies/dug-s-special-mission/341413',
+ 'info_dict': {
+ 'id': '341413',
+ 'ext': 'wvm',
+ 'title': 'Dug\'s Special Mission',
+ 'description': 'md5:e86c6f4458214905c1772398fabc93e0',
+ 'upload_date': '20150827',
+ 'timestamp': 1440644006,
+ },
+ 'params': {
+ 'skip_download': 'DRM-protected wvm download',
+ }
+ }, {
+ # this also tests correct video id extraction
+ 'note': 'm3u8 links are geo-restricted, but Android/mp4 is okay',
+ 'url': 'http://video.toggle.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861',
+ 'info_dict': {
+ 'id': '332861',
+ 'ext': 'mp4',
+ 'title': '28th SEA Games (5 Show) - Episode 11',
+ 'description': 'md5:3cd4f5f56c7c3b1340c50a863f896faa',
+ 'upload_date': '20150605',
+ 'timestamp': 1433480166,
+ },
+ 'params': {
+ 'skip_download': 'm3u8 download',
+ },
+ 'skip': 'm3u8 links are geo-restricted'
+ }, {
+ 'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://video.toggle.sg/zh/series/zero-calling-s2-hd/ep13/336367',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://video.toggle.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://video.toggle.sg/en/movies/seven-days/321936',
+ 'only_matching': True,
+ }]
+
+ _FORMAT_PREFERENCES = {
+ 'wvm-STBMain': -10,
+ 'wvm-iPadMain': -20,
+ 'wvm-iPhoneMain': -30,
+ 'wvm-Android': -40,
+ }
+ _API_USER = 'tvpapi_147'
+ _API_PASS = '11111'
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(
+ url, video_id, note='Downloading video page')
+
+ api_user = self._search_regex(
+ r'apiUser\s*:\s*(["\'])(?P<user>.+?)\1', webpage, 'apiUser',
+ default=self._API_USER, group='user')
+ api_pass = self._search_regex(
+ r'apiPass\s*:\s*(["\'])(?P<pass>.+?)\1', webpage, 'apiPass',
+ default=self._API_PASS, group='pass')
+
+ params = {
+ 'initObj': {
+ 'Locale': {
+ 'LocaleLanguage': '',
+ 'LocaleCountry': '',
+ 'LocaleDevice': '',
+ 'LocaleUserState': 0
+ },
+ 'Platform': 0,
+ 'SiteGuid': 0,
+ 'DomainID': '0',
+ 'UDID': '',
+ 'ApiUser': api_user,
+ 'ApiPass': api_pass
+ },
+ 'MediaID': video_id,
+ 'mediaType': 0,
+ }
+
+ req = sanitized_Request(
+ 'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo',
+ json.dumps(params).encode('utf-8'))
+ info = self._download_json(req, video_id, 'Downloading video info json')
+
+ title = info['MediaName']
+
+ formats = []
+ for video_file in info.get('Files', []):
+ video_url, vid_format = video_file.get('URL'), video_file.get('Format')
+ if not video_url or not vid_format:
+ continue
+ ext = determine_ext(video_url)
+ vid_format = vid_format.replace(' ', '')
+ # if geo-restricted, m3u8 is inaccessible, but mp4 is okay
+ if ext == 'm3u8':
+ m3u8_formats = self._extract_m3u8_formats(
+ video_url, video_id, ext='mp4', m3u8_id=vid_format,
+ note='Downloading %s m3u8 information' % vid_format,
+ errnote='Failed to download %s m3u8 information' % vid_format,
+ fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ elif ext in ('mp4', 'wvm'):
+ # wvm files are DRM-protected
+ formats.append({
+ 'ext': ext,
+ 'url': video_url,
+ 'format_id': vid_format,
+ 'preference': self._FORMAT_PREFERENCES.get(ext + '-' + vid_format) or -1,
+ 'format_note': 'DRM-protected video' if ext == 'wvm' else None
+ })
+ if not formats:
+ # Most likely because the video is geo-blocked
+ raise ExtractorError('No downloadable videos found', expected=True)
+ self._sort_formats(formats)
+
+ duration = int_or_none(info.get('Duration'))
+ description = info.get('Description')
+ created_at = parse_iso8601(info.get('CreationDate') or None)
+
+ average_rating = float_or_none(info.get('Rating'))
+ view_count = int_or_none(info.get('ViewCounter') or info.get('view_counter'))
+ like_count = int_or_none(info.get('LikeCounter') or info.get('like_counter'))
+
+ thumbnails = []
+ for picture in info.get('Pictures', []):
+ if not isinstance(picture, dict):
+ continue
+ pic_url = picture.get('URL')
+ if not pic_url:
+ continue
+ thumbnail = {
+ 'url': pic_url,
+ }
+ pic_size = picture.get('PicSize', '')
+ m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size)
+ if m:
+ thumbnail.update({
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
+ thumbnails.append(thumbnail)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'timestamp': created_at,
+ 'average_rating': average_rating,
+ 'view_count': view_count,
+ 'like_count': like_count,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }
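
Format ranking in this extractor is table-driven: the lookup key is ext + '-' + format name, every combination missing from _FORMAT_PREFERENCES (all the plain mp4 renditions) falls back to -1, and since _sort_formats treats higher preference as better, the DRM'd wvm variants always sort below mp4, and among themselves in the listed order. The lookup in isolation:

    _FORMAT_PREFERENCES = {
        'wvm-STBMain': -10,
        'wvm-iPadMain': -20,
        'wvm-iPhoneMain': -30,
        'wvm-Android': -40,
    }

    for ext, fmt in (('wvm', 'Android'), ('mp4', 'Android'), ('wvm', 'STBMain')):
        # Unknown keys (every mp4 combination) fall back to -1.
        print('%s-%s -> %d' % (ext, fmt, _FORMAT_PREFERENCES.get('%s-%s' % (ext, fmt)) or -1))
    # wvm-Android -> -40, mp4-Android -> -1, wvm-STBMain -> -10
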
diff --git a/youtube_dl/extractor/trilulilu.py b/youtube_dl/extractor/trilulilu.py
index 185accc4b..a800449e9 100644
--- a/youtube_dl/extractor/trilulilu.py
+++ b/youtube_dl/extractor/trilulilu.py
@@ -1,80 +1,103 @@
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_iso8601,
+)
class TriluliluIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?trilulilu\.ro/(?:video-[^/]+/)?(?P<id>[^/#\?]+)'
- _TEST = {
- 'url': 'http://www.trilulilu.ro/video-animatie/big-buck-bunny-1',
- 'md5': 'c1450a00da251e2769b74b9005601cac',
+ _VALID_URL = r'https?://(?:(?:www|m)\.)?trilulilu\.ro/(?:[^/]+/)?(?P<id>[^/#\?]+)'
+ _TESTS = [{
+ 'url': 'http://www.trilulilu.ro/big-buck-bunny-1',
+ 'md5': '68da087b676a6196a413549212f60cc6',
'info_dict': {
'id': 'ae2899e124140b',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': ':) pentru copilul din noi',
+ 'uploader_id': 'chipy',
+ 'upload_date': '20120304',
+ 'timestamp': 1330830647,
+ 'uploader': 'chipy',
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
},
- }
+ }, {
+ 'url': 'http://www.trilulilu.ro/adena-ft-morreti-inocenta',
+ 'md5': '929dfb8729dc71750463af88bbbbf4a4',
+ 'info_dict': {
+ 'id': 'f299710e3c91c5',
+ 'ext': 'mp4',
+ 'title': 'Adena ft. Morreti - Inocenta',
+ 'description': 'pop music',
+ 'uploader_id': 'VEVOmixt',
+ 'upload_date': '20151204',
+ 'uploader': 'VEVOmixt',
+ 'timestamp': 1449187937,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ }]
def _real_extract(self, url):
display_id = self._match_id(url)
- webpage = self._download_webpage(url, display_id)
+ media_info = self._download_json('http://m.trilulilu.ro/%s?format=json' % display_id, display_id)
- if re.search(r'Fişierul nu este disponibil pentru vizionare în ţara dumneavoastră', webpage):
- raise ExtractorError(
- 'This video is not available in your country.', expected=True)
- elif re.search('Fişierul poate fi accesat doar de către prietenii lui', webpage):
+ age_limit = 0
+ errors = media_info.get('errors', {})
+ if errors.get('friends'):
raise ExtractorError('This video is private.', expected=True)
+ elif errors.get('geoblock'):
+ raise ExtractorError('This video is not available in your country.', expected=True)
+ elif errors.get('xxx_unlogged'):
+ age_limit = 18
- flashvars_str = self._search_regex(
- r'block_flash_vars\s*=\s*(\{[^\}]+\})', webpage, 'flashvars', fatal=False, default=None)
+ media_class = media_info.get('class')
+ if media_class not in ('video', 'audio'):
+ raise ExtractorError('not a video or audio')
- if flashvars_str:
- flashvars = self._parse_json(flashvars_str, display_id)
- else:
- raise ExtractorError(
- 'This page does not contain videos', expected=True)
+ user = media_info.get('user', {})
- if flashvars['isMP3'] == 'true':
- raise ExtractorError(
- 'Audio downloads are currently not supported', expected=True)
+ thumbnail = media_info.get('cover_url')
+ if thumbnail:
+ thumbnail = thumbnail.format(width='1600', height='1200')
- video_id = flashvars['hash']
- title = self._og_search_title(webpage)
- thumbnail = self._og_search_thumbnail(webpage)
- description = self._og_search_description(webpage, default=None)
-
- format_url = ('http://fs%(server)s.trilulilu.ro/%(hash)s/'
- 'video-formats2' % flashvars)
- format_doc = self._download_xml(
- format_url, video_id,
- note='Downloading formats',
- errnote='Error while downloading formats')
-
- video_url_template = (
- 'http://fs%(server)s.trilulilu.ro/stream.php?type=video'
- '&source=site&hash=%(hash)s&username=%(userid)s&'
- 'key=ministhebest&format=%%s&sig=&exp=' %
- flashvars)
- formats = [
- {
- 'format_id': fnode.text.partition('-')[2],
- 'url': video_url_template % fnode.text,
- 'ext': fnode.text.partition('-')[0]
- }
-
- for fnode in format_doc.findall('./formats/format')
- ]
+ # TODO: get correct ext for audio files
+ stream_type = media_info.get('stream_type')
+ formats = [{
+ 'url': media_info['href'],
+ 'ext': stream_type,
+ }]
+ if media_info.get('is_hd'):
+ formats.append({
+ 'format_id': 'hd',
+ 'url': media_info['hrefhd'],
+ 'ext': stream_type,
+ })
+ if media_class == 'audio':
+ formats[0]['vcodec'] = 'none'
+ else:
+ formats[0]['format_id'] = 'sd'
return {
- 'id': video_id,
+ 'id': media_info['identifier'].split('|')[1],
'display_id': display_id,
'formats': formats,
- 'title': title,
- 'description': description,
+ 'title': media_info['title'],
+ 'description': media_info.get('description'),
'thumbnail': thumbnail,
+ 'uploader_id': user.get('username'),
+ 'uploader': user.get('fullname'),
+ 'timestamp': parse_iso8601(media_info.get('published'), ' '),
+ 'duration': int_or_none(media_info.get('duration')),
+ 'view_count': int_or_none(media_info.get('count_views')),
+ 'like_count': int_or_none(media_info.get('count_likes')),
+ 'comment_count': int_or_none(media_info.get('count_comments')),
+ 'age_limit': age_limit,
}
diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py
index c9cb69333..46ef61ff5 100644
--- a/youtube_dl/extractor/tube8.py
+++ b/youtube_dl/extractor/tube8.py
@@ -4,12 +4,10 @@ import json
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse_urlparse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse_urlparse
from ..utils import (
int_or_none,
+ sanitized_Request,
str_to_int,
)
from ..aes import aes_decrypt_text
@@ -42,7 +40,7 @@ class Tube8IE(InfoExtractor):
video_id = mobj.group('id')
display_id = mobj.group('display_id')
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, display_id)
diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py
index 4f86b3ee9..6d78b5dfe 100644
--- a/youtube_dl/extractor/tubitv.py
+++ b/youtube_dl/extractor/tubitv.py
@@ -5,13 +5,11 @@ import codecs
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
int_or_none,
+ sanitized_Request,
)
@@ -44,7 +42,7 @@ class TubiTvIE(InfoExtractor):
'password': password,
}
payload = compat_urllib_parse.urlencode(form_data).encode('utf-8')
- request = compat_urllib_request.Request(self._LOGIN_URL, payload)
+ request = sanitized_Request(self._LOGIN_URL, payload)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_page = self._download_webpage(
request, None, False, 'Wrong login info')
diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index 84fe71aef..5f7ac4b35 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -2,14 +2,12 @@
from __future__ import unicode_literals
-import re
-import json
-
from .common import InfoExtractor
+from ..compat import compat_str
class TudouIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/.*?/(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
+ _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:listplay|programs(?:/view)?|albumplay)/([^/]+/)*(?P<id>[^/?#]+?)(?:\.html)?/?(?:$|[?#])'
_TESTS = [{
'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',
'md5': '140a49ed444bd22f93330985d8475fcb',
@@ -27,41 +25,41 @@ class TudouIE(InfoExtractor):
'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012',
'thumbnail': 're:^https?://.*\.jpg$',
}
+ }, {
+ 'url': 'http://www.tudou.com/albumplay/cJAHGih4yYg.html',
+ 'only_matching': True,
}]
_PLAYER_URL = 'http://js.tudouui.com/bin/lingtong/PortalPlayer_177.swf'
- def _url_for_id(self, id, quality=None):
- info_url = "http://v2.tudou.com/f?id=" + str(id)
+ def _url_for_id(self, video_id, quality=None):
+ info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id)
if quality:
info_url += '&hd' + quality
- webpage = self._download_webpage(info_url, id, "Opening the info webpage")
- final_url = self._html_search_regex('>(.+?)</f>', webpage, 'video url')
+ xml_data = self._download_xml(info_url, video_id, "Opening the info XML page")
+ final_url = xml_data.text
return final_url
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage)
- if m and m.group(1):
- return {
- '_type': 'url',
- 'url': 'youku:' + m.group(1),
- 'ie_key': 'Youku'
- }
+ youku_vcode = self._search_regex(
+ r'vcode\s*:\s*[\'"]([^\'"]*)[\'"]', webpage, 'youku vcode', default=None)
+ if youku_vcode:
+ return self.url_result('youku:' + youku_vcode, ie='Youku')
title = self._search_regex(
- r",kw:\s*['\"](.+?)[\"']", webpage, 'title')
+ r',kw\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'title')
thumbnail_url = self._search_regex(
- r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)
+ r',pic\s*:\s*[\'"]([^\'"]+)[\'"]', webpage, 'thumbnail URL', fatal=False)
player_url = self._search_regex(
- r"playerUrl\s*:\s*['\"](.+?\.swf)[\"']",
+ r'playerUrl\s*:\s*[\'"]([^\'"]+\.swf)[\'"]',
webpage, 'player URL', default=self._PLAYER_URL)
- segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
- segments = json.loads(segs_json)
+ segments = self._parse_json(self._search_regex(
+ r'segs: \'([^\']+)\'', webpage, 'segments'), video_id)
# It looks like the keys are the arguments that have to be passed as
# the hd field in the request URL; we pick the highest
# Also, filter non-number qualities (see issue #3643).
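
Concretely, segs decodes to a dict keyed by quality codes; the extractor keeps only the numeric keys and requests the largest one as the hd parameter. A sketch of that selection (dict contents illustrative, values elided):

    segments = {'0': [], '2': [], '99': [], 'cdn': []}  # decoded segs
    quality = sorted((k for k in segments if k.isdigit()), key=int)[-1]
    print(quality)  # '99' -- used as the hd parameter; 'cdn' is filtered out
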
diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 3d3b635e4..4f844706d 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..utils import int_or_none
class TumblrIE(InfoExtractor):
@@ -29,6 +30,19 @@ class TumblrIE(InfoExtractor):
'thumbnail': 're:http://.*\.jpg',
}
}, {
+ 'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video',
+ 'md5': '7ae503065ad150122dc3089f8cf1546c',
+ 'info_dict': {
+ 'id': '130323439814',
+ 'ext': 'mp4',
+ 'title': 'HD Video Testing \u2014 Test description for my HD video',
+ 'description': 'md5:97cc3ab5fcd27ee4af6356701541319c',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ 'params': {
+ 'format': 'hd',
+ },
+ }, {
'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching',
'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab',
'info_dict': {
@@ -37,6 +51,9 @@ class TumblrIE(InfoExtractor):
'title': 'naked smoking & stretching',
'upload_date': '20150506',
'timestamp': 1430931613,
+ 'age_limit': 18,
+ 'uploader_id': '1638622',
+ 'uploader': 'naked-yogi',
},
'add_ie': ['Vidme'],
}, {
@@ -66,10 +83,38 @@ class TumblrIE(InfoExtractor):
if iframe_url is None:
return self.url_result(urlh.geturl(), 'Generic')
- iframe = self._download_webpage(iframe_url, video_id,
- 'Downloading iframe page')
- video_url = self._search_regex(r'<source src="([^"]+)"',
- iframe, 'video url')
+ iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page')
+
+ duration = None
+ sources = []
+
+ sd_url = self._search_regex(
+ r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe,
+ 'sd video url', default=None, group='url')
+ if sd_url:
+ sources.append((sd_url, 'sd'))
+
+ options = self._parse_json(
+ self._search_regex(
+ r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe,
+ 'hd video url', default='', group='options'),
+ video_id, fatal=False)
+ if options:
+ duration = int_or_none(options.get('duration'))
+ hd_url = options.get('hdUrl')
+ if hd_url:
+ sources.append((hd_url, 'hd'))
+
+ formats = [{
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'format_id': format_id,
+ 'height': int_or_none(self._search_regex(
+ r'/(\d{3,4})$', video_url, 'height', default=None)),
+ 'quality': quality,
+ } for quality, (video_url, format_id) in enumerate(sources)]
+
+ self._sort_formats(formats)
# The only place where you can get a title; it's not complete,
# but searching in other places doesn't work for all videos
@@ -79,9 +124,9 @@ class TumblrIE(InfoExtractor):
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'mp4',
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
+ 'duration': duration,
+ 'formats': formats,
}
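
The Tumblr change above collects (url, format_id) pairs in ascending quality order and lets enumerate() supply the quality rank. A minimal standalone sketch of that pattern, with invented URLs and without the youtube-dl helpers:

import re


def _height_from_url(url):
    # Guess the height from a trailing '/480'-style path component.
    m = re.search(r'/(\d{3,4})$', url)
    return int(m.group(1)) if m else None


def build_formats(sources):
    # The list index doubles as a quality rank, so later entries ('hd')
    # outrank earlier ones ('sd') when the formats are sorted.
    return [{
        'url': url,
        'ext': 'mp4',
        'format_id': format_id,
        'height': _height_from_url(url),
        'quality': quality,
    } for quality, (url, format_id) in enumerate(sources)]


print(build_formats([
    ('https://vt.example.com/video/123/480', 'sd'),
    ('https://vt.example.com/video/123/720', 'hd'),
]))
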
diff --git a/youtube_dl/extractor/tutv.py b/youtube_dl/extractor/tutv.py
index fad720b68..822372ea1 100644
--- a/youtube_dl/extractor/tutv.py
+++ b/youtube_dl/extractor/tutv.py
@@ -10,10 +10,10 @@ class TutvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tu\.tv/videos/(?P<id>[^/?]+)'
_TEST = {
'url': 'http://tu.tv/videos/robots-futbolistas',
- 'md5': '627c7c124ac2a9b5ab6addb94e0e65f7',
+ 'md5': '0cd9e28ad270488911b0d2a72323395d',
'info_dict': {
'id': '2973058',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Robots futbolistas',
},
}
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 023911c41..69882da63 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -11,14 +11,15 @@ from ..compat import (
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
- compat_urllib_request,
compat_urlparse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
int_or_none,
parse_duration,
parse_iso8601,
+ sanitized_Request,
)
@@ -27,8 +28,7 @@ class TwitchBaseIE(InfoExtractor):
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'http://usher.twitch.tv'
- _LOGIN_URL = 'https://secure.twitch.tv/login'
- _LOGIN_POST_URL = 'https://passport.twitch.tv/authentications/new'
+ _LOGIN_URL = 'http://www.twitch.tv/login'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
@@ -48,7 +48,7 @@ class TwitchBaseIE(InfoExtractor):
for cookie in self._downloader.cookiejar:
if cookie.name == 'api_token':
headers['Twitch-Api-Token'] = cookie.value
- request = compat_urllib_request.Request(url, headers=headers)
+ request = sanitized_Request(url, headers=headers)
response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
self._handle_error(response)
return response
@@ -61,26 +61,28 @@ class TwitchBaseIE(InfoExtractor):
if username is None:
return
- login_page = self._download_webpage(
+ login_page, handle = self._download_webpage_handle(
self._LOGIN_URL, None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
- 'login': username.encode('utf-8'),
- 'password': password.encode('utf-8'),
+ 'username': username,
+ 'password': password,
})
+ redirect_url = handle.geturl()
+
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
- 'post url', default=self._LOGIN_POST_URL, group='url')
+ 'post url', default=redirect_url, group='url')
if not post_url.startswith('http'):
- post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
+ post_url = compat_urlparse.urljoin(redirect_url, post_url)
- request = compat_urllib_request.Request(
- post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
- request.add_header('Referer', self._LOGIN_URL)
+ request = sanitized_Request(
+ post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
+ request.add_header('Referer', redirect_url)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -238,14 +240,24 @@ class TwitchVodIE(TwitchItemBaseIE):
def _real_extract(self, url):
item_id = self._match_id(url)
+
info = self._download_info(self._ITEM_SHORTCUT, item_id)
access_token = self._download_json(
'%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
'Downloading %s access token' % self._ITEM_TYPE)
+
formats = self._extract_m3u8_formats(
- '%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true'
- % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
+ '%s/vod/%s?%s' % (
+ self._USHER_BASE, item_id,
+ compat_urllib_parse.urlencode({
+ 'allow_source': 'true',
+ 'allow_spectre': 'true',
+ 'player': 'twitchweb',
+ 'nauth': access_token['token'],
+ 'nauthsig': access_token['sig'],
+ })),
item_id, 'mp4')
+
self._prefer_source(formats)
info['formats'] = formats
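
The VOD hunk replaces hand-interpolated auth parameters with a urlencode()-built query string. A rough standalone equivalent (token and signature values are placeholders):

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

USHER_BASE = 'http://usher.twitch.tv'


def vod_m3u8_url(item_id, token, sig):
    # urlencode() escapes each value, so tokens containing '&' or '='
    # survive intact, which raw '%s' interpolation did not guarantee.
    query = urlencode({
        'allow_source': 'true',
        'allow_spectre': 'true',
        'player': 'twitchweb',
        'nauth': token,
        'nauthsig': sig,
    })
    return '%s/vod/%s?%s' % (USHER_BASE, item_id, query)


print(vod_m3u8_url('v12345678', 'example-token', 'example-sig'))
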
diff --git a/youtube_dl/extractor/twitter.py b/youtube_dl/extractor/twitter.py
index 1aaa06305..a161f046b 100644
--- a/youtube_dl/extractor/twitter.py
+++ b/youtube_dl/extractor/twitter.py
@@ -1,28 +1,73 @@
+# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_request
from ..utils import (
float_or_none,
- unescapeHTML,
+ xpath_text,
+ remove_end,
+ int_or_none,
+ ExtractorError,
+ sanitized_Request,
)
class TwitterCardIE(InfoExtractor):
+ IE_NAME = 'twitter:card'
_VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)'
- _TEST = {
- 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
- 'md5': 'a74f50b310c83170319ba16de6955192',
- 'info_dict': {
- 'id': '560070183650213889',
- 'ext': 'mp4',
- 'title': 'TwitterCard',
- 'thumbnail': 're:^https?://.*\.jpg$',
- 'duration': 30.033,
+ _TESTS = [
+ {
+ 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
+ 'md5': '4fa26a35f9d1bf4b646590ba8e84be19',
+ 'info_dict': {
+ 'id': '560070183650213889',
+ 'ext': 'mp4',
+ 'title': 'TwitterCard',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 30.033,
+ }
},
- }
+ {
+ 'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
+ 'md5': '7ee2a553b63d1bccba97fbed97d9e1c8',
+ 'info_dict': {
+ 'id': '623160978427936768',
+ 'ext': 'mp4',
+ 'title': 'TwitterCard',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 80.155,
+ },
+ },
+ {
+ 'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
+ 'md5': 'b6f35e8b08a0bec6c8af77a2f4b3a814',
+ 'info_dict': {
+ 'id': 'dq4Oj5quskI',
+ 'ext': 'mp4',
+ 'title': 'Ubuntu 11.10 Overview',
+ 'description': 'Take a quick peek at what\'s new and improved in Ubuntu 11.10.\n\nOnce installed take a look at 10 Things to Do After Installing: http://www.omgubuntu.co.uk/2011/10/10-things-to-do-after-installing-ubuntu-11-10/',
+ 'upload_date': '20111013',
+ 'uploader': 'OMG! Ubuntu!',
+ 'uploader_id': 'omgubuntu',
+ },
+ 'add_ie': ['Youtube'],
+ },
+ {
+ 'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568',
+ 'md5': 'ab2745d0b0ce53319a534fccaa986439',
+ 'info_dict': {
+ 'id': 'iBb2x00UVlv',
+ 'ext': 'mp4',
+ 'upload_date': '20151113',
+ 'uploader_id': '1189339351084113920',
+ 'uploader': '@ArsenalTerje',
+ 'title': 'Vine by @ArsenalTerje',
+ },
+ 'add_ie': ['Vine'],
+ }
+ ]
def _real_extract(self, url):
video_id = self._match_id(url)
@@ -36,14 +81,28 @@ class TwitterCardIE(InfoExtractor):
config = None
formats = []
for user_agent in USER_AGENTS:
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
request.add_header('User-Agent', user_agent)
webpage = self._download_webpage(request, video_id)
- config = self._parse_json(
- unescapeHTML(self._search_regex(
- r'data-player-config="([^"]+)"', webpage, 'data player config')),
+ iframe_url = self._html_search_regex(
+ r'<iframe[^>]+src="((?:https?:)?//(?:www\.youtube\.com/embed/[^"]+|(?:www\.)?vine\.co/v/\w+/card))"',
+ webpage, 'video iframe', default=None)
+ if iframe_url:
+ return self.url_result(iframe_url)
+
+ config = self._parse_json(self._html_search_regex(
+ r'data-player-config="([^"]+)"', webpage, 'data player config'),
video_id)
+ if 'playlist' not in config:
+ if 'vmapUrl' in config:
+ vmap_data = self._download_xml(config['vmapUrl'], video_id)
+ video_url = xpath_text(vmap_data, './/MediaFile').strip()
+ formats.append({
+ 'url': video_url,
+ })
+ break # same video regardless of UA
+ continue
video_url = config['playlist'][0]['source']
@@ -70,3 +129,100 @@ class TwitterCardIE(InfoExtractor):
'duration': duration,
'formats': formats,
}
+
+
+class TwitterIE(InfoExtractor):
+ IE_NAME = 'twitter'
+ _VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?P<user_id>[^/]+)/status/(?P<id>\d+)'
+ _TEMPLATE_URL = 'https://twitter.com/%s/status/%s'
+
+ _TESTS = [{
+ 'url': 'https://twitter.com/freethenipple/status/643211948184596480',
+ 'md5': 'db6612ec5d03355953c3ca9250c97e5e',
+ 'info_dict': {
+ 'id': '643211948184596480',
+ 'ext': 'mp4',
+ 'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 12.922,
+ 'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"',
+ 'uploader': 'FREE THE NIPPLE',
+ 'uploader_id': 'freethenipple',
+ },
+ }, {
+ 'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1',
+ 'md5': 'f36dcd5fb92bf7057f155e7d927eeb42',
+ 'info_dict': {
+ 'id': '657991469417025536',
+ 'ext': 'mp4',
+ 'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai',
+ 'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"',
+ 'thumbnail': 're:^https?://.*\.png',
+ 'uploader': 'Gifs',
+ 'uploader_id': 'giphz',
+ },
+ }, {
+ 'url': 'https://twitter.com/starwars/status/665052190608723968',
+ 'md5': '39b7199856dee6cd4432e72c74bc69d4',
+ 'info_dict': {
+ 'id': '665052190608723968',
+ 'ext': 'mp4',
+ 'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.',
+ 'description': 'Star Wars on Twitter: "A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens."',
+ 'uploader_id': 'starwars',
+ 'uploader': 'Star Wars',
+ },
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ user_id = mobj.group('user_id')
+ twid = mobj.group('id')
+
+ webpage = self._download_webpage(self._TEMPLATE_URL % (user_id, twid), twid)
+
+ username = remove_end(self._og_search_title(webpage), ' on Twitter')
+
+ title = description = self._og_search_description(webpage).strip().replace('\n', ' ').strip('“”')
+
+ # strip 'https -_t.co_BJYgOjSeGA' junk from filenames
+ title = re.sub(r'\s+(https?://[^ ]+)', '', title)
+
+ info = {
+ 'uploader_id': user_id,
+ 'uploader': username,
+ 'webpage_url': url,
+ 'description': '%s on Twitter: "%s"' % (username, description),
+ 'title': username + ' - ' + title,
+ }
+
+ card_id = self._search_regex(
+ r'["\']/i/cards/tfw/v1/(\d+)', webpage, 'twitter card url', default=None)
+ if card_id:
+ card_url = 'https://twitter.com/i/cards/tfw/v1/' + card_id
+ info.update({
+ '_type': 'url_transparent',
+ 'ie_key': 'TwitterCard',
+ 'url': card_url,
+ })
+ return info
+
+ mobj = re.search(r'''(?x)
+ <video[^>]+class="animated-gif"[^>]+
+ (?:data-height="(?P<height>\d+)")?[^>]+
+ (?:data-width="(?P<width>\d+)")?[^>]+
+ (?:poster="(?P<poster>[^"]+)")?[^>]*>\s*
+ <source[^>]+video-src="(?P<url>[^"]+)"
+ ''', webpage)
+
+ if mobj:
+ info.update({
+ 'id': twid,
+ 'url': mobj.group('url'),
+ 'height': int_or_none(mobj.group('height')),
+ 'width': int_or_none(mobj.group('width')),
+ 'thumbnail': mobj.group('poster'),
+ })
+ return info
+
+ raise ExtractorError('There\'s no video in this tweet.')
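
The new TwitterIE delegates card tweets via a 'url_transparent' result: another extractor fetches the media while the tweet-level metadata is merged on top. A schematic sketch of that hand-off, not the actual YoutubeDL resolution logic:

def tweet_result(twid, username, title, card_id=None):
    # Shape of the dict a _real_extract() would return for a tweet.
    info = {
        'uploader': username,
        'title': '%s - %s' % (username, title),
    }
    if card_id:
        # '_type': 'url_transparent' asks the downloader to run the named
        # extractor on 'url', then overlay these fields on its result.
        info.update({
            '_type': 'url_transparent',
            'ie_key': 'TwitterCard',
            'url': 'https://twitter.com/i/cards/tfw/v1/' + card_id,
        })
    else:
        info['id'] = twid
    return info


print(tweet_result('643211948184596480', 'FREE THE NIPPLE',
                   'FTN supporters on Hollywood Blvd today!',
                   card_id='643211948184596480'))
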
diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index 365d8b4bf..59832b1ec 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -1,14 +1,16 @@
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..compat import (
+ compat_HTTPError,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
+ float_or_none,
+ int_or_none,
+ sanitized_Request,
)
@@ -17,6 +19,8 @@ class UdemyIE(InfoExtractor):
_VALID_URL = r'https?://www\.udemy\.com/(?:[^#]+#/lecture/|lecture/view/?\?lectureId=)(?P<id>\d+)'
_LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
_ORIGIN_URL = 'https://www.udemy.com'
+ _SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<'
+ _ALREADY_ENROLLED = '>You are already taking this course.<'
_NETRC_MACHINE = 'udemy'
_TESTS = [{
@@ -32,6 +36,29 @@ class UdemyIE(InfoExtractor):
'skip': 'Requires udemy account credentials',
}]
+ def _enroll_course(self, webpage, course_id):
+ enroll_url = self._search_regex(
+ r'href=(["\'])(?P<url>https?://(?:www\.)?udemy\.com/course/subscribe/.+?)\1',
+ webpage, 'enroll url', group='url',
+ default='https://www.udemy.com/course/subscribe/?courseId=%s' % course_id)
+ webpage = self._download_webpage(enroll_url, course_id, 'Enrolling in the course')
+ if self._SUCCESSFULLY_ENROLLED in webpage:
+ self.to_screen('%s: Successfully enrolled in the course' % course_id)
+ elif self._ALREADY_ENROLLED in webpage:
+ self.to_screen('%s: Already enrolled in the course' % course_id)
+
+ def _download_lecture(self, course_id, lecture_id):
+ return self._download_json(
+ 'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % (
+ course_id, lecture_id, compat_urllib_parse.urlencode({
+ 'video_only': '',
+ 'auto_play': '',
+ 'fields[lecture]': 'title,description,asset',
+ 'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data',
+ 'instructorPreviewMode': 'False',
+ })),
+ lecture_id, 'Downloading lecture JSON')
+
def _handle_error(self, response):
if not isinstance(response, dict):
return
@@ -53,12 +80,13 @@ class UdemyIE(InfoExtractor):
headers['X-Udemy-Client-Id'] = cookie.value
elif cookie.name == 'access_token':
headers['X-Udemy-Bearer-Token'] = cookie.value
+ headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
if isinstance(url_or_request, compat_urllib_request.Request):
for header, value in headers.items():
url_or_request.add_header(header, value)
else:
- url_or_request = compat_urllib_request.Request(url_or_request, headers=headers)
+ url_or_request = sanitized_Request(url_or_request, headers=headers)
response = super(UdemyIE, self)._download_json(url_or_request, video_id, note)
self._handle_error(response)
@@ -70,7 +98,7 @@ class UdemyIE(InfoExtractor):
def _login(self):
(username, password) = self._get_login_info()
if username is None:
- self.raise_login_required('Udemy account is required')
+ return
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
@@ -89,7 +117,7 @@ class UdemyIE(InfoExtractor):
'password': password.encode('utf-8'),
})
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._ORIGIN_URL)
request.add_header('Origin', self._ORIGIN_URL)
@@ -108,44 +136,76 @@ class UdemyIE(InfoExtractor):
def _real_extract(self, url):
lecture_id = self._match_id(url)
- lecture = self._download_json(
- 'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id,
- lecture_id, 'Downloading lecture JSON')
+ webpage = self._download_webpage(url, lecture_id)
+
+ course_id = self._search_regex(
+ r'data-course-id=["\'](\d+)', webpage, 'course id')
+
+ try:
+ lecture = self._download_lecture(course_id, lecture_id)
+ except ExtractorError as e:
+ # Error could possibly mean we are not enrolled in the course
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+ self._enroll_course(webpage, course_id)
+ lecture = self._download_lecture(course_id, lecture_id)
+ else:
+ raise
+
+ title = lecture['title']
+ description = lecture.get('description')
- asset_type = lecture.get('assetType') or lecture.get('asset_type')
+ asset = lecture['asset']
+
+ asset_type = asset.get('assetType') or asset.get('asset_type')
if asset_type != 'Video':
raise ExtractorError(
'Lecture %s is not a video' % lecture_id, expected=True)
- asset = lecture['asset']
-
stream_url = asset.get('streamUrl') or asset.get('stream_url')
- mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url)
- if mobj:
- return self.url_result(mobj.group(1), 'Youtube')
+ if stream_url:
+ youtube_url = self._search_regex(
+ r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None)
+ if youtube_url:
+ return self.url_result(youtube_url, 'Youtube')
video_id = asset['id']
thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url')
- duration = asset['data']['duration']
-
- download_url = asset.get('downloadUrl') or asset.get('download_url')
-
- video = download_url.get('Video') or download_url.get('video')
- video_480p = download_url.get('Video480p') or download_url.get('video_480p')
-
- formats = [
- {
- 'url': video_480p[0],
- 'format_id': '360p',
- },
- {
- 'url': video[0],
- 'format_id': '720p',
- },
- ]
-
- title = lecture['title']
- description = lecture['description']
+ duration = float_or_none(asset.get('data', {}).get('duration'))
+ outputs = asset.get('data', {}).get('outputs', {})
+
+ formats = []
+ for format_ in asset.get('download_urls', {}).get('Video', []):
+ video_url = format_.get('file')
+ if not video_url:
+ continue
+ format_id = format_.get('label')
+ f = {
+ 'url': format_['file'],
+ 'height': int_or_none(format_id),
+ }
+ if format_id:
+ # Some videos contain additional metadata (e.g.
+ # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208)
+ output = outputs.get(format_id)
+ if isinstance(output, dict):
+ f.update({
+ 'format_id': '%sp' % (output.get('label') or format_id),
+ 'width': int_or_none(output.get('width')),
+ 'height': int_or_none(output.get('height')),
+ 'vbr': int_or_none(output.get('video_bitrate_in_kbps')),
+ 'vcodec': output.get('video_codec'),
+ 'fps': int_or_none(output.get('frame_rate')),
+ 'abr': int_or_none(output.get('audio_bitrate_in_kbps')),
+ 'acodec': output.get('audio_codec'),
+ 'asr': int_or_none(output.get('audio_sample_rate')),
+ 'tbr': int_or_none(output.get('total_bitrate_in_kbps')),
+ 'filesize': int_or_none(output.get('file_size_in_bytes')),
+ })
+ else:
+ f['format_id'] = '%sp' % format_id
+ formats.append(f)
+
+ self._sort_formats(formats)
return {
'id': video_id,
@@ -159,9 +219,7 @@ class UdemyIE(InfoExtractor):
class UdemyCourseIE(UdemyIE):
IE_NAME = 'udemy:course'
- _VALID_URL = r'https?://www\.udemy\.com/(?P<coursepath>[\da-z-]+)'
- _SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<'
- _ALREADY_ENROLLED = '>You are already taking this course.<'
+ _VALID_URL = r'https?://www\.udemy\.com/(?P<id>[\da-z-]+)'
_TESTS = []
@classmethod
@@ -169,24 +227,18 @@ class UdemyCourseIE(UdemyIE):
return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- course_path = mobj.group('coursepath')
+ course_path = self._match_id(url)
+
+ webpage = self._download_webpage(url, course_path)
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s' % course_path,
course_path, 'Downloading course JSON')
- course_id = int(response['id'])
- course_title = response['title']
+ course_id = response['id']
+ course_title = response.get('title')
- webpage = self._download_webpage(
- 'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id,
- course_id, 'Enrolling in the course')
-
- if self._SUCCESSFULLY_ENROLLED in webpage:
- self.to_screen('%s: Successfully enrolled in' % course_id)
- elif self._ALREADY_ENROLLED in webpage:
- self.to_screen('%s: Already enrolled in' % course_id)
+ self._enroll_course(webpage, course_id)
response = self._download_json(
'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id,
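
The reworked Udemy flow enrolls lazily: request the lecture first, and only on HTTP 403 enroll and retry once. A generic sketch of that retry-after-fixup shape using plain urllib (fetch_lecture and enroll are illustrative names):

import json

try:
    from urllib.request import urlopen
    from urllib.error import HTTPError
except ImportError:  # Python 2
    from urllib2 import urlopen, HTTPError


def fetch_lecture(url, enroll):
    # Try the API first: a 403 usually just means "not enrolled yet",
    # so run the fixup and retry exactly once.
    try:
        return json.load(urlopen(url))
    except HTTPError as e:
        if e.code != 403:
            raise
        enroll()
        return json.load(urlopen(url))

Retrying only once keeps a genuine 403 (say, a paid course the account cannot enroll in) from looping forever; note the retried result must be bound to the same name the success path uses.
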
diff --git a/youtube_dl/extractor/udn.py b/youtube_dl/extractor/udn.py
index 2151f8338..ee35b7227 100644
--- a/youtube_dl/extractor/udn.py
+++ b/youtube_dl/extractor/udn.py
@@ -12,7 +12,8 @@ from ..compat import compat_urlparse
class UDNEmbedIE(InfoExtractor):
IE_DESC = '聯合影音'
- _VALID_URL = r'https?://video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)'
+ _PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)'
+ _VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL
_TESTS = [{
'url': 'http://video.udn.com/embed/news/300040',
'md5': 'de06b4c90b042c128395a88f0384817e',
diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
index c39c278ab..73b05ecab 100644
--- a/youtube_dl/extractor/ustream.py
+++ b/youtube_dl/extractor/ustream.py
@@ -1,17 +1,20 @@
from __future__ import unicode_literals
-import json
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ float_or_none,
+)
class UstreamIE(InfoExtractor):
- _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
+ _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)'
IE_NAME = 'ustream'
_TESTS = [{
'url': 'http://www.ustream.tv/recorded/20274954',
@@ -19,8 +22,12 @@ class UstreamIE(InfoExtractor):
'info_dict': {
'id': '20274954',
'ext': 'flv',
- 'uploader': 'Young Americans for Liberty',
'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
+ 'description': 'Young Americans for Liberty February 7, 2012 2:28 AM',
+ 'timestamp': 1328577035,
+ 'upload_date': '20120207',
+ 'uploader': 'yaliberty',
+ 'uploader_id': '6780869',
},
}, {
# From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444
@@ -32,20 +39,21 @@ class UstreamIE(InfoExtractor):
'ext': 'flv',
'title': '-CG11- Canada Games Figure Skating',
'uploader': 'sportscanadatv',
- }
+ },
+ 'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.',
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
- video_id = m.group('videoID')
+ video_id = m.group('id')
# some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
if m.group('type') == 'embed/recorded':
- video_id = m.group('videoID')
+ video_id = m.group('id')
desktop_url = 'http://www.ustream.tv/recorded/' + video_id
return self.url_result(desktop_url, 'Ustream')
if m.group('type') == 'embed':
- video_id = m.group('videoID')
+ video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
desktop_video_id = self._html_search_regex(
r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
@@ -53,52 +61,50 @@ class UstreamIE(InfoExtractor):
return self.url_result(desktop_url, 'Ustream')
params = self._download_json(
- 'http://cdngw.ustream.tv/rgwjson/Viewer.getVideo/' + json.dumps({
- 'brandId': 1,
- 'videoId': int(video_id),
- 'autoplay': False,
- }), video_id)
-
- if 'error' in params:
- raise ExtractorError(params['error']['message'], expected=True)
-
- video_url = params['flv']
+ 'https://api.ustream.tv/videos/%s.json' % video_id, video_id)
- webpage = self._download_webpage(url, video_id)
+ error = params.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, error), expected=True)
- self.report_extraction(video_id)
+ video = params['video']
- video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
- webpage, 'title', default=None)
+ title = video['title']
+ filesize = float_or_none(video.get('file_size'))
- if not video_title:
- try:
- video_title = params['moduleConfig']['meta']['title']
- except KeyError:
- pass
-
- if not video_title:
- video_title = 'Ustream video ' + video_id
+ formats = [{
+ 'format_id': format_id,
+ 'url': video_url,
+ 'ext': format_id,
+ 'filesize': filesize,
+ } for format_id, video_url in video['media_urls'].items()]
+ self._sort_formats(formats)
- uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
- webpage, 'uploader', fatal=False, flags=re.DOTALL, default=None)
+ description = video.get('description')
+ timestamp = int_or_none(video.get('created_at'))
+ duration = float_or_none(video.get('length'))
+ view_count = int_or_none(video.get('views'))
- if not uploader:
- try:
- uploader = params['moduleConfig']['meta']['userName']
- except KeyError:
- uploader = None
+ uploader = video.get('owner', {}).get('username')
+ uploader_id = video.get('owner', {}).get('id')
- thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
- webpage, 'thumbnail', fatal=False)
+ thumbnails = [{
+ 'id': thumbnail_id,
+ 'url': thumbnail_url,
+ } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()]
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'flv',
- 'title': video_title,
+ 'title': title,
+ 'description': description,
+ 'thumbnails': thumbnails,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'view_count': view_count,
'uploader': uploader,
- 'thumbnail': thumbnail,
+ 'uploader_id': uploader_id,
+ 'formats': formats,
}
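
The rewritten Ustream extractor derives all formats from the API's media_urls mapping of container name to URL. A tiny sketch of that mapping-to-formats step, with fabricated URLs:

def formats_from_media_urls(media_urls, filesize=None):
    # Each key ('flv', 'mp4', ...) names both the format and its container,
    # mirroring the dict comprehension in the hunk above.
    return [{
        'format_id': format_id,
        'url': video_url,
        'ext': format_id,
        'filesize': filesize,
    } for format_id, video_url in sorted(media_urls.items())]


print(formats_from_media_urls({
    'flv': 'http://tcdn.example.com/20274954.flv',
    'mp4': 'http://tcdn.example.com/20274954.mp4',
}, filesize=1234567.0))
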
diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py
index 722eb5236..1e740fbe6 100644
--- a/youtube_dl/extractor/vbox7.py
+++ b/youtube_dl/extractor/vbox7.py
@@ -4,11 +4,11 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
- compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
+ sanitized_Request,
)
@@ -49,7 +49,7 @@ class Vbox7IE(InfoExtractor):
info_url = "http://vbox7.com/play/magare.do"
data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
- info_request = compat_urllib_request.Request(info_url, data)
+ info_request = sanitized_Request(info_url, data)
info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')
if info_response is None:
diff --git a/youtube_dl/extractor/veoh.py b/youtube_dl/extractor/veoh.py
index 01e258e32..9633f7ffe 100644
--- a/youtube_dl/extractor/veoh.py
+++ b/youtube_dl/extractor/veoh.py
@@ -4,12 +4,10 @@ import re
import json
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
-)
from ..utils import (
int_or_none,
ExtractorError,
+ sanitized_Request,
)
@@ -110,7 +108,7 @@ class VeohIE(InfoExtractor):
if 'class="adultwarning-container"' in webpage:
self.report_age_confirmation()
age_limit = 18
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
request.add_header('Cookie', 'confirmedAdult=true')
webpage = self._download_webpage(request, video_id)
diff --git a/youtube_dl/extractor/vessel.py b/youtube_dl/extractor/vessel.py
index 3c8d2a943..1a0ff3395 100644
--- a/youtube_dl/extractor/vessel.py
+++ b/youtube_dl/extractor/vessel.py
@@ -4,10 +4,10 @@ from __future__ import unicode_literals
import json
from .common import InfoExtractor
-from ..compat import compat_urllib_request
from ..utils import (
ExtractorError,
parse_iso8601,
+ sanitized_Request,
)
@@ -33,7 +33,7 @@ class VesselIE(InfoExtractor):
@staticmethod
def make_json_request(url, data):
payload = json.dumps(data).encode('utf-8')
- req = compat_urllib_request.Request(url, payload)
+ req = sanitized_Request(url, payload)
req.add_header('Content-Type', 'application/json; charset=utf-8')
return req
diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py
index c17094f81..02dfd36f4 100644
--- a/youtube_dl/extractor/vevo.py
+++ b/youtube_dl/extractor/vevo.py
@@ -1,15 +1,16 @@
from __future__ import unicode_literals
import re
-import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import (
- compat_urllib_request,
+ compat_etree_fromstring,
+ compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
+ sanitized_Request,
)
@@ -69,11 +70,22 @@ class VevoIE(InfoExtractor):
'params': {
'skip_download': 'true',
}
+ }, {
+ 'note': 'No video_info',
+ 'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000',
+ 'md5': '8b83cc492d72fc9cf74a02acee7dc1b0',
+ 'info_dict': {
+ 'id': 'USUV71503000',
+ 'ext': 'mp4',
+ 'title': 'Till I Die - K Camp ft. T.I.',
+ 'duration': 193,
+ },
+ 'expected_warnings': ['Unable to download SMIL file'],
}]
_SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
def _real_initialize(self):
- req = compat_urllib_request.Request(
+ req = sanitized_Request(
'http://www.vevo.com/auth', data=b'')
webpage = self._download_webpage(
req, None,
@@ -83,11 +95,17 @@ class VevoIE(InfoExtractor):
if webpage is False:
self._oauth_token = None
else:
+ if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage:
+ raise ExtractorError('%s said: This page is currently unavailable in your region.' % self.IE_NAME, expected=True)
+
self._oauth_token = self._search_regex(
r'access_token":\s*"([^"]+)"',
webpage, 'access token', fatal=False)
def _formats_from_json(self, video_info):
+ if not video_info:
+ return []
+
last_version = {'version': -1}
for version in video_info['videoVersions']:
# These are the HTTP downloads, other types are for different manifests
@@ -97,7 +115,7 @@ class VevoIE(InfoExtractor):
if last_version['version'] == -1:
raise ExtractorError('Unable to extract last version of the video')
- renditions = xml.etree.ElementTree.fromstring(last_version['data'])
+ renditions = compat_etree_fromstring(last_version['data'])
formats = []
# Already sorted from worst to best quality
for rend in renditions.findall('rendition'):
@@ -112,9 +130,8 @@ class VevoIE(InfoExtractor):
})
return formats
- def _formats_from_smil(self, smil_xml):
+ def _formats_from_smil(self, smil_doc):
formats = []
- smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8'))
els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
for el in els:
src = el.attrib['src']
@@ -147,14 +164,14 @@ class VevoIE(InfoExtractor):
})
return formats
- def _download_api_formats(self, video_id):
+ def _download_api_formats(self, video_id, video_url):
if not self._oauth_token:
self._downloader.report_warning(
'No oauth token available, skipping API HLS download')
return []
- api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
- video_id, self._oauth_token)
+ api_url = compat_urlparse.urljoin(video_url, '//apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
+ video_id, self._oauth_token))
api_data = self._download_json(
api_url, video_id,
note='Downloading HLS formats',
@@ -168,18 +185,26 @@ class VevoIE(InfoExtractor):
preference=0)
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
+
+ webpage = None
json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
response = self._download_json(json_url, video_id)
- video_info = response['video']
+ video_info = response['video'] or {}
- if not video_info:
+ if not video_info and response.get('statusCode') != 909:
if 'statusMessage' in response:
raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusMessage']), expected=True)
raise ExtractorError('Unable to extract videos')
+ if not video_info:
+ if url.startswith('vevo:'):
+ raise ExtractorError('Please specify full Vevo URL for downloading', expected=True)
+ webpage = self._download_webpage(url, video_id)
+
+ title = video_info.get('title') or self._og_search_title(webpage)
+
formats = self._formats_from_json(video_info)
is_explicit = video_info.get('isExplicit')
@@ -191,11 +216,11 @@ class VevoIE(InfoExtractor):
age_limit = None
# Download via HLS API
- formats.extend(self._download_api_formats(video_id))
+ formats.extend(self._download_api_formats(video_id, url))
# Download SMIL
smil_blocks = sorted((
- f for f in video_info['videoVersions']
+ f for f in video_info.get('videoVersions', [])
if f['sourceType'] == 13),
key=lambda f: f['version'])
smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
@@ -207,23 +232,26 @@ class VevoIE(InfoExtractor):
if smil_url_m is not None:
smil_url = smil_url_m
if smil_url:
- smil_xml = self._download_webpage(
- smil_url, video_id, 'Downloading SMIL info', fatal=False)
- if smil_xml:
- formats.extend(self._formats_from_smil(smil_xml))
+ smil_doc = self._download_smil(smil_url, video_id, fatal=False)
+ if smil_doc:
+ formats.extend(self._formats_from_smil(smil_doc))
self._sort_formats(formats)
- timestamp_ms = int_or_none(self._search_regex(
+ timestamp = int_or_none(self._search_regex(
r'/Date\((\d+)\)/',
- video_info['launchDate'], 'launch date', fatal=False))
+ video_info['launchDate'], 'launch date', fatal=False),
+ scale=1000) if video_info else None
+
+ duration = video_info.get('duration') or int_or_none(
+ self._html_search_meta('video:duration', webpage))
return {
'id': video_id,
- 'title': video_info['title'],
+ 'title': title,
'formats': formats,
- 'thumbnail': video_info['imageUrl'],
- 'timestamp': timestamp_ms // 1000,
- 'uploader': video_info['mainArtists'][0]['artistName'],
- 'duration': video_info['duration'],
+ 'thumbnail': video_info.get('imageUrl'),
+ 'timestamp': timestamp,
+ 'uploader': video_info['mainArtists'][0]['artistName'] if video_info else None,
+ 'duration': duration,
'age_limit': age_limit,
}
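
Building the HLS API URL with urljoin() against the watch-page URL lets the protocol-relative '//apiv2.vevo.com/...' path inherit the caller's scheme. A quick demonstration of that trick with Python's standard urljoin:

try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin  # Python 2

# A protocol-relative second argument keeps the first URL's scheme but
# replaces host and path entirely.
print(urljoin('https://www.vevo.com/watch/x/y/USUV71503000',
              '//apiv2.vevo.com/video/USUV71503000/streams/hls?token=T'))
# -> https://apiv2.vevo.com/video/USUV71503000/streams/hls?token=T
print(urljoin('http://www.vevo.com/watch/x/y/USUV71503000',
              '//apiv2.vevo.com/video/USUV71503000/streams/hls?token=T'))
# -> http://apiv2.vevo.com/video/USUV71503000/streams/hls?token=T
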
diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py
index 01af7a995..3db6286e4 100644
--- a/youtube_dl/extractor/vice.py
+++ b/youtube_dl/extractor/vice.py
@@ -15,6 +15,7 @@ class ViceIE(InfoExtractor):
'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
'ext': 'mp4',
'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
+ 'duration': 725.983,
},
'params': {
# Requires ffmpeg (m3u8 manifest)
diff --git a/youtube_dl/extractor/viddler.py b/youtube_dl/extractor/viddler.py
index 8516a2940..40ffbad2a 100644
--- a/youtube_dl/extractor/viddler.py
+++ b/youtube_dl/extractor/viddler.py
@@ -4,9 +4,7 @@ from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
-)
-from ..compat import (
- compat_urllib_request
+ sanitized_Request,
)
@@ -65,7 +63,7 @@ class ViddlerIE(InfoExtractor):
'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' %
video_id)
headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'}
- request = compat_urllib_request.Request(json_url, None, headers)
+ request = sanitized_Request(json_url, None, headers)
data = self._download_json(request, video_id)['video']
formats = []
diff --git a/youtube_dl/extractor/videofyme.py b/youtube_dl/extractor/videofyme.py
index 94f9e9be9..cd3f50a63 100644
--- a/youtube_dl/extractor/videofyme.py
+++ b/youtube_dl/extractor/videofyme.py
@@ -2,8 +2,8 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
- find_xpath_attr,
int_or_none,
+ parse_iso8601,
)
@@ -18,33 +18,35 @@ class VideofyMeIE(InfoExtractor):
'id': '1100701',
'ext': 'mp4',
'title': 'This is VideofyMe',
- 'description': None,
+ 'description': '',
+ 'upload_date': '20130326',
+ 'timestamp': 1364288959,
'uploader': 'VideofyMe',
'uploader_id': 'thisisvideofyme',
'view_count': int,
+ 'likes': int,
+ 'comment_count': int,
},
-
}
def _real_extract(self, url):
video_id = self._match_id(url)
- config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id,
- video_id)
- video = config.find('video')
- sources = video.find('sources')
- url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key)
- for key in ['on', 'av', 'off']] if node is not None)
- video_url = url_node.find('url').text
- view_count = int_or_none(self._search_regex(
- r'([0-9]+)', video.find('views').text, 'view count', fatal=False))
+
+ config = self._download_json('http://vf-player-info-loader.herokuapp.com/%s.json' % video_id, video_id)['videoinfo']
+
+ video = config.get('video')
+ blog = config.get('blog', {})
return {
'id': video_id,
- 'title': video.find('title').text,
- 'url': video_url,
- 'thumbnail': video.find('thumb').text,
- 'description': video.find('description').text,
- 'uploader': config.find('blog/name').text,
- 'uploader_id': video.find('identifier').text,
- 'view_count': view_count,
+ 'title': video['title'],
+ 'url': video['sources']['source']['url'],
+ 'thumbnail': video.get('thumb'),
+ 'description': video.get('description'),
+ 'timestamp': parse_iso8601(video.get('date')),
+ 'uploader': blog.get('name'),
+ 'uploader_id': blog.get('identifier'),
+ 'view_count': int_or_none(self._search_regex(r'([0-9]+)', video.get('views'), 'view count', fatal=False)),
+ 'likes': int_or_none(video.get('likes')),
+ 'comment_count': int_or_none(video.get('nrOfComments')),
}
diff --git a/youtube_dl/extractor/videolecturesnet.py b/youtube_dl/extractor/videolecturesnet.py
deleted file mode 100644
index ef2da5632..000000000
--- a/youtube_dl/extractor/videolecturesnet.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
- find_xpath_attr,
- int_or_none,
- parse_duration,
- unified_strdate,
-)
-
-
-class VideoLecturesNetIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/*(?:[#?].*)?$'
- IE_NAME = 'videolectures.net'
-
- _TEST = {
- 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
- 'info_dict': {
- 'id': 'promogram_igor_mekjavic_eng',
- 'ext': 'mp4',
- 'title': 'Automatics, robotics and biocybernetics',
- 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
- 'upload_date': '20130627',
- 'duration': 565,
- 'thumbnail': 're:http://.*\.jpg',
- },
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- smil_url = 'http://videolectures.net/%s/video/1/smil.xml' % video_id
- smil = self._download_xml(smil_url, video_id)
-
- title = find_xpath_attr(smil, './/meta', 'name', 'title').attrib['content']
- description_el = find_xpath_attr(smil, './/meta', 'name', 'abstract')
- description = (
- None if description_el is None
- else description_el.attrib['content'])
- upload_date = unified_strdate(
- find_xpath_attr(smil, './/meta', 'name', 'date').attrib['content'])
-
- switch = smil.find('.//switch')
- duration = parse_duration(switch.attrib.get('dur'))
- thumbnail_el = find_xpath_attr(switch, './image', 'type', 'thumbnail')
- thumbnail = (
- None if thumbnail_el is None else thumbnail_el.attrib.get('src'))
-
- formats = []
- for v in switch.findall('./video'):
- proto = v.attrib.get('proto')
- if proto not in ['http', 'rtmp']:
- continue
- f = {
- 'width': int_or_none(v.attrib.get('width')),
- 'height': int_or_none(v.attrib.get('height')),
- 'filesize': int_or_none(v.attrib.get('size')),
- 'tbr': int_or_none(v.attrib.get('systemBitrate')) / 1000.0,
- 'ext': v.attrib.get('ext'),
- }
- src = v.attrib['src']
- if proto == 'http':
- if self._is_valid_url(src, video_id):
- f['url'] = src
- formats.append(f)
- elif proto == 'rtmp':
- f.update({
- 'url': v.attrib['streamer'],
- 'play_path': src,
- 'rtmp_real_time': True,
- })
- formats.append(f)
- self._sort_formats(formats)
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'upload_date': upload_date,
- 'duration': duration,
- 'thumbnail': thumbnail,
- 'formats': formats,
- }
diff --git a/youtube_dl/extractor/videomega.py b/youtube_dl/extractor/videomega.py
index 78ff6310a..87aca327b 100644
--- a/youtube_dl/extractor/videomega.py
+++ b/youtube_dl/extractor/videomega.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_request
+from ..utils import sanitized_Request
class VideoMegaIE(InfoExtractor):
@@ -30,7 +30,7 @@ class VideoMegaIE(InfoExtractor):
video_id = self._match_id(url)
iframe_url = 'http://videomega.tv/cdn.php?ref=%s' % video_id
- req = compat_urllib_request.Request(iframe_url)
+ req = sanitized_Request(iframe_url)
req.add_header('Referer', url)
req.add_header('Cookie', 'noadvtday=0')
webpage = self._download_webpage(req, video_id)
diff --git a/youtube_dl/extractor/videoweed.py b/youtube_dl/extractor/videoweed.py
deleted file mode 100644
index ca2e50935..000000000
--- a/youtube_dl/extractor/videoweed.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from __future__ import unicode_literals
-
-from .novamov import NovaMovIE
-
-
-class VideoWeedIE(NovaMovIE):
- IE_NAME = 'videoweed'
- IE_DESC = 'VideoWeed'
-
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'}
-
- _HOST = 'www.videoweed.es'
-
- _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
- _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>'
-
- _TEST = {
- 'url': 'http://www.videoweed.es/file/b42178afbea14',
- 'md5': 'abd31a2132947262c50429e1d16c1bfd',
- 'info_dict': {
- 'id': 'b42178afbea14',
- 'ext': 'flv',
- 'title': 'optical illusion dissapeared image magic illusion',
- 'description': ''
- },
- }
diff --git a/youtube_dl/extractor/vidme.py b/youtube_dl/extractor/vidme.py
index 157bb74fe..3d63ed4f0 100644
--- a/youtube_dl/extractor/vidme.py
+++ b/youtube_dl/extractor/vidme.py
@@ -1,10 +1,12 @@
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import compat_HTTPError
from ..utils import (
+ ExtractorError,
int_or_none,
float_or_none,
- str_to_int,
+ parse_iso8601,
)
@@ -18,12 +20,35 @@ class VidmeIE(InfoExtractor):
'ext': 'mp4',
'title': 'Fishing for piranha - the easy way',
'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
- 'duration': 119.92,
+ 'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1406313244,
'upload_date': '20140725',
+ 'age_limit': 0,
+ 'duration': 119.92,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ }, {
+ 'url': 'https://vid.me/Gc6M',
+ 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
+ 'info_dict': {
+ 'id': 'Gc6M',
+ 'ext': 'mp4',
+ 'title': 'O Mere Dil ke chain - Arnav and Khushi VM',
'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1441211642,
+ 'upload_date': '20150902',
+ 'uploader': 'SunshineM',
+ 'uploader_id': '3552827',
+ 'age_limit': 0,
+ 'duration': 223.72,
'view_count': int,
'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
},
}, {
# tests uploader field
@@ -33,63 +58,147 @@ class VidmeIE(InfoExtractor):
'ext': 'mp4',
'title': 'The Carver',
'description': 'md5:e9c24870018ae8113be936645b93ba3c',
- 'duration': 97.859999999999999,
+ 'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1433203629,
'upload_date': '20150602',
'uploader': 'Thomas',
- 'thumbnail': 're:^https?://.*\.jpg',
+ 'uploader_id': '109747',
+ 'age_limit': 0,
+ 'duration': 97.859999999999999,
'view_count': int,
'like_count': int,
+ 'comment_count': int,
},
'params': {
'skip_download': True,
},
}, {
- # From http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
+ # nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
'url': 'https://vid.me/e/Wmur',
+ 'info_dict': {
+ 'id': 'Wmur',
+ 'ext': 'mp4',
+ 'title': 'naked smoking & stretching',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1430931613,
+ 'upload_date': '20150506',
+ 'uploader': 'naked-yogi',
+ 'uploader_id': '1638622',
+ 'age_limit': 18,
+ 'duration': 653.26999999999998,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # nsfw, user-disabled
+ 'url': 'https://vid.me/dzGJ',
+ 'only_matching': True,
+ }, {
+ # suspended
+ 'url': 'https://vid.me/Ox3G',
'only_matching': True,
+ }, {
+ # deleted
+ 'url': 'https://vid.me/KTPm',
+ 'only_matching': True,
+ }, {
+ # no formats in the API response
+ 'url': 'https://vid.me/e5g',
+ 'info_dict': {
+ 'id': 'e5g',
+ 'ext': 'mp4',
+ 'title': 'Video upload (e5g)',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1401480195,
+ 'upload_date': '20140530',
+ 'uploader': None,
+ 'uploader_id': None,
+ 'age_limit': 0,
+ 'duration': 483,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
- url = url.replace('vid.me/e/', 'vid.me/')
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
- video_url = self._html_search_regex(
- r'<source src="([^"]+)"', webpage, 'video URL')
+ try:
+ response = self._download_json(
+ 'https://api.vid.me/videoByUrl/%s' % video_id, video_id)
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+ response = self._parse_json(e.cause.read(), video_id)
+ else:
+ raise
+
+ error = response.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, error), expected=True)
+
+ video = response['video']
+
+ if video.get('state') == 'deleted':
+ raise ExtractorError(
+ 'Vidme said: Sorry, this video has been deleted.',
+ expected=True)
- title = self._og_search_title(webpage)
- description = self._og_search_description(webpage, default='')
- thumbnail = self._og_search_thumbnail(webpage)
- timestamp = int_or_none(self._og_search_property(
- 'updated_time', webpage, fatal=False))
- width = int_or_none(self._og_search_property(
- 'video:width', webpage, fatal=False))
- height = int_or_none(self._og_search_property(
- 'video:height', webpage, fatal=False))
- duration = float_or_none(self._html_search_regex(
- r'data-duration="([^"]+)"', webpage, 'duration', fatal=False))
- view_count = str_to_int(self._html_search_regex(
- r'<(?:li|span) class="video_views">\s*([\d,\.]+)\s*plays?',
- webpage, 'view count', fatal=False))
- like_count = str_to_int(self._html_search_regex(
- r'class="score js-video-vote-score"[^>]+data-score="([\d,\.\s]+)">',
- webpage, 'like count', fatal=False))
- uploader = self._html_search_regex(
- 'class="video_author_username"[^>]*>([^<]+)',
- webpage, 'uploader', default=None)
+ if video.get('state') in ('user-disabled', 'suspended'):
+ raise ExtractorError(
+ 'Vidme said: This video has been suspended either due to a copyright claim, '
+ 'or for violating the terms of use.',
+ expected=True)
+
+ formats = [{
+ 'format_id': f.get('type'),
+ 'url': f['uri'],
+ 'width': int_or_none(f.get('width')),
+ 'height': int_or_none(f.get('height')),
+ 'preference': 0 if f.get('type', '').endswith('clip') else 1,
+ } for f in video.get('formats', []) if f.get('uri')]
+
+ if not formats and video.get('complete_url'):
+ formats.append({
+ 'url': video.get('complete_url'),
+ 'width': int_or_none(video.get('width')),
+ 'height': int_or_none(video.get('height')),
+ })
+
+ self._sort_formats(formats)
+
+ title = video['title']
+ description = video.get('description')
+ thumbnail = video.get('thumbnail_url')
+ timestamp = parse_iso8601(video.get('date_created'), ' ')
+ uploader = video.get('user', {}).get('username')
+ uploader_id = video.get('user', {}).get('user_id')
+ age_limit = 18 if video.get('nsfw') is True else 0
+ duration = float_or_none(video.get('duration'))
+ view_count = int_or_none(video.get('view_count'))
+ like_count = int_or_none(video.get('likes_count'))
+ comment_count = int_or_none(video.get('comment_count'))
return {
'id': video_id,
- 'url': video_url,
- 'title': title,
+ 'title': title or 'Video upload (%s)' % video_id,
'description': description,
'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'age_limit': age_limit,
'timestamp': timestamp,
- 'width': width,
- 'height': height,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
- 'uploader': uploader,
+ 'comment_count': comment_count,
+ 'formats': formats,
}
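
vid.me answers bad or removed video IDs with HTTP 400 plus a JSON body, which the extractor now parses out of the error itself. A standalone sketch of reading a JSON error body with plain urllib (only the 400 case shown above is assumed to carry JSON):

import json

try:
    from urllib.request import urlopen
    from urllib.error import HTTPError
except ImportError:  # Python 2
    from urllib2 import urlopen, HTTPError


def fetch_video_info(video_id):
    url = 'https://api.vid.me/videoByUrl/%s' % video_id
    try:
        body = urlopen(url).read()
    except HTTPError as e:
        if e.code != 400:
            raise
        # The 400 response still carries a JSON document whose 'error'
        # field says what went wrong (deleted, suspended, ...).
        body = e.read()
    return json.loads(body.decode('utf-8'))
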
diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py
index 08a5a7b8d..2ba9f31df 100644
--- a/youtube_dl/extractor/vidzi.py
+++ b/youtube_dl/extractor/vidzi.py
@@ -20,8 +20,14 @@ class VidziIE(InfoExtractor):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- video_url = self._html_search_regex(
- r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url')
+ video_host = self._html_search_regex(
+ r'id=\'vplayer\'><img src="http://(.*?)/i', webpage,
+ 'video host')
+ video_hash = self._html_search_regex(
+ r'\|([a-z0-9]+)\|hls\|type', webpage, 'video_hash')
+ ext = self._html_search_regex(
+ r'\|tracks\|([a-z0-9]+)\|', webpage, 'video ext')
+ video_url = 'http://' + video_host + '/' + video_hash + '/v.' + ext
title = self._html_search_regex(
r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')
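
Vidzi's player settings now come from a p.a.c.k.e.r-style packed script, so the URL pieces are fished out of the packer's '|'-separated keyword table by their known neighbours. A toy illustration on a fabricated table:

import re

# Fabricated tail of an eval(function(p,a,c,k,e,d)...) payload: packed
# scripts keep every identifier in one '|'-separated keyword table.
packed_tail = '|cdn23|example|com|i|abc123def456|hls|type|tracks|mp4|'

# The interesting tokens sit next to known neighbours, so they can be
# picked out positionally, exactly like the host/hash/ext regexes above.
video_hash = re.search(r'\|([a-z0-9]+)\|hls\|type', packed_tail).group(1)
ext = re.search(r'\|tracks\|([a-z0-9]+)\|', packed_tail).group(1)
print(video_hash, ext)  # -> abc123def456 mp4
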
diff --git a/youtube_dl/extractor/vier.py b/youtube_dl/extractor/vier.py
index 15377097e..c76c20614 100644
--- a/youtube_dl/extractor/vier.py
+++ b/youtube_dl/extractor/vier.py
@@ -2,6 +2,7 @@
from __future__ import unicode_literals
import re
+import itertools
from .common import InfoExtractor
@@ -91,31 +92,27 @@ class VierVideosIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
program = mobj.group('program')
- webpage = self._download_webpage(url, program)
-
page_id = mobj.group('page')
if page_id:
page_id = int(page_id)
start_page = page_id
- last_page = start_page + 1
playlist_id = '%s-page%d' % (program, page_id)
else:
start_page = 0
- last_page = int(self._search_regex(
- r'videos\?page=(\d+)">laatste</a>',
- webpage, 'last page', default=0)) + 1
playlist_id = program
entries = []
- for current_page_id in range(start_page, last_page):
+ for current_page_id in itertools.count(start_page):
current_page = self._download_webpage(
'http://www.vier.be/%s/videos?page=%d' % (program, current_page_id),
program,
- 'Downloading page %d' % (current_page_id + 1)) if current_page_id != page_id else webpage
+ 'Downloading page %d' % (current_page_id + 1))
page_entries = [
self.url_result('http://www.vier.be' + video_url, 'Vier')
for video_url in re.findall(
r'<h3><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)]
entries.extend(page_entries)
+ if page_id or '>Meer<' not in current_page:
+ break
return self.playlist_result(entries, playlist_id)
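
The pager above switches from a precomputed last-page number to itertools.count() with an explicit break once the 'Meer' (more) link disappears. The same loop shape over canned pages:

import itertools
import re

# Canned pages standing in for downloaded HTML; only the last one lacks
# the '>Meer<' link that keeps the pager going.
PAGES = [
    '<h3><a href="/prog/videos/a/1">a</a></h3> >Meer<',
    '<h3><a href="/prog/videos/b/2">b</a></h3> >Meer<',
    '<h3><a href="/prog/videos/c/3">c</a></h3>',
]

entries = []
for page_id in itertools.count(0):
    page = PAGES[page_id]
    entries.extend(re.findall(r'<h3><a href="(/[^"]+)">', page))
    # Open-ended counting plus an explicit break avoids having to scrape
    # the last-page number up front.
    if '>Meer<' not in page:
        break

print(entries)  # -> ['/prog/videos/a/1', '/prog/videos/b/2', '/prog/videos/c/3']
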
diff --git a/youtube_dl/extractor/viewster.py b/youtube_dl/extractor/viewster.py
index cda02ba24..185b1c119 100644
--- a/youtube_dl/extractor/viewster.py
+++ b/youtube_dl/extractor/viewster.py
@@ -3,27 +3,29 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
- compat_urllib_request,
+ compat_HTTPError,
compat_urllib_parse,
compat_urllib_parse_unquote,
)
from ..utils import (
determine_ext,
+ ExtractorError,
int_or_none,
parse_iso8601,
+ sanitized_Request,
HEADRequest,
)
class ViewsterIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?viewster\.com/(?:serie|movie)/(?P<id>\d+-\d+-\d+)'
+ _VALID_URL = r'https?://(?:www\.)?viewster\.com/(?:serie|movie)/(?P<id>\d+-\d+-\d+)'
_TESTS = [{
# movie, Type=Movie
'url': 'http://www.viewster.com/movie/1140-11855-000/the-listening-project/',
- 'md5': '14d3cfffe66d57b41ae2d9c873416f01',
+ 'md5': 'e642d1b27fcf3a4ffa79f194f5adde36',
'info_dict': {
'id': '1140-11855-000',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'The listening Project',
'description': 'md5:bac720244afd1a8ea279864e67baa071',
'timestamp': 1214870400,
@@ -33,10 +35,10 @@ class ViewsterIE(InfoExtractor):
}, {
# series episode, Type=Episode
'url': 'http://www.viewster.com/serie/1284-19427-001/the-world-and-a-wall/',
- 'md5': 'd5434c80fcfdb61651cc2199a88d6ba3',
+ 'md5': '9243079a8531809efe1b089db102c069',
'info_dict': {
'id': '1284-19427-001',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'The World and a Wall',
'description': 'md5:24814cf74d3453fdf5bfef9716d073e3',
'timestamp': 1428192000,
@@ -61,12 +63,20 @@ class ViewsterIE(InfoExtractor):
'description': 'md5:e7097a8fc97151e25f085c9eb7a1cdb1',
},
'playlist_mincount': 16,
+ }, {
+ # geo restricted series
+ 'url': 'https://www.viewster.com/serie/1280-18794-002/',
+ 'only_matching': True,
+ }, {
+ # geo restricted video
+ 'url': 'https://www.viewster.com/serie/1280-18794-002/what-is-extraterritoriality-lawo/',
+ 'only_matching': True,
}]
_ACCEPT_HEADER = 'application/json, text/javascript, */*; q=0.01'
def _download_json(self, url, video_id, note='Downloading JSON metadata', fatal=True):
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
request.add_header('Accept', self._ACCEPT_HEADER)
request.add_header('Auth-token', self._AUTH_TOKEN)
return super(ViewsterIE, self)._download_json(request, video_id, note, fatal=fatal)
@@ -74,8 +84,8 @@ class ViewsterIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
# Get 'api_token' cookie
- self._request_webpage(HEADRequest(url), video_id)
- cookies = self._get_cookies(url)
+ self._request_webpage(HEADRequest('http://www.viewster.com/'), video_id)
+ cookies = self._get_cookies('http://www.viewster.com/')
self._AUTH_TOKEN = compat_urllib_parse_unquote(cookies['api_token'].value)
info = self._download_json(
@@ -85,10 +95,16 @@ class ViewsterIE(InfoExtractor):
entry_id = info.get('Id') or info['id']
# an unfinished series has no Type
- if info.get('Type') in ['Serie', None]:
- episodes = self._download_json(
- 'https://public-api.viewster.com/series/%s/episodes' % entry_id,
- video_id, 'Downloading series JSON')
+ if info.get('Type') in ('Serie', None):
+ try:
+ episodes = self._download_json(
+ 'https://public-api.viewster.com/series/%s/episodes' % entry_id,
+ video_id, 'Downloading series JSON')
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
+ self.raise_geo_restricted()
+ else:
+ raise
entries = [
self.url_result(
'http://www.viewster.com/movie/%s' % episode['OriginId'], 'Viewster')
@@ -98,7 +114,7 @@ class ViewsterIE(InfoExtractor):
return self.playlist_result(entries, video_id, title, description)
formats = []
- for media_type in ('application/f4m+xml', 'application/x-mpegURL'):
+ for media_type in ('application/f4m+xml', 'application/x-mpegURL', 'video/mp4'):
media = self._download_json(
'https://public-api.viewster.com/movies/%s/video?mediaType=%s'
% (entry_id, compat_urllib_parse.quote(media_type)),
@@ -115,14 +131,28 @@ class ViewsterIE(InfoExtractor):
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds'))
elif ext == 'm3u8':
- formats.extend(self._extract_m3u8_formats(
+ m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='hls',
- fatal=False # m3u8 sometimes fail
- ))
+ fatal=False) # m3u8 sometimes fail
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
else:
- formats.append({
+ format_id = media.get('Bitrate')
+ f = {
'url': video_url,
- })
+ 'format_id': 'mp4-%s' % format_id,
+ 'height': int_or_none(media.get('Height')),
+ 'width': int_or_none(media.get('Width')),
+ 'preference': 1,
+ }
+ if format_id and not f['height']:
+ f['height'] = int_or_none(self._search_regex(
+ r'^(\d+)[pP]$', format_id, 'height', default=None))
+ formats.append(f)
+
+ if not formats and not info.get('LanguageSets') and not info.get('VODSettings'):
+ self.raise_geo_restricted()
+
self._sort_formats(formats)
synopsis = info.get('Synopsis', {})
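
For the newly added 'video/mp4' media type, the height is recovered from a '480p'-style Bitrate label whenever the API omits it. A small sketch of that fallback; the 'Uri' field name is an assumption, since the hunk does not show where video_url is read:

import re


def mp4_format(media):
    # 'Uri' is assumed here for the direct file URL; Bitrate carries a
    # label like '480p' that doubles as a height hint.
    format_id = media.get('Bitrate')
    f = {
        'url': media['Uri'],
        'format_id': 'mp4-%s' % format_id,
        'height': media.get('Height'),
        'width': media.get('Width'),
        'preference': 1,
    }
    if format_id and not f['height']:
        # Fall back to parsing the height out of the '480p' label.
        m = re.match(r'(\d+)[pP]$', format_id)
        if m:
            f['height'] = int(m.group(1))
    return f


print(mp4_format({'Uri': 'https://cdn.example.com/v.mp4', 'Bitrate': '480p'}))
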
diff --git a/youtube_dl/extractor/viidea.py b/youtube_dl/extractor/viidea.py
new file mode 100644
index 000000000..525e303d4
--- /dev/null
+++ b/youtube_dl/extractor/viidea.py
@@ -0,0 +1,188 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urlparse,
+ compat_str,
+)
+from ..utils import (
+ parse_duration,
+ js_to_json,
+ parse_iso8601,
+)
+
+
+class ViideaIE(InfoExtractor):
+ _VALID_URL = r'''(?x)http://(?:www\.)?(?:
+ videolectures\.net|
+ flexilearn\.viidea\.net|
+ presentations\.ocwconsortium\.org|
+ video\.travel-zoom\.si|
+ video\.pomp-forum\.si|
+ tv\.nil\.si|
+ video\.hekovnik.com|
+ video\.szko\.si|
+ kpk\.viidea\.com|
+ inside\.viidea\.net|
+ video\.kiberpipa\.org|
+ bvvideo\.si|
+ kongres\.viidea\.net|
+ edemokracija\.viidea\.com
+ )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$'''
+
+ _TESTS = [{
+ 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/',
+ 'info_dict': {
+ 'id': '20171',
+ 'display_id': 'promogram_igor_mekjavic_eng',
+ 'ext': 'mp4',
+ 'title': 'Automatics, robotics and biocybernetics',
+ 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'timestamp': 1372349289,
+ 'upload_date': '20130627',
+ 'duration': 565,
+ },
+ }, {
+ # video with invalid direct format links (HTTP 403)
+ 'url': 'http://videolectures.net/russir2010_filippova_nlp/',
+ 'info_dict': {
+ 'id': '14891',
+ 'display_id': 'russir2010_filippova_nlp',
+ 'ext': 'flv',
+ 'title': 'NLP at Google',
+ 'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'timestamp': 1284375600,
+ 'upload_date': '20100913',
+ 'duration': 5352,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # event playlist
+ 'url': 'http://videolectures.net/deeplearning2015_montreal/',
+ 'info_dict': {
+ 'id': '23181',
+ 'title': 'Deep Learning Summer School, Montreal 2015',
+ 'description': 'md5:0533a85e4bd918df52a01f0e1ebe87b7',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'timestamp': 1438560000,
+ },
+ 'playlist_count': 30,
+ }, {
+ # multi part lecture
+ 'url': 'http://videolectures.net/mlss09uk_bishop_ibi/',
+ 'info_dict': {
+ 'id': '9737',
+ 'display_id': 'mlss09uk_bishop_ibi',
+ 'title': 'Introduction To Bayesian Inference',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'timestamp': 1251622800,
+ },
+ 'playlist': [{
+ 'info_dict': {
+ 'id': '9737_part1',
+ 'display_id': 'mlss09uk_bishop_ibi_part1',
+ 'ext': 'wmv',
+ 'title': 'Introduction To Bayesian Inference (Part 1)',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'duration': 4622,
+ 'timestamp': 1251622800,
+ 'upload_date': '20090830',
+ },
+ }, {
+ 'info_dict': {
+ 'id': '9737_part2',
+ 'display_id': 'mlss09uk_bishop_ibi_part2',
+ 'ext': 'wmv',
+ 'title': 'Introduction To Bayesian Inference (Part 2)',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'duration': 5641,
+ 'timestamp': 1251622800,
+ 'upload_date': '20090830',
+ },
+ }],
+ 'playlist_count': 2,
+ }]
+
+ def _real_extract(self, url):
+ lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups()
+
+ webpage = self._download_webpage(url, lecture_slug)
+
+ cfg = self._parse_json(self._search_regex(
+ [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function',
+ r'cfg\s*:\s*({[^}]+})'],
+ webpage, 'cfg'), lecture_slug, js_to_json)
+
+ lecture_id = compat_str(cfg['obj_id'])
+
+ base_url = self._proto_relative_url(cfg['livepipe'], 'http:')
+
+ lecture_data = self._download_json(
+ '%s/site/api/lecture/%s?format=json' % (base_url, lecture_id),
+ lecture_id)['lecture'][0]
+
+ lecture_info = {
+ 'id': lecture_id,
+ 'display_id': lecture_slug,
+ 'title': lecture_data['title'],
+ 'timestamp': parse_iso8601(lecture_data.get('time')),
+ 'description': lecture_data.get('description_wiki'),
+ 'thumbnail': lecture_data.get('thumb'),
+ }
+
+ playlist_entries = []
+ lecture_type = lecture_data.get('type')
+ parts = [compat_str(video) for video in cfg.get('videos', [])]
+ if parts:
+ multipart = len(parts) > 1
+
+ def extract_part(part_id):
+ smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id)
+ smil = self._download_smil(smil_url, lecture_id)
+ info = self._parse_smil(smil, smil_url, lecture_id)
+ info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id)
+ info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id)
+ if multipart:
+ info['title'] += ' (Part %s)' % part_id
+ switch = smil.find('.//switch')
+ if switch is not None:
+ info['duration'] = parse_duration(switch.attrib.get('dur'))
+ item_info = lecture_info.copy()
+ item_info.update(info)
+ return item_info
+
+ if explicit_part_id or not multipart:
+ result = extract_part(explicit_part_id or parts[0])
+ else:
+ result = {
+ '_type': 'multi_video',
+ 'entries': [extract_part(part) for part in parts],
+ }
+ result.update(lecture_info)
+
+ # Immediately return the explicitly requested part or a non-event item
+ if explicit_part_id or lecture_type != 'evt':
+ return result
+
+ playlist_entries.append(result)
+
+ # It's probably a playlist
+ if not parts or lecture_type == 'evt':
+ playlist_webpage = self._download_webpage(
+ '%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id)
+ entries = [
+ self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea')
+ for _, video_url in re.findall(
+ r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)]
+ playlist_entries.extend(entries)
+
+ playlist = self.playlist_result(playlist_entries, lecture_id)
+ playlist.update(lecture_info)
+ return playlist
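
The dispatch at the bottom of _real_extract covers four cases: an explicitly requested part, a single-part lecture, a multi-part lecture, and an event ('evt') that is drilled down into a playlist. A condensed sketch of that control flow (function name and return labels are illustrative, not part of the extractor):

    def dispatch(parts, explicit_part_id, lecture_type):
        if parts:
            kind = 'single' if (explicit_part_id or len(parts) == 1) else 'multi_video'
            if explicit_part_id or lecture_type != 'evt':
                return kind  # returned immediately, no playlist lookup
        # events (and part-less pages) fall through to the drilldown playlist;
        # for an event with parts, the part result becomes the first playlist entry
        return 'playlist'

    assert dispatch(['1'], None, None) == 'single'
    assert dispatch(['1', '2'], None, None) == 'multi_video'
    assert dispatch(['1', '2'], '2', 'evt') == 'single'
    assert dispatch(['1'], None, 'evt') == 'playlist'
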
diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index ddbd395c8..a63c23617 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -7,14 +7,14 @@ import hmac
import hashlib
import itertools
+from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
+ sanitized_Request,
)
-from ..compat import compat_urllib_request
-from .common import InfoExtractor
class VikiBaseIE(InfoExtractor):
@@ -43,7 +43,7 @@ class VikiBaseIE(InfoExtractor):
hashlib.sha1
).hexdigest()
url = self._API_URL_TEMPLATE % (query, sig)
- return compat_urllib_request.Request(
+ return sanitized_Request(
url, json.dumps(post_data).encode('utf-8')) if post_data else url
def _call_api(self, path, video_id, note, timestamp=None, post_data=None):
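
As in most files touched by this commit, compat_urllib_request.Request is replaced with sanitized_Request from youtube_dl.utils. The helper is essentially a thin wrapper that normalizes the URL before building the request; roughly:

    from youtube_dl.compat import compat_urllib_request
    from youtube_dl.utils import sanitize_url

    def sanitized_Request(url, *args, **kwargs):
        # build the request against a sanitized URL (e.g. a scheme prepended
        # to scheme-relative '//host/path' URLs)
        return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
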
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 50df79ca1..ce08e6955 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -8,21 +8,22 @@ import itertools
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
- compat_urllib_parse,
- compat_urllib_request,
compat_urlparse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
InAdvancePagedList,
int_or_none,
RegexNotFoundError,
+ sanitized_Request,
smuggle_url,
std_headers,
unified_strdate,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
+ parse_filesize,
)
@@ -39,23 +40,31 @@ class VimeoBaseInfoExtractor(InfoExtractor):
return
self.report_login()
webpage = self._download_webpage(self._LOGIN_URL, None, False)
- token = self._extract_xsrft(webpage)
- data = urlencode_postdata({
+ token, vuid = self._extract_xsrft_and_vuid(webpage)
+ data = urlencode_postdata(encode_dict({
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
- })
- login_request = compat_urllib_request.Request(self._LOGIN_URL, data)
+ }))
+ login_request = sanitized_Request(self._LOGIN_URL, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_request.add_header('Referer', self._LOGIN_URL)
+ self._set_vimeo_cookie('vuid', vuid)
self._download_webpage(login_request, None, False, 'Wrong login info')
- def _extract_xsrft(self, webpage):
- return self._search_regex(
+ def _extract_xsrft_and_vuid(self, webpage):
+ xsrft = self._search_regex(
r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
+ vuid = self._search_regex(
+ r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
+ webpage, 'vuid', group='vuid')
+ return xsrft, vuid
+
+ def _set_vimeo_cookie(self, name, value):
+ self._set_cookie('vimeo.com', name, value)
class VimeoIE(VimeoBaseInfoExtractor):
@@ -80,12 +89,12 @@ class VimeoIE(VimeoBaseInfoExtractor):
'info_dict': {
'id': '56015672',
'ext': 'mp4',
- "upload_date": "20121220",
- "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
- "uploader_id": "user7108434",
- "uploader": "Filippo Valsorda",
- "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
- "duration": 10,
+ 'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
+ 'description': 'md5:2d3305bad981a06ff79f027f19865021',
+ 'upload_date': '20121220',
+ 'uploader_id': 'user7108434',
+ 'uploader': 'Filippo Valsorda',
+ 'duration': 10,
},
},
{
@@ -98,7 +107,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
- 'description': 'md5:380943ec71b89736ff4bf27183233d09',
+ 'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30',
'duration': 1595,
},
},
@@ -128,7 +137,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
- 'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
+ 'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026',
},
'params': {
'videopassword': 'youtube-dl',
@@ -152,7 +161,6 @@ class VimeoIE(VimeoBaseInfoExtractor):
},
{
'url': 'http://vimeo.com/76979871',
- 'md5': '3363dd6ffebe3784d56f4132317fd446',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
@@ -177,6 +185,29 @@ class VimeoIE(VimeoBaseInfoExtractor):
'uploader_id': 'user28849593',
},
},
+ {
+ # contains original format
+ 'url': 'https://vimeo.com/33951933',
+ 'md5': '53c688fa95a55bf4b7293d37a89c5c53',
+ 'info_dict': {
+ 'id': '33951933',
+ 'ext': 'mp4',
+ 'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute',
+ 'uploader': 'The DMCI',
+ 'uploader_id': 'dmci',
+ 'upload_date': '20111220',
+ 'description': 'md5:ae23671e82d05415868f7ad1aec21147',
+ },
+ },
+ {
+ 'url': 'https://vimeo.com/109815029',
+ 'note': 'Video not completely processed, "failed" seed status',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
+ 'only_matching': True,
+ },
]
@staticmethod
@@ -198,17 +229,18 @@ class VimeoIE(VimeoBaseInfoExtractor):
password = self._downloader.params.get('videopassword', None)
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
- token = self._extract_xsrft(webpage)
- data = urlencode_postdata({
+ token, vuid = self._extract_xsrft_and_vuid(webpage)
+ data = urlencode_postdata(encode_dict({
'password': password,
'token': token,
- })
+ }))
if url.startswith('http://'):
# vimeo only supports https now, but the user can give an http url
url = url.replace('http://', 'https://')
- password_request = compat_urllib_request.Request(url + '/password', data)
+ password_request = sanitized_Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
password_request.add_header('Referer', url)
+ self._set_vimeo_cookie('vuid', vuid)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
@@ -217,9 +249,9 @@ class VimeoIE(VimeoBaseInfoExtractor):
password = self._downloader.params.get('videopassword', None)
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option')
- data = compat_urllib_parse.urlencode({'password': password})
+ data = urlencode_postdata(encode_dict({'password': password}))
pass_url = url + '/check-password'
- password_request = compat_urllib_request.Request(pass_url, data)
+ password_request = sanitized_Request(pass_url, data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
return self._download_json(
password_request, video_id,
@@ -248,7 +280,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
url = 'https://vimeo.com/' + video_id
# Retrieve video webpage to extract further information
- request = compat_urllib_request.Request(url, None, headers)
+ request = sanitized_Request(url, None, headers)
try:
webpage = self._download_webpage(request, video_id)
except ExtractorError as ee:
@@ -268,20 +300,30 @@ class VimeoIE(VimeoBaseInfoExtractor):
self.report_extraction(video_id)
vimeo_config = self._search_regex(
- r'vimeo\.config\s*=\s*({.+?});', webpage,
+ r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage,
'vimeo config', default=None)
if vimeo_config:
seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {})
if seed_status.get('state') == 'failed':
raise ExtractorError(
- '%s returned error: %s' % (self.IE_NAME, seed_status['title']),
+ '%s said: %s' % (self.IE_NAME, seed_status['title']),
expected=True)
# Extract the config JSON
try:
try:
config_url = self._html_search_regex(
- r' data-config-url="(.+?)"', webpage, 'config URL')
+ r' data-config-url="(.+?)"', webpage,
+ 'config URL', default=None)
+ if not config_url:
+ # Sometimes a new react-based page is served instead of the old one, which
+ # requires a different config URL extraction approach (see
+ # https://github.com/rg3/youtube-dl/pull/7209)
+ vimeo_clip_page_config = self._search_regex(
+ r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage,
+ 'vimeo clip page config')
+ config_url = self._parse_json(
+ vimeo_clip_page_config, video_id)['player']['config_url']
config_json = self._download_webpage(config_url, video_id)
config = json.loads(config_json)
except RegexNotFoundError:
@@ -364,41 +406,44 @@ class VimeoIE(VimeoBaseInfoExtractor):
like_count = None
comment_count = None
- # Vimeo specific: extract request signature and timestamp
- sig = config['request']['signature']
- timestamp = config['request']['timestamp']
-
- # Vimeo specific: extract video codec and quality information
- # First consider quality, then codecs, then take everything
- codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
- files = {'hd': [], 'sd': [], 'other': []}
- config_files = config["video"].get("files") or config["request"].get("files")
- for codec_name, codec_extension in codecs:
- for quality in config_files.get(codec_name, []):
- format_id = '-'.join((codec_name, quality)).lower()
- key = quality if quality in files else 'other'
- video_url = None
- if isinstance(config_files[codec_name], dict):
- file_info = config_files[codec_name][quality]
- video_url = file_info.get('url')
- else:
- file_info = {}
- if video_url is None:
- video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
- % (video_id, sig, timestamp, quality, codec_name.upper())
-
- files[key].append({
- 'ext': codec_extension,
- 'url': video_url,
- 'format_id': format_id,
- 'width': file_info.get('width'),
- 'height': file_info.get('height'),
- })
formats = []
- for key in ('other', 'sd', 'hd'):
- formats += files[key]
- if len(formats) == 0:
- raise ExtractorError('No known codec found')
+ download_request = sanitized_Request('https://vimeo.com/%s?action=load_download_config' % video_id, headers={
+ 'X-Requested-With': 'XMLHttpRequest'})
+ download_data = self._download_json(download_request, video_id, fatal=False)
+ if download_data:
+ source_file = download_data.get('source_file')
+ if source_file and not source_file.get('is_cold') and not source_file.get('is_defrosting'):
+ formats.append({
+ 'url': source_file['download_url'],
+ 'ext': source_file['extension'].lower(),
+ 'width': int_or_none(source_file.get('width')),
+ 'height': int_or_none(source_file.get('height')),
+ 'filesize': parse_filesize(source_file.get('size')),
+ 'format_id': source_file.get('public_name', 'Original'),
+ 'preference': 1,
+ })
+ config_files = config['video'].get('files') or config['request'].get('files', {})
+ for f in config_files.get('progressive', []):
+ video_url = f.get('url')
+ if not video_url:
+ continue
+ formats.append({
+ 'url': video_url,
+ 'format_id': 'http-%s' % f.get('quality'),
+ 'width': int_or_none(f.get('width')),
+ 'height': int_or_none(f.get('height')),
+ 'fps': int_or_none(f.get('fps')),
+ 'tbr': int_or_none(f.get('bitrate')),
+ })
+ m3u8_url = config_files.get('hls', {}).get('url')
+ if m3u8_url:
+ m3u8_formats = self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+ # Bitrates are completely broken. A single m3u8 may contain entries in kbps and bps
+ # at the same time without actual units specified. This leads to wrong sorting.
+ self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'format_id'))
subtitles = {}
text_tracks = config['request'].get('text_tracks')
@@ -459,23 +504,23 @@ class VimeoChannelIE(VimeoBaseInfoExtractor):
if password is None:
raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
fields = self._hidden_inputs(login_form)
- token = self._extract_xsrft(webpage)
+ token, vuid = self._extract_xsrft_and_vuid(webpage)
fields['token'] = token
fields['password'] = password
- post = urlencode_postdata(fields)
+ post = urlencode_postdata(encode_dict(fields))
password_path = self._search_regex(
r'action="([^"]+)"', login_form, 'password URL')
password_url = compat_urlparse.urljoin(page_url, password_path)
- password_request = compat_urllib_request.Request(password_url, post)
+ password_request = sanitized_Request(password_url, post)
password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
- self._set_cookie('vimeo.com', 'xsrft', token)
+ self._set_vimeo_cookie('vuid', vuid)
+ self._set_vimeo_cookie('xsrft', token)
return self._download_webpage(
password_request, list_id,
'Verifying the password', 'Wrong password')
- def _extract_videos(self, list_id, base_url):
- video_ids = []
+ def _title_and_entries(self, list_id, base_url):
for pagenum in itertools.count(1):
page_url = self._page_url(base_url, pagenum)
webpage = self._download_webpage(
@@ -484,18 +529,18 @@ class VimeoChannelIE(VimeoBaseInfoExtractor):
if pagenum == 1:
webpage = self._login_list_password(page_url, list_id, webpage)
+ yield self._extract_list_title(webpage)
+
+ for video_id in re.findall(r'id="clip_(\d+?)"', webpage):
+ yield self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo')
- video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
break
- entries = [self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo')
- for video_id in video_ids]
- return {'_type': 'playlist',
- 'id': list_id,
- 'title': self._extract_list_title(webpage),
- 'entries': entries,
- }
+ def _extract_videos(self, list_id, base_url):
+ title_and_entries = self._title_and_entries(list_id, base_url)
+ list_title = next(title_and_entries)
+ return self.playlist_result(title_and_entries, list_id, list_title)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -556,7 +601,7 @@ class VimeoAlbumIE(VimeoChannelIE):
class VimeoGroupsIE(VimeoAlbumIE):
IE_NAME = 'vimeo:group'
- _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)'
+ _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)(?:/(?!videos?/\d+)|$)'
_TESTS = [{
'url': 'https://vimeo.com/groups/rolexawards',
'info_dict': {
@@ -625,7 +670,7 @@ class VimeoWatchLaterIE(VimeoChannelIE):
def _page_url(self, base_url, pagenum):
url = '%s/page:%d/' % (base_url, pagenum)
- request = compat_urllib_request.Request(url)
+ request = sanitized_Request(url)
# Set the header to get a partial html page with the ids;
# the normal page doesn't contain them.
request.add_header('X-Requested-With', 'XMLHttpRequest')
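
The channel refactor above replaces eager collection of video ids with a generator whose first yielded item is the list title and whose remaining items are entries, so pages are fetched lazily as the playlist is consumed. The same pattern in isolation:

    def title_and_entries():
        # first item is the playlist title, the rest are entries
        yield 'My channel'
        for page in range(1, 3):          # pages fetched on demand in the real code
            for video_id in ('%d01' % page, '%d02' % page):
                yield {'id': video_id}

    gen = title_and_entries()
    list_title = next(gen)                # 'My channel'
    entries = list(gen)                   # remaining, lazily produced items
    assert list_title == 'My channel' and len(entries) == 4
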
diff --git a/youtube_dl/extractor/vine.py b/youtube_dl/extractor/vine.py
index c733a48fa..cb2a4b0b5 100644
--- a/youtube_dl/extractor/vine.py
+++ b/youtube_dl/extractor/vine.py
@@ -1,10 +1,14 @@
+# coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..utils import (
+ int_or_none,
+ unified_strdate,
+)
class VineIE(InfoExtractor):
@@ -17,10 +21,12 @@ class VineIE(InfoExtractor):
'ext': 'mp4',
'title': 'Chicken.',
'alt_title': 'Vine by Jack Dorsey',
- 'description': 'Chicken.',
'upload_date': '20130519',
'uploader': 'Jack Dorsey',
'uploader_id': '76',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
},
}, {
'url': 'https://vine.co/v/MYxVapFvz2z',
@@ -29,11 +35,13 @@ class VineIE(InfoExtractor):
'id': 'MYxVapFvz2z',
'ext': 'mp4',
'title': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
- 'alt_title': 'Vine by Luna',
- 'description': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14',
+ 'alt_title': 'Vine by Mars Ruiz',
'upload_date': '20140815',
- 'uploader': 'Luna',
+ 'uploader': 'Mars Ruiz',
'uploader_id': '1102363502380728320',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
},
}, {
'url': 'https://vine.co/v/bxVjBbZlPUH',
@@ -43,14 +51,33 @@ class VineIE(InfoExtractor):
'ext': 'mp4',
'title': '#mw3 #ac130 #killcam #angelofdeath',
'alt_title': 'Vine by Z3k3',
- 'description': '#mw3 #ac130 #killcam #angelofdeath',
'upload_date': '20130430',
'uploader': 'Z3k3',
'uploader_id': '936470460173008896',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
},
}, {
'url': 'https://vine.co/oembed/MYxVapFvz2z.json',
'only_matching': True,
+ }, {
+ 'url': 'https://vine.co/v/e192BnZnZ9V',
+ 'info_dict': {
+ 'id': 'e192BnZnZ9V',
+ 'ext': 'mp4',
+ 'title': 'ยิ้ม~ เขิน~ อาย~ น่าร้ากอ้ะ >//< @n_whitewo @orlameena #lovesicktheseries #lovesickseason2',
+ 'alt_title': 'Vine by Pimry_zaa',
+ 'upload_date': '20150705',
+ 'uploader': 'Pimry_zaa',
+ 'uploader_id': '1135760698325307392',
+ 'like_count': int,
+ 'comment_count': int,
+ 'repost_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
@@ -58,32 +85,33 @@ class VineIE(InfoExtractor):
webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id)
data = self._parse_json(
- self._html_search_regex(
- r'window\.POST_DATA = { %s: ({.+?}) };\s*</script>' % video_id,
+ self._search_regex(
+ r'window\.POST_DATA\s*=\s*{\s*%s\s*:\s*({.+?})\s*};\s*</script>' % video_id,
webpage, 'vine data'),
video_id)
formats = [{
'format_id': '%(format)s-%(rate)s' % f,
- 'vcodec': f['format'],
- 'quality': f['rate'],
+ 'vcodec': f.get('format'),
+ 'quality': f.get('rate'),
'url': f['videoUrl'],
- } for f in data['videoUrls']]
+ } for f in data['videoUrls'] if f.get('videoUrl')]
self._sort_formats(formats)
+ username = data.get('username')
+
return {
'id': video_id,
- 'title': self._og_search_title(webpage),
- 'alt_title': self._og_search_description(webpage, default=None),
- 'description': data['description'],
- 'thumbnail': data['thumbnailUrl'],
- 'upload_date': unified_strdate(data['created']),
- 'uploader': data['username'],
- 'uploader_id': data['userIdStr'],
- 'like_count': data['likes']['count'],
- 'comment_count': data['comments']['count'],
- 'repost_count': data['reposts']['count'],
+ 'title': data.get('description') or self._og_search_title(webpage),
+ 'alt_title': 'Vine by %s' % username if username else self._og_search_description(webpage, default=None),
+ 'thumbnail': data.get('thumbnailUrl'),
+ 'upload_date': unified_strdate(data.get('created')),
+ 'uploader': username,
+ 'uploader_id': data.get('userIdStr'),
+ 'like_count': int_or_none(data.get('likes', {}).get('count')),
+ 'comment_count': int_or_none(data.get('comments', {}).get('count')),
+ 'repost_count': int_or_none(data.get('reposts', {}).get('count')),
'formats': formats,
}
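
The rewritten metadata block tolerates missing counters by chaining .get() with empty-dict defaults before int_or_none, instead of indexing and risking KeyError. The pattern in isolation (the data dict is hypothetical):

    data = {'likes': {'count': 42}, 'comments': {}}

    # chained .get() with {} defaults: absent keys yield None, never KeyError
    assert data.get('likes', {}).get('count') == 42
    assert data.get('comments', {}).get('count') is None
    assert data.get('reposts', {}).get('count') is None
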
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index c30c5a8e5..90557fa61 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -8,15 +8,17 @@ from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
- compat_urllib_request,
)
from ..utils import (
ExtractorError,
orderedSet,
+ sanitized_Request,
str_to_int,
unescapeHTML,
unified_strdate,
)
+from .vimeo import VimeoIE
+from .pladform import PladformIE
class VKIE(InfoExtractor):
@@ -163,6 +165,11 @@ class VKIE(InfoExtractor):
# vk wrapper
'url': 'http://www.biqle.ru/watch/847655_160197695',
'only_matching': True,
+ },
+ {
+ # pladform embed
+ 'url': 'https://vk.com/video-76116461_171554880',
+ 'only_matching': True,
}
]
@@ -181,7 +188,7 @@ class VKIE(InfoExtractor):
'pass': password.encode('cp1251'),
})
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'https://login.vk.com/?act=login',
compat_urllib_parse.urlencode(login_form).encode('utf-8'))
login_page = self._download_webpage(
@@ -249,10 +256,17 @@ class VKIE(InfoExtractor):
if youtube_url:
return self.url_result(youtube_url, 'Youtube')
+ vimeo_url = VimeoIE._extract_vimeo_url(url, info_page)
+ if vimeo_url is not None:
+ return self.url_result(vimeo_url)
+
+ pladform_url = PladformIE._extract_url(info_page)
+ if pladform_url:
+ return self.url_result(pladform_url)
+
m_rutube = re.search(
r'\ssrc="((?:https?:)?//rutube\.ru\\?/video\\?/embed(?:.*?))\\?"', info_page)
if m_rutube is not None:
- self.to_screen('rutube video detected')
rutube_url = self._proto_relative_url(
m_rutube.group(1).replace('\\', ''))
return self.url_result(rutube_url)
@@ -276,9 +290,13 @@ class VKIE(InfoExtractor):
mobj.group(1) + ' ' + mobj.group(2)
upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2))
- view_count = str_to_int(self._search_regex(
- r'"mv_views_count_number"[^>]*>([\d,.]+) views<',
- info_page, 'view count', fatal=False))
+ view_count = None
+ views = self._html_search_regex(
+ r'"mv_views_count_number"[^>]*>(.+?\bviews?)<',
+ info_page, 'view count', fatal=False)
+ if views:
+ view_count = str_to_int(self._search_regex(
+ r'([\d,.]+)', views, 'view count', fatal=False))
formats = [{
'format_id': k,
diff --git a/youtube_dl/extractor/vodlocker.py b/youtube_dl/extractor/vodlocker.py
index ccf1928b5..357594a11 100644
--- a/youtube_dl/extractor/vodlocker.py
+++ b/youtube_dl/extractor/vodlocker.py
@@ -2,14 +2,15 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
+from ..compat import compat_urllib_parse
+from ..utils import (
+ ExtractorError,
+ sanitized_Request,
)
class VodlockerIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
+ _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
_TESTS = [{
'url': 'http://vodlocker.com/e8wvyzz4sl42',
@@ -26,12 +27,18 @@ class VodlockerIE(InfoExtractor):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
+ if any(p in webpage for p in (
+ '>THIS FILE WAS DELETED<',
+ '>File Not Found<',
+ 'The file you were looking for could not be found, sorry for any inconvenience.<')):
+ raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+
fields = self._hidden_inputs(webpage)
if fields['op'] == 'download1':
self._sleep(3, video_id) # they do detect when requests happen too fast!
post = compat_urllib_parse.urlencode(fields)
- req = compat_urllib_request.Request(url, post)
+ req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(
req, video_id, 'Downloading video page')
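
The new deleted-file guard is a plain substring probe over the downloaded page, raising before any form submission is attempted. Sketch of the check on its own:

    DELETED_MARKERS = (
        '>THIS FILE WAS DELETED<',
        '>File Not Found<',
    )

    def is_deleted(webpage):
        # any() short-circuits on the first marker found in the page
        return any(marker in webpage for marker in DELETED_MARKERS)

    assert is_deleted('... >File Not Found< ...')
    assert not is_deleted('<video src="ok.mp4"></video>')
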
diff --git a/youtube_dl/extractor/voicerepublic.py b/youtube_dl/extractor/voicerepublic.py
index 254383d6c..93d15a556 100644
--- a/youtube_dl/extractor/voicerepublic.py
+++ b/youtube_dl/extractor/voicerepublic.py
@@ -3,14 +3,12 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urlparse,
-)
+from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
+ sanitized_Request,
)
@@ -37,7 +35,7 @@ class VoiceRepublicIE(InfoExtractor):
def _real_extract(self, url):
display_id = self._match_id(url)
- req = compat_urllib_request.Request(
+ req = sanitized_Request(
compat_urlparse.urljoin(url, '/talks/%s' % display_id))
# Older versions of Firefox get redirected to an "upgrade browser" page
req.add_header('User-Agent', 'youtube-dl')
diff --git a/youtube_dl/extractor/washingtonpost.py b/youtube_dl/extractor/washingtonpost.py
index 72eb010f8..ec8b99998 100644
--- a/youtube_dl/extractor/washingtonpost.py
+++ b/youtube_dl/extractor/washingtonpost.py
@@ -19,25 +19,25 @@ class WashingtonPostIE(InfoExtractor):
'title': 'Sinkhole of bureaucracy',
},
'playlist': [{
- 'md5': '79132cc09ec5309fa590ae46e4cc31bc',
+ 'md5': 'b9be794ceb56c7267d410a13f99d801a',
'info_dict': {
'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'Breaking Points: The Paper Mine',
- 'duration': 1287,
+ 'duration': 1290,
'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.',
'uploader': 'The Washington Post',
'timestamp': 1395527908,
'upload_date': '20140322',
},
}, {
- 'md5': 'e1d5734c06865cc504ad99dc2de0d443',
+ 'md5': '1fff6a689d8770966df78c8cb6c8c17c',
'info_dict': {
'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'The town bureaucracy sustains',
'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.',
- 'duration': 2217,
+ 'duration': 2220,
'timestamp': 1395528005,
'upload_date': '20140322',
'uploader': 'The Washington Post',
diff --git a/youtube_dl/extractor/wdr.py b/youtube_dl/extractor/wdr.py
index b46802306..ef096cbd2 100644
--- a/youtube_dl/extractor/wdr.py
+++ b/youtube_dl/extractor/wdr.py
@@ -10,8 +10,8 @@ from ..compat import (
compat_urlparse,
)
from ..utils import (
- determine_ext,
unified_strdate,
+ qualities,
)
@@ -33,6 +33,7 @@ class WDRIE(InfoExtractor):
'params': {
'skip_download': True,
},
+ 'skip': 'Page Not Found',
},
{
'url': 'http://www1.wdr.de/themen/av/videomargaspiegelisttot101-videoplayer.html',
@@ -47,6 +48,7 @@ class WDRIE(InfoExtractor):
'params': {
'skip_download': True,
},
+ 'skip': 'Page Not Found',
},
{
'url': 'http://www1.wdr.de/themen/kultur/audioerlebtegeschichtenmargaspiegel100-audioplayer.html',
@@ -71,6 +73,7 @@ class WDRIE(InfoExtractor):
'upload_date': '20140717',
'is_live': False
},
+ 'skip': 'Page Not Found',
},
{
'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html',
@@ -83,10 +86,10 @@ class WDRIE(InfoExtractor):
'url': 'http://www1.wdr.de/mediathek/video/livestream/index.html',
'info_dict': {
'id': 'mdb-103364',
- 'title': 're:^WDR Fernsehen [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'title': 're:^WDR Fernsehen Live [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:ae2ff888510623bf8d4b115f95a9b7c9',
'ext': 'flv',
- 'upload_date': '20150212',
+ 'upload_date': '20150101',
'is_live': True
},
'params': {
@@ -150,25 +153,52 @@ class WDRIE(InfoExtractor):
if upload_date:
upload_date = unified_strdate(upload_date)
+ formats = []
+ preference = qualities(['S', 'M', 'L', 'XL'])
+
if video_url.endswith('.f4m'):
- video_url += '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18'
- ext = 'flv'
+ f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18', page_id, f4m_id='hds', fatal=False)
+ if f4m_formats:
+ formats.extend(f4m_formats)
elif video_url.endswith('.smil'):
- fmt = self._extract_smil_formats(video_url, page_id)[0]
- video_url = fmt['url']
- sep = '&' if '?' in video_url else '?'
- video_url += sep
- video_url += 'hdcore=3.3.0&plugin=aasp-3.3.0.99.43'
- ext = fmt['ext']
+ smil_formats = self._extract_smil_formats(video_url, page_id, False, {
+ 'hdcore': '3.3.0',
+ 'plugin': 'aasp-3.3.0.99.43',
+ })
+ if smil_formats:
+ formats.extend(smil_formats)
else:
- ext = determine_ext(video_url)
+ formats.append({
+ 'url': video_url,
+ 'http_headers': {
+ 'User-Agent': 'mobile',
+ },
+ })
+
+ m3u8_url = self._search_regex(r'rel="adaptiv"[^>]+href="([^"]+)"', webpage, 'm3u8 url', default=None)
+ if m3u8_url:
+ m3u8_formats = self._extract_m3u8_formats(m3u8_url, page_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+ if m3u8_formats:
+ formats.extend(m3u8_formats)
+
+ direct_urls = re.findall(r'rel="web(S|M|L|XL)"[^>]+href="([^"]+)"', webpage)
+ if direct_urls:
+ for quality, video_url in direct_urls:
+ formats.append({
+ 'url': video_url,
+ 'preference': preference(quality),
+ 'http_headers': {
+ 'User-Agent': 'mobile',
+ },
+ })
+
+ self._sort_formats(formats)
description = self._html_search_meta('Description', webpage, 'description')
return {
'id': page_id,
- 'url': video_url,
- 'ext': ext,
+ 'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
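
qualities() from youtube_dl.utils converts an ordered list of quality ids into a preference function: position in the list is the preference, unknown ids sort last. Roughly:

    def qualities(quality_ids):
        # higher index in the ordered list -> higher preference
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    preference = qualities(['S', 'M', 'L', 'XL'])
    assert preference('XL') == 3
    assert preference('S') == 0
    assert preference('unknown') == -1
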
diff --git a/youtube_dl/extractor/wimp.py b/youtube_dl/extractor/wimp.py
index f69d46a28..041ff6c55 100644
--- a/youtube_dl/extractor/wimp.py
+++ b/youtube_dl/extractor/wimp.py
@@ -1,52 +1,50 @@
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from .youtube import YoutubeIE
class WimpIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?wimp\.com/([^/]+)/'
+ _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.wimp.com/maruexhausted/',
- 'md5': 'f1acced123ecb28d9bb79f2479f2b6a1',
+ 'md5': 'ee21217ffd66d058e8b16be340b74883',
'info_dict': {
'id': 'maruexhausted',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Maru is exhausted.',
'description': 'md5:57e099e857c0a4ea312542b684a869b8',
}
}, {
- # youtube video
'url': 'http://www.wimp.com/clowncar/',
+ 'md5': '4e2986c793694b55b37cf92521d12bb4',
'info_dict': {
- 'id': 'cG4CEr2aiSg',
+ 'id': 'clowncar',
'ext': 'mp4',
- 'title': 'Basset hound clown car...incredible!',
- 'description': 'md5:8d228485e0719898c017203f900b3a35',
- 'uploader': 'Gretchen Hoey',
- 'uploader_id': 'gretchenandjeff1',
- 'upload_date': '20140303',
+ 'title': 'It\'s like a clown car.',
+ 'description': 'md5:0e56db1370a6e49c5c1d19124c0d2fb2',
},
- 'add_ie': ['Youtube'],
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = self._match_id(url)
+
webpage = self._download_webpage(url, video_id)
- video_url = self._search_regex(
- [r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"],
- webpage, 'video URL')
- if YoutubeIE.suitable(video_url):
- self.to_screen('Found YouTube video')
+
+ youtube_id = self._search_regex(
+ r"videoId\s*:\s*[\"']([0-9A-Za-z_-]{11})[\"']",
+ webpage, 'video URL', default=None)
+ if youtube_id:
return {
'_type': 'url',
- 'url': video_url,
+ 'url': youtube_id,
'ie_key': YoutubeIE.ie_key(),
}
+ video_url = self._search_regex(
+ r'<video[^>]+>\s*<source[^>]+src=(["\'])(?P<url>.+?)\1',
+ webpage, 'video URL', group='url')
+
return {
'id': video_id,
'url': video_url,
diff --git a/youtube_dl/extractor/wistia.py b/youtube_dl/extractor/wistia.py
index 13a079151..fdb16d91c 100644
--- a/youtube_dl/extractor/wistia.py
+++ b/youtube_dl/extractor/wistia.py
@@ -1,8 +1,10 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_request
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ sanitized_Request,
+)
class WistiaIE(InfoExtractor):
@@ -23,7 +25,7 @@ class WistiaIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- request = compat_urllib_request.Request(self._API_URL.format(video_id))
+ request = sanitized_Request(self._API_URL.format(video_id))
request.add_header('Referer', url) # Some videos require this.
data_json = self._download_json(request, video_id)
if data_json.get('error'):
diff --git a/youtube_dl/extractor/wsj.py b/youtube_dl/extractor/wsj.py
index 2ddf29a69..5a897371d 100644
--- a/youtube_dl/extractor/wsj.py
+++ b/youtube_dl/extractor/wsj.py
@@ -84,6 +84,5 @@ class WSJIE(InfoExtractor):
'duration': duration,
'upload_date': upload_date,
'title': title,
- 'formats': formats,
'categories': categories,
}
diff --git a/youtube_dl/extractor/gorillavid.py b/youtube_dl/extractor/xfileshare.py
index d23e3eac1..a3236e66c 100644
--- a/youtube_dl/extractor/gorillavid.py
+++ b/youtube_dl/extractor/xfileshare.py
@@ -1,25 +1,23 @@
-# -*- coding: utf-8 -*-
+# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
from ..utils import (
ExtractorError,
encode_dict,
int_or_none,
+ sanitized_Request,
)
-class GorillaVidIE(InfoExtractor):
- IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net and filehoot.com'
+class XFileShareIE(InfoExtractor):
+ IE_DESC = 'XFileShare based sites: GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net, filehoot.com and vidto.me'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
- (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com))/
+ (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com|vidto\.me))/
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
'''
@@ -76,6 +74,13 @@ class GorillaVidIE(InfoExtractor):
'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4',
'thumbnail': 're:http://.*\.jpg',
}
+ }, {
+ 'url': 'http://vidto.me/ku5glz52nqe1.html',
+ 'info_dict': {
+ 'id': 'ku5glz52nqe1',
+ 'ext': 'mp4',
+ 'title': 'test'
+ }
}]
def _real_extract(self, url):
@@ -99,18 +104,23 @@ class GorillaVidIE(InfoExtractor):
post = compat_urllib_parse.urlencode(encode_dict(fields))
- req = compat_urllib_request.Request(url, post)
+ req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(req, video_id, 'Downloading video page')
- title = self._search_regex(
- [r'style="z-index: [0-9]+;">([^<]+)</span>', r'<td nowrap>([^<]+)</td>', r'>Watch (.+) '],
- webpage, 'title', default=None) or self._og_search_title(webpage)
+ title = (self._search_regex(
+ [r'style="z-index: [0-9]+;">([^<]+)</span>',
+ r'<td nowrap>([^<]+)</td>',
+ r'>Watch (.+) ',
+ r'<h2 class="video-page-head">([^<]+)</h2>'],
+ webpage, 'title', default=None) or self._og_search_title(webpage)).strip()
video_url = self._search_regex(
- r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url')
+ [r'file\s*:\s*["\'](http[^"\']+)["\'],',
+ r'file_link\s*=\s*\'(https?:\/\/[0-9a-zA-z.\/\-_]+)'],
+ webpage, 'file url')
thumbnail = self._search_regex(
- r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', fatal=False)
+ r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', default=None)
formats = [{
'format_id': 'sd',
diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index 97315750f..8938c0e45 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -63,7 +63,9 @@ class XHamsterIE(InfoExtractor):
mrss_url = '%s://xhamster.com/movies/%s/%s.html' % (proto, video_id, seo)
webpage = self._download_webpage(mrss_url, video_id)
- title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage, 'title')
+ title = self._html_search_regex(
+ [r'<title>(?P<title>.+?)(?:, (?:[^,]+? )?Porn: xHamster| - xHamster\.com)</title>',
+ r'<h1>([^<]+)</h1>'], webpage, 'title')
# Only a few videos have a description
mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
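
_html_search_regex accepts a list of patterns and tries them in order, which is what lets the new title extraction fall back from the <title> variants to a bare <h1>. A simplified self-contained equivalent (the sample page is made up):

    import re

    def search_first(patterns, webpage):
        # try each pattern in order; the first match wins
        for pattern in patterns:
            m = re.search(pattern, webpage)
            if m:
                return m.group(m.lastindex or 0)
        return None

    page = '<title>Some clip, Teen Porn: xHamster</title>'
    title = search_first(
        [r'<title>(?P<title>.+?)(?:, (?:[^,]+? )?Porn: xHamster| - xHamster\.com)</title>',
         r'<h1>([^<]+)</h1>'], page)
    assert title == 'Some clip'
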
diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py
index 779e4f46a..a1fe24050 100644
--- a/youtube_dl/extractor/xtube.py
+++ b/youtube_dl/extractor/xtube.py
@@ -3,12 +3,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_request,
- compat_urllib_parse_unquote,
-)
+from ..compat import compat_urllib_parse_unquote
from ..utils import (
parse_duration,
+ sanitized_Request,
str_to_int,
)
@@ -32,7 +30,7 @@ class XTubeIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- req = compat_urllib_request.Request(url)
+ req = sanitized_Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
diff --git a/youtube_dl/extractor/xuite.py b/youtube_dl/extractor/xuite.py
index 5aac8adb3..8bbac54e2 100644
--- a/youtube_dl/extractor/xuite.py
+++ b/youtube_dl/extractor/xuite.py
@@ -19,7 +19,7 @@ class XuiteIE(InfoExtractor):
_TESTS = [{
# Audio
'url': 'http://vlog.xuite.net/play/RGkzc1ZULTM4NjA5MTQuZmx2',
- 'md5': '63a42c705772aa53fd4c1a0027f86adf',
+ 'md5': 'e79284c87b371424885448d11f6398c8',
'info_dict': {
'id': '3860914',
'ext': 'mp3',
diff --git a/youtube_dl/extractor/xvideos.py b/youtube_dl/extractor/xvideos.py
index 5dcf2fdd1..710ad5041 100644
--- a/youtube_dl/extractor/xvideos.py
+++ b/youtube_dl/extractor/xvideos.py
@@ -3,14 +3,12 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse_unquote,
- compat_urllib_request,
-)
+from ..compat import compat_urllib_parse_unquote
from ..utils import (
clean_html,
ExtractorError,
determine_ext,
+ sanitized_Request,
)
@@ -48,7 +46,7 @@ class XVideosIE(InfoExtractor):
'url': video_url,
}]
- android_req = compat_urllib_request.Request(url)
+ android_req = sanitized_Request(url)
android_req.add_header('User-Agent', self._ANDROID_USER_AGENT)
android_webpage = self._download_webpage(android_req, video_id, fatal=False)
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index b8579b573..fca5ddc69 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -144,6 +144,17 @@ class YahooIE(InfoExtractor):
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
+ }, {
+ # Query result is embedded in the webpage, but an explicit request to the video API fails with geo restriction
+ 'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
+ 'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
+ 'info_dict': {
+ 'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
+ 'ext': 'mp4',
+ 'title': 'Communitary - Community Episode 1: Ladders',
+ 'description': 'md5:8fc39608213295748e1e289807838c97',
+ 'duration': 1646,
+ },
}
]
@@ -171,6 +182,19 @@ class YahooIE(InfoExtractor):
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
+ # Query result is often embedded in the webpage as JSON. Sometimes explicit requests
+ # to the video API result in a failure with a geo restriction reason, therefore using
+ # the embedded query result when present sounds reasonable.
+ config_json = self._search_regex(
+ r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
+ webpage, 'videoplayer applet', default=None)
+ if config_json:
+ config = self._parse_json(config_json, display_id, fatal=False)
+ if config:
+ sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
+ if sapi:
+ return self._extract_info(display_id, sapi, webpage)
+
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
@@ -190,22 +214,10 @@ class YahooIE(InfoExtractor):
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
- def _get_info(self, video_id, display_id, webpage):
- region = self._search_regex(
- r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
- webpage, 'region', fatal=False, default='US')
- data = compat_urllib_parse.urlencode({
- 'protocol': 'http',
- 'region': region,
- })
- query_url = (
- 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
- '{id}?{data}'.format(id=video_id, data=data))
- query_result = self._download_json(
- query_url, display_id, 'Downloading video info')
-
- info = query_result['query']['results']['mediaObj'][0]
+ def _extract_info(self, display_id, query, webpage):
+ info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
+ video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
@@ -231,6 +243,9 @@ class YahooIE(InfoExtractor):
'ext': 'flv',
})
else:
+ if s.get('format') == 'm3u8_playlist':
+ format_info['protocol'] = 'm3u8_native'
+ format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
@@ -264,6 +279,21 @@ class YahooIE(InfoExtractor):
'subtitles': subtitles,
}
+ def _get_info(self, video_id, display_id, webpage):
+ region = self._search_regex(
+ r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
+ webpage, 'region', fatal=False, default='US')
+ data = compat_urllib_parse.urlencode({
+ 'protocol': 'http',
+ 'region': region,
+ })
+ query_url = (
+ 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
+ '{id}?{data}'.format(id=video_id, data=data))
+ query_result = self._download_json(
+ query_url, display_id, 'Downloading video info')
+ return self._extract_info(display_id, query_result, webpage)
+
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
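
The refactor splits the old _get_info so the SAPI query result can come from either place: the JSON already embedded in the page, or the streams API, which may be geo-fenced. The preference logic, reduced to its essence (names are illustrative, not youtube-dl API):

    def pick_query_result(embedded_sapi, fetch_from_api):
        # prefer the page-embedded result; only hit the possibly
        # geo-restricted streams API when the page carried none
        return embedded_sapi if embedded_sapi else fetch_from_api()

    assert pick_query_result({'query': {}}, lambda: 'api') == {'query': {}}
    assert pick_query_result(None, lambda: 'api') == 'api'
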
diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index 4098e4629..d3cc1a29f 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -8,11 +8,11 @@ from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
- compat_urllib_request,
)
from ..utils import (
int_or_none,
float_or_none,
+ sanitized_Request,
)
@@ -46,6 +46,12 @@ class YandexMusicTrackIE(InfoExtractor):
% (data['host'], key, data['ts'] + data['path'], storage[1]))
def _get_track_info(self, track):
+ thumbnail = None
+ cover_uri = track.get('albums', [{}])[0].get('coverUri')
+ if cover_uri:
+ thumbnail = cover_uri.replace('%%', 'orig')
+ if not thumbnail.startswith('http'):
+ thumbnail = 'http://' + thumbnail
return {
'id': track['id'],
'ext': 'mp3',
@@ -53,6 +59,7 @@ class YandexMusicTrackIE(InfoExtractor):
'title': '%s - %s' % (track['artists'][0]['name'], track['title']),
'filesize': int_or_none(track.get('fileSize')),
'duration': float_or_none(track.get('durationMs'), 1000),
+ 'thumbnail': thumbnail,
}
def _real_extract(self, url):
@@ -147,7 +154,7 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
if len(tracks) < len(track_ids):
present_track_ids = set([compat_str(track['id']) for track in tracks if track.get('id')])
missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
- request = compat_urllib_request.Request(
+ request = sanitized_Request(
'https://music.yandex.ru/handlers/track-entries.jsx',
compat_urllib_parse.urlencode({
'entries': ','.join(missing_track_ids),
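
The new thumbnail logic expands Yandex cover URIs, which carry a '%%' size placeholder and may lack a scheme. The same transformation in isolation (the sample URI is hypothetical):

    def thumbnail_from_cover_uri(cover_uri):
        if not cover_uri:
            return None
        # '%%' is a size placeholder; 'orig' requests the original size
        thumbnail = cover_uri.replace('%%', 'orig')
        if not thumbnail.startswith('http'):
            thumbnail = 'http://' + thumbnail
        return thumbnail

    assert thumbnail_from_cover_uri('avatars.yandex.net/get-music-content/abc/%%') == \
        'http://avatars.yandex.net/get-music-content/abc/orig'
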
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index 2e81d9223..3a3432be8 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -4,12 +4,13 @@ from __future__ import unicode_literals
import base64
from .common import InfoExtractor
-from ..utils import ExtractorError
-
from ..compat import (
compat_urllib_parse,
compat_ord,
- compat_urllib_request,
+)
+from ..utils import (
+ ExtractorError,
+ sanitized_Request,
)
@@ -24,8 +25,8 @@ class YoukuIE(InfoExtractor):
'''
_TESTS = [{
+ # MD5 is unstable
'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
- 'md5': '5f3af4192eabacc4501508d54a8cabd7',
'info_dict': {
'id': 'XMTc1ODE5Njcy_part1',
'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
@@ -41,6 +42,7 @@ class YoukuIE(InfoExtractor):
'title': '武媚娘传奇 85',
},
'playlist_count': 11,
+ 'skip': 'Available in China only',
}, {
'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html',
'info_dict': {
@@ -48,7 +50,6 @@ class YoukuIE(InfoExtractor):
'title': '花千骨 04',
},
'playlist_count': 13,
- 'skip': 'Available in China only',
}, {
'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
'note': 'Video protected with password',
@@ -62,7 +63,7 @@ class YoukuIE(InfoExtractor):
},
}]
- def construct_video_urls(self, data1, data2):
+ def construct_video_urls(self, data):
# get sid, token
def yk_t(s1, s2):
ls = list(range(256))
@@ -80,34 +81,24 @@ class YoukuIE(InfoExtractor):
return bytes(s)
sid, token = yk_t(
- b'becaf9be', base64.b64decode(data2['ep'].encode('ascii'))
+ b'becaf9be', base64.b64decode(data['security']['encrypt_string'].encode('ascii'))
).decode('ascii').split('_')
# get oip
- oip = data2['ip']
-
- # get fileid
- string_ls = list(
- 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890')
- shuffled_string_ls = []
- seed = data1['seed']
- N = len(string_ls)
- for ii in range(N):
- seed = (seed * 0xd3 + 0x754f) % 0x10000
- idx = seed * len(string_ls) // 0x10000
- shuffled_string_ls.append(string_ls[idx])
- del string_ls[idx]
+ oip = data['security']['ip']
fileid_dict = {}
- for format in data1['streamtypes']:
- streamfileid = [
- int(i) for i in data1['streamfileids'][format].strip('*').split('*')]
- fileid = ''.join(
- [shuffled_string_ls[i] for i in streamfileid])
- fileid_dict[format] = fileid[:8] + '%s' + fileid[10:]
+ for stream in data['stream']:
+ format = stream.get('stream_type')
+ fileid = stream['stream_fileid']
+ fileid_dict[format] = fileid
def get_fileid(format, n):
- fileid = fileid_dict[format] % hex(int(n))[2:].upper().zfill(2)
+ number = hex(int(str(n), 10))[2:].upper()
+ if len(number) == 1:
+ number = '0' + number
+ streamfileids = fileid_dict[format]
+ fileid = streamfileids[0:8] + number + streamfileids[10:]
return fileid
# get ep
@@ -122,15 +113,15 @@ class YoukuIE(InfoExtractor):
# generate video_urls
video_urls_dict = {}
- for format in data1['streamtypes']:
+ for stream in data['stream']:
+ format = stream.get('stream_type')
video_urls = []
- for dt in data1['segs'][format]:
- n = str(int(dt['no']))
+ for dt in stream['segs']:
+ n = str(stream['segs'].index(dt))
param = {
- 'K': dt['k'],
+ 'K': dt['key'],
'hd': self.get_hd(format),
'myp': 0,
- 'ts': dt['seconds'],
'ypp': 0,
'ctype': 12,
'ev': 1,
@@ -141,7 +132,7 @@ class YoukuIE(InfoExtractor):
video_url = \
'http://k.youku.com/player/getFlvPath/' + \
'sid/' + sid + \
- '_' + str(int(n) + 1).zfill(2) + \
+ '_00' + \
'/st/' + self.parse_ext_l(format) + \
'/fileid/' + get_fileid(format, n) + '?' + \
compat_urllib_parse.urlencode(param)
@@ -152,23 +143,31 @@ class YoukuIE(InfoExtractor):
def get_hd(self, fm):
hd_id_dict = {
+ '3gp': '0',
+ '3gphd': '1',
'flv': '0',
+ 'flvhd': '0',
'mp4': '1',
+ 'mp4hd': '1',
+ 'mp4hd2': '1',
+ 'mp4hd3': '1',
'hd2': '2',
'hd3': '3',
- '3gp': '0',
- '3gphd': '1'
}
return hd_id_dict[fm]
def parse_ext_l(self, fm):
ext_dict = {
+ '3gp': 'flv',
+ '3gphd': 'mp4',
'flv': 'flv',
+ 'flvhd': 'flv',
'mp4': 'mp4',
+ 'mp4hd': 'mp4',
+ 'mp4hd2': 'flv',
+ 'mp4hd3': 'flv',
'hd2': 'flv',
'hd3': 'flv',
- '3gp': 'flv',
- '3gphd': 'mp4'
}
return ext_dict[fm]
@@ -177,9 +176,13 @@ class YoukuIE(InfoExtractor):
'3gp': 'h6',
'3gphd': 'h5',
'flv': 'h4',
+ 'flvhd': 'h4',
'mp4': 'h3',
+ 'mp4hd': 'h3',
+ 'mp4hd2': 'h4',
+ 'mp4hd3': 'h4',
'hd2': 'h2',
- 'hd3': 'h1'
+ 'hd3': 'h1',
}
return _dict[fm]
@@ -187,45 +190,46 @@ class YoukuIE(InfoExtractor):
video_id = self._match_id(url)
def retrieve_data(req_url, note):
- req = compat_urllib_request.Request(req_url)
+ headers = {
+ 'Referer': req_url,
+ }
+ self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')
+ req = sanitized_Request(req_url, headers=headers)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
if cn_verification_proxy:
req.add_header('Ytdl-request-proxy', cn_verification_proxy)
raw_data = self._download_json(req, video_id, note=note)
- return raw_data['data'][0]
+
+ return raw_data['data']
video_password = self._downloader.params.get('videopassword', None)
# request basic data
- basic_data_url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id
+ basic_data_url = "http://play.youku.com/play/get.json?vid=%s&ct=12" % video_id
if video_password:
- basic_data_url += '?password=%s' % video_password
-
- data1 = retrieve_data(
- basic_data_url,
- 'Downloading JSON metadata 1')
- data2 = retrieve_data(
- 'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id,
- 'Downloading JSON metadata 2')
-
- error_code = data1.get('error_code')
- if error_code:
- error = data1.get('error')
- if error is not None and '因版权原因无法观看此视频' in error:
+ basic_data_url += '&pwd=%s' % video_password
+
+ data = retrieve_data(basic_data_url, 'Downloading JSON metadata')
+
+ error = data.get('error')
+ if error:
+ error_note = error.get('note')
+ if error_note is not None and '因版权原因无法观看此视频' in error_note:
raise ExtractorError(
'Youku said: Sorry, this video is available in China only', expected=True)
else:
- msg = 'Youku server reported error %i' % error_code
- if error is not None:
- msg += ': ' + error
+ msg = 'Youku server reported error %i' % error.get('code')
+ if error_note is not None:
+ msg += ': ' + error_note
raise ExtractorError(msg)
- title = data1['title']
+ # get video title
+ title = data['video']['title']
# generate video_urls_dict
- video_urls_dict = self.construct_video_urls(data1, data2)
+ video_urls_dict = self.construct_video_urls(data)
# construct info
entries = [{
@@ -234,10 +238,11 @@ class YoukuIE(InfoExtractor):
'formats': [],
# some formats are not available for all parts; we have to detect
# which one has them all
- } for i in range(max(len(v) for v in data1['segs'].values()))]
- for fm in data1['streamtypes']:
+ } for i in range(max(len(v.get('segs')) for v in data['stream']))]
+ for stream in data['stream']:
+ fm = stream.get('stream_type')
video_urls = video_urls_dict[fm]
- for video_url, seg, entry in zip(video_urls, data1['segs'][fm], entries):
+ for video_url, seg, entry in zip(video_urls, stream['segs'], entries):
entry['formats'].append({
'url': video_url,
'format_id': self.get_format_name(fm),
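
get_fileid above splices the segment number, as a two-character uppercase hex string, into characters 8-9 of the stream file id. A compact equivalent (the 16-character id is made up; zfill matches the manual zero-pad in the diff for n < 256):

    def get_fileid(stream_fileid, n):
        number = hex(int(str(n), 10))[2:].upper().zfill(2)  # 10 -> '0A'
        return stream_fileid[0:8] + number + stream_fileid[10:]

    assert get_fileid('0000000000XXXXXX', 10) == '000000000AXXXXXX'
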
diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py
index 4ba7c36db..dd724085a 100644
--- a/youtube_dl/extractor/youporn.py
+++ b/youtube_dl/extractor/youporn.py
@@ -1,121 +1,171 @@
from __future__ import unicode_literals
-
-import json
import re
-import sys
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse_urlparse,
- compat_urllib_request,
-)
from ..utils import (
- ExtractorError,
+ int_or_none,
+ sanitized_Request,
+ str_to_int,
unescapeHTML,
unified_strdate,
)
-from ..aes import (
- aes_decrypt_text
-)
+from ..aes import aes_decrypt_text
class YouPornIE(InfoExtractor):
- _VALID_URL = r'^(?P<proto>https?://)(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
+ _TESTS = [{
'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
+ 'md5': '71ec5fcfddacf80f495efa8b6a8d9a89',
'info_dict': {
'id': '505835',
+ 'display_id': 'sex-ed-is-it-safe-to-masturbate-daily',
'ext': 'mp4',
- 'upload_date': '20101221',
+ 'title': 'Sex Ed: Is It Safe To Masturbate Daily?',
'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?',
+ 'thumbnail': 're:^https?://.*\.jpg$',
'uploader': 'Ask Dan And Jennifer',
- 'title': 'Sex Ed: Is It Safe To Masturbate Daily?',
+ 'upload_date': '20101221',
+ 'average_rating': int,
+ 'view_count': int,
+ 'comment_count': int,
+ 'categories': list,
+ 'tags': list,
'age_limit': 18,
- }
- }
+ },
+ }, {
+ # Anonymous User uploader
+ 'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4',
+ 'info_dict': {
+ 'id': '561726',
+ 'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show',
+ 'ext': 'mp4',
+ 'title': 'Big Tits Awesome Brunette On amazing webcam show',
+ 'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'Anonymous User',
+ 'upload_date': '20111125',
+ 'average_rating': int,
+ 'view_count': int,
+ 'comment_count': int,
+ 'categories': list,
+ 'tags': list,
+ 'age_limit': 18,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
- url = mobj.group('proto') + 'www.' + mobj.group('url')
+ video_id = mobj.group('id')
+ display_id = mobj.group('display_id')
- req = compat_urllib_request.Request(url)
- req.add_header('Cookie', 'age_verified=1')
- webpage = self._download_webpage(req, video_id)
- age_limit = self._rta_search(webpage)
+ request = sanitized_Request(url)
+ request.add_header('Cookie', 'age_verified=1')
+ webpage = self._download_webpage(request, display_id)
+
+ title = self._search_regex(
+ [r'(?:video_titles|videoTitle)\s*[:=]\s*(["\'])(?P<title>.+?)\1',
+ r'<h1[^>]+class=["\']heading\d?["\'][^>]*>([^<]+)<'],
+ webpage, 'title', group='title')
- # Get JSON parameters
- json_params = self._search_regex(
- [r'videoJa?son\s*=\s*({.+})',
- r'var\s+currentVideo\s*=\s*new\s+Video\((.+?)\)[,;]'],
- webpage, 'JSON parameters')
- try:
- params = json.loads(json_params)
- except ValueError:
- raise ExtractorError('Invalid JSON')
-
- self.report_extraction(video_id)
- try:
- video_title = params['title']
- upload_date = unified_strdate(params['release_date_f'])
- video_description = params['description']
- video_uploader = params['submitted_by']
- thumbnail = params['thumbnails'][0]['image']
- except KeyError:
- raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1])
-
- # Get all of the links from the page
- DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
- download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
- webpage, 'download list').strip()
- LINK_RE = r'<a href="([^"]+)">'
- links = re.findall(LINK_RE, download_list_html)
-
- # Get all encrypted links
- encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage)
- for encrypted_link in encrypted_links:
- link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8')
+ links = []
+
+ sources = self._search_regex(
+ r'sources\s*:\s*({.+?})', webpage, 'sources', default=None)
+ if sources:
+ for _, link in re.findall(r'[^:]+\s*:\s*(["\'])(http.+?)\1', sources):
+ links.append(link)
+
+ # Fallback #1
+ for _, link in re.findall(
+ r'(?:videoUrl|videoSrc|videoIpadUrl|html5PlayerSrc)\s*[:=]\s*(["\'])(http.+?)\1', webpage):
+ links.append(link)
+
+ # Fallback #2; this also contains an extra low-quality 180p format
+ for _, link in re.findall(r'<a[^>]+href=(["\'])(http.+?)\1[^>]+title=["\']Download [Vv]ideo', webpage):
links.append(link)
+ # Fallback #3, encrypted links
+ for _, encrypted_link in re.findall(
+ r'encryptedQuality\d{3,4}URL\s*=\s*(["\'])([\da-zA-Z+/=]+)\1', webpage):
+ links.append(aes_decrypt_text(encrypted_link, title, 32).decode('utf-8'))
+
formats = []
- for link in links:
- # A link looks like this:
- # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
- # A path looks like this:
- # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
- video_url = unescapeHTML(link)
- path = compat_urllib_parse_urlparse(video_url).path
- format_parts = path.split('/')[4].split('_')[:2]
-
- dn = compat_urllib_parse_urlparse(video_url).netloc.partition('.')[0]
-
- resolution = format_parts[0]
- height = int(resolution[:-len('p')])
- bitrate = int(format_parts[1][:-len('k')])
- format = '-'.join(format_parts) + '-' + dn
-
- formats.append({
+ for video_url in set(unescapeHTML(link) for link in links):
+ f = {
'url': video_url,
- 'format': format,
- 'format_id': format,
- 'height': height,
- 'tbr': bitrate,
- 'resolution': resolution,
- })
-
+ }
+ # Video URL's path looks like this:
+ # /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4
+ # so we can extract the height and bitrate from it when present
+ mobj = re.search(r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+/', video_url)
+ if mobj:
+ height = int(mobj.group('height'))
+ bitrate = int(mobj.group('bitrate'))
+ f.update({
+ 'format_id': '%dp-%dk' % (height, bitrate),
+ 'height': height,
+ 'tbr': bitrate,
+ })
+ formats.append(f)
self._sort_formats(formats)
- if not formats:
- raise ExtractorError('ERROR: no known formats available for video')
+ description = self._html_search_regex(
+ r'(?s)<div[^>]+class=["\']video-description["\'][^>]*>(.+?)</div>',
+ webpage, 'description', default=None)
+ thumbnail = self._search_regex(
+ r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1',
+ webpage, 'thumbnail', fatal=False, group='thumbnail')
+
+ uploader = self._html_search_regex(
+ r'(?s)<div[^>]+class=["\']videoInfoBy["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>',
+ webpage, 'uploader', fatal=False)
+ upload_date = unified_strdate(self._html_search_regex(
+ r'(?s)<div[^>]+class=["\']videoInfoTime["\'][^>]*>(.+?)</div>',
+ webpage, 'upload date', fatal=False))
+
+ age_limit = self._rta_search(webpage)
+
+ average_rating = int_or_none(self._search_regex(
+ r'<div[^>]+class=["\']videoInfoRating["\'][^>]*>\s*<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>',
+ webpage, 'average rating', fatal=False))
+
+ view_count = str_to_int(self._search_regex(
+ r'(?s)<div[^>]+class=["\']videoInfoViews["\'][^>]*>.*?([\d,.]+)\s*</div>',
+ webpage, 'view count', fatal=False))
+ comment_count = str_to_int(self._search_regex(
+ r'>All [Cc]omments? \(([\d,.]+)\)',
+ webpage, 'comment count', fatal=False))
+
+ def extract_tag_box(title):
+ tag_box = self._search_regex(
+ (r'<div[^>]+class=["\']tagBoxTitle["\'][^>]*>\s*%s\b.*?</div>\s*'
+ '<div[^>]+class=["\']tagBoxContent["\']>(.+?)</div>') % re.escape(title),
+ webpage, '%s tag box' % title, default=None)
+ if not tag_box:
+ return []
+ return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box)
+
+ categories = extract_tag_box('Category')
+ tags = extract_tag_box('Tags')
return {
'id': video_id,
- 'uploader': video_uploader,
- 'upload_date': upload_date,
- 'title': video_title,
+ 'display_id': display_id,
+ 'title': title,
+ 'description': description,
'thumbnail': thumbnail,
- 'description': video_description,
+ 'uploader': uploader,
+ 'upload_date': upload_date,
+ 'average_rating': average_rating,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ 'categories': categories,
+ 'tags': tags,
'age_limit': age_limit,
'formats': formats,
}
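
A minimal standalone sketch of the path-based metadata extraction above (the sample path is the one quoted in the comment; printed values are what the regex yields):

    import re

    # '<height>p_<bitrate>k' is encoded in one path segment of the CDN URL
    video_url = '/201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4'
    mobj = re.search(r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+/', video_url)
    if mobj:
        print(int(mobj.group('height')), int(mobj.group('bitrate')))  # 720 1500
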
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 97ce36550..4aac2cc03 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -20,13 +20,13 @@ from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlparse,
- compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
encode_dict,
+ error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_attribute,
@@ -34,7 +34,9 @@ from ..utils import (
int_or_none,
orderedSet,
parse_duration,
+ remove_quotes,
remove_start,
+ sanitized_Request,
smuggle_url,
str_to_int,
unescapeHTML,
@@ -114,7 +116,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
- req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
+ req = sanitized_Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
req, None,
note='Logging in', errnote='unable to log in', fatal=False)
@@ -147,7 +149,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
- tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
+ tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
tfa_req, None,
note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
@@ -178,6 +180,69 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
return
+class YoutubeEntryListBaseInfoExtractor(InfoExtractor):
+ # Extract entries from page with "Load more" button
+ def _entries(self, page, playlist_id):
+ more_widget_html = content_html = page
+ for page_num in itertools.count(1):
+ for entry in self._process_page(content_html):
+ yield entry
+
+ mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+ if not mobj:
+ break
+
+ more = self._download_json(
+ 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
+ 'Downloading page #%s' % page_num,
+ transform_source=uppercase_escape)
+ content_html = more['content_html']
+ if not content_html.strip():
+ # Some webpages show a "Load more" button but they don't
+ # have more videos
+ break
+ more_widget_html = more['load_more_widget_html']
+
+
+class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
+ def _process_page(self, content):
+ for video_id, video_title in self.extract_videos_from_page(content):
+ yield self.url_result(video_id, 'Youtube', video_id, video_title)
+
+ def extract_videos_from_page(self, page):
+ ids_in_page = []
+ titles_in_page = []
+ for mobj in re.finditer(self._VIDEO_RE, page):
+ # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
+ if 'index' in mobj.groupdict() and mobj.group('id') == '0':
+ continue
+ video_id = mobj.group('id')
+ video_title = unescapeHTML(mobj.group('title'))
+ if video_title:
+ video_title = video_title.strip()
+ try:
+ idx = ids_in_page.index(video_id)
+ if video_title and not titles_in_page[idx]:
+ titles_in_page[idx] = video_title
+ except ValueError:
+ ids_in_page.append(video_id)
+ titles_in_page.append(video_title)
+ return zip(ids_in_page, titles_in_page)
+
+
+class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
+ def _process_page(self, content):
+ for playlist_id in re.findall(r'href="/?playlist\?list=(.+?)"', content):
+ yield self.url_result(
+ 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+ webpage = self._download_webpage(url, playlist_id)
+ title = self._og_search_title(webpage, fatal=False)
+ return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
+
+
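
The two base classes above factor out the "Load more" pagination that used to be duplicated in the playlist and channel extractors. A self-contained sketch of the same loop, with a stub fetch_json standing in for _download_json (names and sample HTML are illustrative):

    import itertools
    import re

    def paginate(first_page, fetch_json):
        more_widget_html = content_html = first_page
        for page_num in itertools.count(1):
            for video_id in re.findall(r'href="/watch\?v=([0-9A-Za-z_-]{11})', content_html):
                yield video_id
            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break
            more = fetch_json(mobj.group('more'), page_num)
            content_html = more['content_html']
            if not content_html.strip():
                break  # a "Load more" button with nothing behind it
            more_widget_html = more['load_more_widget_html']

    page = '<a href="/watch?v=dQw4w9WgXcQ">x</a>'  # no load-more button
    print(list(paginate(page, lambda path, num: {})))  # ['dQw4w9WgXcQ']
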
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
@@ -195,7 +260,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
- (?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx)
+ (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
v=
)
))
@@ -283,6 +348,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+ # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
@@ -331,12 +397,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
+ 'alt_title': 'I Love It (feat. Charli XCX)',
'description': 'md5:782e8651347686cba06e58f71ab51773',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
+ 'creator': 'Icona Pop',
}
},
{
@@ -347,9 +415,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
+ 'alt_title': 'Tunnel Vision',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
+ 'creator': 'Justin Timberlake',
'age_limit': 18,
}
},
@@ -363,7 +433,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
- 'uploader_id': 'setindia'
+ 'uploader_id': 'setindia',
+ 'age_limit': 18,
}
},
{
@@ -427,10 +498,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
+ 'alt_title': 'Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
+ 'creator': 'Taylor Swift',
},
'params': {
'youtube_include_dash_manifest': True,
@@ -486,9 +559,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
+ 'creator': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
+ 'alt_title': 'Some Chords',
},
'expected_warnings': [
'DASH manifest missing',
@@ -500,7 +575,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
- 'upload_date': '20120724',
+ 'upload_date': '20150827',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
@@ -628,6 +703,49 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
{
'url': 'http://vid.plus/FlRa-iH7PGw',
'only_matching': True,
+ },
+ {
+ # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
+ 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
+ 'info_dict': {
+ 'id': 'lsguqyKfVQg',
+ 'ext': 'mp4',
+ 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
+ 'alt_title': 'Dark Walk',
+ 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
+ 'upload_date': '20151119',
+ 'uploader_id': 'IronSoulElf',
+ 'uploader': 'IronSoulElf',
+ 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ {
+ # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
+ 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
+ 'only_matching': True,
+ },
+ {
+ # Video with yt:stretch=17:0
+ 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
+ 'info_dict': {
+ 'id': 'Q39EVAstoRM',
+ 'ext': 'mp4',
+ 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
+ 'description': 'md5:ee18a25c350637c8faff806845bddee9',
+ 'upload_date': '20151107',
+ 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
+ 'uploader': 'CH GAMER DROID',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
+ 'only_matching': True,
}
]
@@ -657,7 +775,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
- r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?)?\.(?P<ext>[a-z]+)$',
+ r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
@@ -786,7 +904,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
- self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
+ self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
return {}
sub_lang_list = {}
@@ -812,16 +930,33 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return {}
return sub_lang_list
+ def _get_ytplayer_config(self, video_id, webpage):
+ patterns = (
+ # User data may contain arbitrary character sequences that break
+ # regex-based JSON extraction, e.g. when the data contains '};' the
+ # second regex won't capture the whole JSON. For now we work around
+ # this by trying the more specific regex first; proper quoted-string
+ # handling should eventually replace this workaround (see
+ # https://github.com/rg3/youtube-dl/issues/7468,
+ # https://github.com/rg3/youtube-dl/pull/7599)
+ r';ytplayer\.config\s*=\s*({.+?});ytplayer',
+ r';ytplayer\.config\s*=\s*({.+?});',
+ )
+ config = self._search_regex(
+ patterns, webpage, 'ytplayer.config', default=None)
+ if config:
+ return self._parse_json(
+ uppercase_escape(config), video_id, fatal=False)
+
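
Why the stricter pattern is tried first: with '};' inside user data, the lazy ({.+?}); stops at the first '};' it sees, while anchoring on the trailing 'ytplayer' token captures the whole object. A runnable demonstration on a made-up page snippet:

    import json
    import re

    webpage = ';ytplayer.config = {"args": {"title": "{dark walk}; collab"}};ytplayer.load();'

    naive = re.search(r';ytplayer\.config\s*=\s*({.+?});', webpage).group(1)
    strict = re.search(r';ytplayer\.config\s*=\s*({.+?});ytplayer', webpage).group(1)

    json.loads(strict)     # parses fine
    try:
        json.loads(naive)  # truncated inside the title string
    except ValueError as e:
        print('naive regex failed:', e)
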
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
- mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
+ player_config = self._get_ytplayer_config(video_id, webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
- if mobj is None:
+ if not player_config:
self._downloader.report_warning(err_msg)
return {}
- player_config = json.loads(mobj.group(1))
try:
args = player_config['args']
caption_url = args['ttsurl']
@@ -1028,10 +1163,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
age_gate = False
video_info = None
# Try looking directly into the video webpage
- mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
- if mobj:
- json_code = uppercase_escape(mobj.group(1))
- ytplayer_config = json.loads(json_code)
+ ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
+ if ytplayer_config:
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map'):
# Convert to the same format returned by compat_parse_qs
@@ -1061,6 +1194,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
+ # Different get_video_info requests may report different results, e.g.
+ # some may report video unavailability while others serve the video
+ # without complaint (see https://github.com/rg3/youtube-dl/issues/7362:
+ # the original webpage as well as el=info and el=embedded get_video_info
+ # requests report video unavailability due to geo restriction while
+ # el=detailpage succeeds and returns valid data). This is probably due
+ # to YouTube's countermeasures against IP ranges of hosting providers.
+ # We work around this by preferring the first successful video_info
+ # that contains a token, if no such video_info has been found yet.
+ if 'token' not in video_info:
+ video_info = get_video_info
break
if 'token' not in video_info:
if 'reason' in video_info:
@@ -1176,6 +1320,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
+ m_music = re.search(
+ r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
+ video_webpage)
+ if m_music:
+ video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
+ video_creator = clean_html(m_music.group('creator'))
+ else:
+ video_alt_title = video_creator = None
+
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
@@ -1286,7 +1439,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
- r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
+ [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
@@ -1348,6 +1501,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
+ # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
+ for a_format in formats:
+ a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
else:
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
@@ -1385,10 +1541,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
- ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
- for f in formats:
- if f.get('vcodec') != 'none':
- f['stretched_ratio'] = ratio
+ w = float(stretched_m.group('w'))
+ h = float(stretched_m.group('h'))
+ # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM the ratio is 17:0),
+ # so only valid ratios are processed.
+ if w > 0 and h > 0:
+ ratio = w / h
+ for f in formats:
+ if f.get('vcodec') != 'none':
+ f['stretched_ratio'] = ratio
self._sort_formats(formats)
@@ -1397,7 +1558,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': upload_date,
+ 'creator': video_creator,
'title': video_title,
+ 'alt_title': video_alt_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
@@ -1419,7 +1582,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
}
-class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
+class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
@@ -1427,7 +1590,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
- \? (?:.*?&)*? (?:p|a|list)=
+ \? (?:.*?[&;])*? (?:p|a|list)=
| p/
)
(
@@ -1440,7 +1603,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
- _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
+ _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
@@ -1557,37 +1720,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
else:
self.report_warning('Youtube gives an alert message: ' + match)
- # Extract the video ids from the playlist pages
- def _entries():
- more_widget_html = content_html = page
- for page_num in itertools.count(1):
- matches = re.finditer(self._VIDEO_RE, content_html)
- # We remove the duplicates and the link with index 0
- # (it's not the first video of the playlist)
- new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
- for vid_id in new_ids:
- yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
-
- mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
- if not mobj:
- break
-
- more = self._download_json(
- 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
- 'Downloading page #%s' % page_num,
- transform_source=uppercase_escape)
- content_html = more['content_html']
- if not content_html.strip():
- # Some webpages show a "Load more" button but they don't
- # have more videos
- break
- more_widget_html = more['load_more_widget_html']
-
playlist_title = self._html_search_regex(
- r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
+ r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
page, 'title')
- return self.playlist_result(_entries(), playlist_id, playlist_title)
+ return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
def _real_extract(self, url):
# Extract playlist id
@@ -1613,35 +1750,34 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
return self._extract_playlist(playlist_id)
-class YoutubeChannelIE(InfoExtractor):
+class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
+ _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
- 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
+ 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
+ 'title': 'Uploads from lex will',
}
+ }, {
+ 'note': 'Age restricted channel',
+ # from https://www.youtube.com/user/DeusExOfficial
+ 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
+ 'playlist_mincount': 64,
+ 'info_dict': {
+ 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
+ 'title': 'Uploads from Deus Ex',
+ },
}]
- @staticmethod
- def extract_videos_from_page(page):
- ids_in_page = []
- titles_in_page = []
- for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?', page):
- video_id = mobj.group('id')
- video_title = unescapeHTML(mobj.group('title'))
- try:
- idx = ids_in_page.index(video_id)
- if video_title and not titles_in_page[idx]:
- titles_in_page[idx] = video_title
- except ValueError:
- ids_in_page.append(video_id)
- titles_in_page.append(video_title)
- return zip(ids_in_page, titles_in_page)
+ @classmethod
+ def suitable(cls, url):
+ return False if YoutubePlaylistsIE.suitable(url) else super(YoutubeChannelIE, cls).suitable(url)
def _real_extract(self, url):
channel_id = self._match_id(url)
@@ -1654,12 +1790,15 @@ class YoutubeChannelIE(InfoExtractor):
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
- channel_playlist_id = self._html_search_meta(
- 'channelId', channel_page, 'channel id', default=None)
- if not channel_playlist_id:
- channel_playlist_id = self._search_regex(
- r'data-channel-external-id="([^"]+)"',
- channel_page, 'channel id', default=None)
+ if channel_page is False:
+ channel_playlist_id = False
+ else:
+ channel_playlist_id = self._html_search_meta(
+ 'channelId', channel_page, 'channel id', default=None)
+ if not channel_playlist_id:
+ channel_playlist_id = self._search_regex(
+ r'data-(?:channel-external-|yt)id="([^"]+)"',
+ channel_page, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
@@ -1682,29 +1821,7 @@ class YoutubeChannelIE(InfoExtractor):
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
- def _entries():
- more_widget_html = content_html = channel_page
- for pagenum in itertools.count(1):
-
- for video_id, video_title in self.extract_videos_from_page(content_html):
- yield self.url_result(
- video_id, 'Youtube', video_id=video_id,
- video_title=video_title)
-
- mobj = re.search(
- r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
- more_widget_html)
- if not mobj:
- break
-
- more = self._download_json(
- 'https://youtube.com/%s' % mobj.group('more'), channel_id,
- 'Downloading page #%s' % (pagenum + 1),
- transform_source=uppercase_escape)
- content_html = more['content_html']
- more_widget_html = more['load_more_widget_html']
-
- return self.playlist_result(_entries(), channel_id)
+ return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
@@ -1735,6 +1852,36 @@ class YoutubeUserIE(YoutubeChannelIE):
return super(YoutubeUserIE, cls).suitable(url)
+class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
+ IE_DESC = 'YouTube.com user/channel playlists'
+ _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
+ IE_NAME = 'youtube:playlists'
+
+ _TESTS = [{
+ 'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
+ 'playlist_mincount': 4,
+ 'info_dict': {
+ 'id': 'ThirstForScience',
+ 'title': 'Thirst for Science',
+ },
+ }, {
+ # with "Load more" button
+ 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
+ 'playlist_mincount': 70,
+ 'info_dict': {
+ 'id': 'igorkle1',
+ 'title': 'Игорь Клейнер',
+ },
+ }, {
+ 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
+ 'playlist_mincount': 17,
+ 'info_dict': {
+ 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
+ 'title': 'Chem Player',
+ },
+ }]
+
+
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
@@ -1830,7 +1977,7 @@ class YoutubeSearchURLIE(InfoExtractor):
}
-class YoutubeShowIE(InfoExtractor):
+class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
@@ -1844,26 +1991,9 @@ class YoutubeShowIE(InfoExtractor):
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- playlist_id = mobj.group('id')
- webpage = self._download_webpage(
- 'https://www.youtube.com/show/%s/playlists' % playlist_id, playlist_id, 'Downloading show webpage')
- # There's one playlist for each season of the show
- m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
- self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
- entries = [
- self.url_result(
- 'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
- for season in m_seasons
- ]
- title = self._og_search_title(webpage, fatal=False)
-
- return {
- '_type': 'playlist',
- 'id': playlist_id,
- 'title': title,
- 'entries': entries,
- }
+ playlist_id = self._match_id(url)
+ return super(YoutubeShowIE, self)._real_extract(
+ 'https://www.youtube.com/show/%s/playlists' % playlist_id)
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
@@ -1970,6 +2100,7 @@ class YoutubeTruncatedURLIE(InfoExtractor):
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
+ t=[0-9]+
)?
|
attribution_link\?a=[^&]+
@@ -1992,6 +2123,9 @@ class YoutubeTruncatedURLIE(InfoExtractor):
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
+ }, {
+ 'url': 'https://www.youtube.com/watch?t=2372',
+ 'only_matching': True,
}]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py
index 98f15177b..a795f56b3 100644
--- a/youtube_dl/extractor/zdf.py
+++ b/youtube_dl/extractor/zdf.py
@@ -9,6 +9,7 @@ from ..utils import (
int_or_none,
unified_strdate,
OnDemandPagedList,
+ xpath_text,
)
@@ -19,13 +20,11 @@ def extract_from_xml_url(ie, video_id, xml_url):
errnote='Failed to download video info')
title = doc.find('.//information/title').text
- description = doc.find('.//information/detail').text
- duration = int(doc.find('.//details/lengthSec').text)
- uploader_node = doc.find('.//details/originChannelTitle')
- uploader = None if uploader_node is None else uploader_node.text
- uploader_id_node = doc.find('.//details/originChannelId')
- uploader_id = None if uploader_id_node is None else uploader_id_node.text
- upload_date = unified_strdate(doc.find('.//details/airtime').text)
+ description = xpath_text(doc, './/information/detail', 'description')
+ duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration'))
+ uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader')
+ uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id')
+ upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date'))
def xml_to_format(fnode):
video_url = fnode.find('url').text
@@ -40,15 +39,14 @@ def extract_from_xml_url(ie, video_id, xml_url):
ext = format_m.group('container')
proto = format_m.group('proto').lower()
- quality = fnode.find('./quality').text
- abr = int(fnode.find('./audioBitrate').text) // 1000
- vbr_node = fnode.find('./videoBitrate')
- vbr = None if vbr_node is None else int(vbr_node.text) // 1000
+ quality = xpath_text(fnode, './quality', 'quality')
+ abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000)
+ vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000)
- width_node = fnode.find('./width')
- width = None if width_node is None else int_or_none(width_node.text)
- height_node = fnode.find('./height')
- height = None if height_node is None else int_or_none(height_node.text)
+ width = int_or_none(xpath_text(fnode, './width', 'width'))
+ height = int_or_none(xpath_text(fnode, './height', 'height'))
+
+ filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize'))
format_note = ''
if not format_note:
@@ -64,12 +62,31 @@ def extract_from_xml_url(ie, video_id, xml_url):
'vbr': vbr,
'width': width,
'height': height,
- 'filesize': int_or_none(fnode.find('./filesize').text),
+ 'filesize': filesize,
'format_note': format_note,
'protocol': proto,
'_available': is_available,
}
+ def xml_to_thumbnails(fnode):
+ thumbnails = []
+ for node in fnode:
+ thumbnail_url = node.text
+ if not thumbnail_url:
+ continue
+ thumbnail = {
+ 'url': thumbnail_url,
+ }
+ if 'key' in node.attrib:
+ m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key'])
+ if m:
+ thumbnail['width'] = int(m.group(1))
+ thumbnail['height'] = int(m.group(2))
+ thumbnails.append(thumbnail)
+ return thumbnails
+
+ thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage'))
+
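
A condensed, standalone version of xml_to_thumbnails above, run against a made-up <teaserimages> fragment of the shape the ZDF feed uses:

    import re
    import xml.etree.ElementTree as ET

    doc = ET.fromstring(
        '<video><teaserimages>'
        '<teaserimage key="320x180">http://example.com/small.jpg</teaserimage>'
        '<teaserimage>http://example.com/orig.jpg</teaserimage>'
        '</teaserimages></video>')

    thumbnails = []
    for node in doc.findall('.//teaserimages/teaserimage'):
        if not node.text:
            continue
        thumbnail = {'url': node.text}
        m = re.match('^([0-9]+)x([0-9]+)$', node.attrib.get('key', ''))
        if m:
            thumbnail['width'] = int(m.group(1))
            thumbnail['height'] = int(m.group(2))
        thumbnails.append(thumbnail)
    print(thumbnails)
    # [{'url': 'http://example.com/small.jpg', 'width': 320, 'height': 180},
    #  {'url': 'http://example.com/orig.jpg'}]
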
format_nodes = doc.findall('.//formitaeten/formitaet')
formats = list(filter(
lambda f: f['_available'],
@@ -81,6 +98,7 @@ def extract_from_xml_url(ie, video_id, xml_url):
'title': title,
'description': description,
'duration': duration,
+ 'thumbnails': thumbnails,
'uploader': uploader,
'uploader_id': uploader_id,
'upload_date': upload_date,
diff --git a/youtube_dl/extractor/zingmp3.py b/youtube_dl/extractor/zingmp3.py
index 7dc1e2f2b..437eecb67 100644
--- a/youtube_dl/extractor/zingmp3.py
+++ b/youtube_dl/extractor/zingmp3.py
@@ -9,9 +9,11 @@ from ..utils import ExtractorError
class ZingMp3BaseInfoExtractor(InfoExtractor):
- def _extract_item(self, item):
+ def _extract_item(self, item, fatal=True):
error_message = item.find('./errormessage').text
if error_message:
+ if not fatal:
+ return
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_message),
expected=True)
@@ -43,7 +45,9 @@ class ZingMp3BaseInfoExtractor(InfoExtractor):
entries = []
for i, item in enumerate(items, 1):
- entry = self._extract_item(item)
+ entry = self._extract_item(item, fatal=False)
+ if not entry:
+ continue
entry['id'] = '%s-%d' % (id, i)
entries.append(entry)
@@ -85,7 +89,7 @@ class ZingMp3SongIE(ZingMp3BaseInfoExtractor):
class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
- _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
+ _VALID_URL = r'https?://mp3\.zing\.vn/(?:album|playlist)/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html'
_TESTS = [{
'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html',
'info_dict': {
@@ -94,6 +98,9 @@ class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor):
'title': 'Lâu Đài Tình Ái - Bằng Kiều ft. Minh Tuyết | Album 320 lossless',
},
'playlist_count': 10,
+ }, {
+ 'url': 'http://mp3.zing.vn/playlist/Duong-Hong-Loan-apollobee/IWCAACCB.html',
+ 'only_matching': True,
}]
IE_NAME = 'zingmp3:album'
IE_DESC = 'mp3.zing.vn albums'
diff --git a/youtube_dl/jsinterp.py b/youtube_dl/jsinterp.py
index 0e0c7d90d..a7440c582 100644
--- a/youtube_dl/jsinterp.py
+++ b/youtube_dl/jsinterp.py
@@ -214,7 +214,7 @@ class JSInterpreter(object):
obj = {}
obj_m = re.search(
(r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
- r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' +
+ r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' +
r'\}\s*;',
self.code)
fields = obj_m.group('fields')
@@ -232,10 +232,10 @@ class JSInterpreter(object):
def extract_function(self, funcname):
func_m = re.search(
r'''(?x)
- (?:function\s+%s|[{;]%s\s*=\s*function)\s*
+ (?:function\s+%s|[{;,]%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
\((?P<args>[^)]*)\)\s*
\{(?P<code>[^}]+)\}''' % (
- re.escape(funcname), re.escape(funcname)),
+ re.escape(funcname), re.escape(funcname), re.escape(funcname)),
self.code)
if func_m is None:
raise ExtractorError('Could not find JS function %r' % funcname)
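
The widened extract_function pattern now also finds functions declared as 'var f = function(...)' or as object members preceded by ','. A quick check against a made-up player snippet:

    import re

    code = 'var xy = function(a){return a.split("").reverse().join("")};'
    funcname = 'xy'
    func_m = re.search(
        r'''(?x)
            (?:function\s+%s|[{;,]%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
            \((?P<args>[^)]*)\)\s*
            \{(?P<code>[^}]+)\}''' % (
            re.escape(funcname), re.escape(funcname), re.escape(funcname)),
        code)
    print(func_m.group('args'), '->', func_m.group('code'))
    # a -> return a.split("").reverse().join("")
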
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 5eccc0a70..c46e136bf 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -276,7 +276,7 @@ def parseOpts(overrideArguments=None):
'For example, to only match videos that have been liked more than '
'100 times and disliked less than 50 times (or the dislike '
'functionality is not available at the given service), but who '
- 'also have a description, use --match-filter '
+ 'also have a description, use --match-filter '
'"like_count > 100 & dislike_count <? 50 & description" .'
))
selection.add_option(
@@ -338,7 +338,7 @@ def parseOpts(overrideArguments=None):
video_format.add_option(
'-F', '--list-formats',
action='store_true', dest='listformats',
- help='List all available formats')
+ help='List all available formats of requested videos')
video_format.add_option(
'--youtube-include-dash-manifest',
action='store_true', dest='youtube_include_dash_manifest', default=True,
@@ -363,7 +363,7 @@ def parseOpts(overrideArguments=None):
subtitles.add_option(
'--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub', default=False,
- help='Write automatic subtitle file (YouTube only)')
+ help='Write automatically generated subtitle file (YouTube only)')
subtitles.add_option(
'--all-subs',
action='store_true', dest='allsubtitles', default=False,
@@ -602,7 +602,7 @@ def parseOpts(overrideArguments=None):
filesystem.add_option(
'-A', '--auto-number',
action='store_true', dest='autonumber', default=False,
- help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
+ help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
filesystem.add_option(
'-t', '--title',
action='store_true', dest='usetitle', default=False,
diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py
index 1f723908b..daca5d814 100644
--- a/youtube_dl/postprocessor/ffmpeg.py
+++ b/youtube_dl/postprocessor/ffmpeg.py
@@ -52,7 +52,7 @@ class FFmpegPostProcessor(PostProcessor):
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
- prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
+ prefer_ffmpeg = False
self.basename = None
self.probe_basename = None
@@ -60,6 +60,7 @@ class FFmpegPostProcessor(PostProcessor):
self._paths = None
self._versions = None
if self._downloader:
+ prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
location = self._downloader.params.get('ffmpeg_location')
if location is not None:
if not os.path.exists(location):
@@ -135,7 +136,10 @@ class FFmpegPostProcessor(PostProcessor):
files_cmd = []
for path in input_paths:
- files_cmd.extend([encodeArgument('-i'), encodeFilename(path, True)])
+ files_cmd.extend([
+ encodeArgument('-i'),
+ encodeFilename(self._ffmpeg_filename_argument(path), True)
+ ])
cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] +
files_cmd +
[encodeArgument(o) for o in opts] +
@@ -155,10 +159,10 @@ class FFmpegPostProcessor(PostProcessor):
self.run_ffmpeg_multiple_files([path], out_path, opts)
def _ffmpeg_filename_argument(self, fn):
- # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
- if fn.startswith('-'):
- return './' + fn
- return fn
+ # Always use 'file:' because the filename may contain ':' (ffmpeg
+ # interprets that as a protocol) or can start with '-' (-- is broken in
+ # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
+ return 'file:' + fn
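
The 'file:' prefix sidesteps both ffmpeg quirks at once; a standalone restatement with the two problem cases (file names are made up):

    def ffmpeg_filename_argument(fn):
        # ':' would be read as a protocol and a leading '-' as an option
        return 'file:' + fn

    print(ffmpeg_filename_argument('-starts-with-dash.mp4'))  # file:-starts-with-dash.mp4
    print(ffmpeg_filename_argument('a:b.mp4'))                # file:a:b.mp4
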
class FFmpegExtractAudioPP(FFmpegPostProcessor):
@@ -269,7 +273,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
return [], information
try:
- self._downloader.to_screen('[' + self.basename + '] Destination: ' + new_path)
+ self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
diff --git a/youtube_dl/update.py b/youtube_dl/update.py
index fc7ac8305..995b8ed96 100644
--- a/youtube_dl/update.py
+++ b/youtube_dl/update.py
@@ -9,11 +9,8 @@ import subprocess
import sys
from zipimport import zipimporter
-from .compat import (
- compat_str,
- compat_urllib_request,
-)
-from .utils import make_HTTPS_handler
+from .utils import encode_compat_str
+
from .version import __version__
@@ -47,7 +44,7 @@ def rsa_verify(message, signature, key):
return True
-def update_self(to_screen, verbose):
+def update_self(to_screen, verbose, opener):
"""Update the program file with the latest version from the repository"""
UPDATE_URL = "https://rg3.github.io/youtube-dl/update/"
@@ -59,15 +56,12 @@ def update_self(to_screen, verbose):
to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
return
- https_handler = make_HTTPS_handler({})
- opener = compat_urllib_request.build_opener(https_handler)
-
# Check if there is a new version
try:
newversion = opener.open(VERSION_URL).read().decode('utf-8').strip()
except Exception:
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: can\'t find the current version. Please try again later.')
return
if newversion == __version__:
@@ -80,7 +74,7 @@ def update_self(to_screen, verbose):
versions_info = json.loads(versions_info)
except Exception:
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: can\'t obtain versions info. Please try again later.')
return
if 'signature' not in versions_info:
@@ -129,7 +123,7 @@ def update_self(to_screen, verbose):
urlh.close()
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to download latest version')
return
@@ -143,7 +137,7 @@ def update_self(to_screen, verbose):
outf.write(newcontent)
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to write the new version')
return
@@ -163,7 +157,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"
return # Do not show premature success messages
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to overwrite current version')
return
@@ -175,7 +169,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"
urlh.close()
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to download latest version')
return
@@ -189,7 +183,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"
outf.write(newcontent)
except (IOError, OSError):
if verbose:
- to_screen(compat_str(traceback.format_exc()))
+ to_screen(encode_compat_str(traceback.format_exc()))
to_screen('ERROR: unable to overwrite current version')
return
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 206dd56bc..1737ac5f6 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -3,6 +3,7 @@
from __future__ import unicode_literals
+import base64
import calendar
import codecs
import contextlib
@@ -35,6 +36,7 @@ import zlib
from .compat import (
compat_basestring,
compat_chr,
+ compat_etree_fromstring,
compat_html_entities,
compat_http_client,
compat_kwargs,
@@ -177,10 +179,19 @@ def xpath_with_ns(path, ns_map):
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
- if sys.version_info < (2, 7): # Crazy 2.6
- xpath = xpath.encode('ascii')
+ def _find_xpath(xpath):
+ if sys.version_info < (2, 7): # Crazy 2.6
+ xpath = xpath.encode('ascii')
+ return node.find(xpath)
+
+ if isinstance(xpath, (str, compat_str)):
+ n = _find_xpath(xpath)
+ else:
+ for xp in xpath:
+ n = _find_xpath(xp)
+ if n is not None:
+ break
- n = node.find(xpath)
if n is None:
if default is not NO_DEFAULT:
return default
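
With this change xpath_element accepts either a single xpath or a sequence of them, returning the first node that exists. The underlying try-in-order idea, on a made-up document:

    import xml.etree.ElementTree as ET

    doc = ET.fromstring('<root><fallback>hello</fallback></root>')

    for xp in ('preferred', 'fallback'):  # tried in order
        n = doc.find(xp)
        if n is not None:
            break
    print(n.text)  # hello
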
@@ -355,13 +366,20 @@ def sanitize_path(s):
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
- path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|\.$)', '#', path_part)
+ path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
return os.path.join(*sanitized_path)
+# Prepend protocol-less URLs with an `http:` scheme to avoid unwanted
+# failures due to a missing protocol
+def sanitized_Request(url, *args, **kwargs):
+ return compat_urllib_request.Request(
+ 'http:%s' % url if url.startswith('//') else url, *args, **kwargs)
+
+
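
sanitized_Request makes scheme-less ('//host/...') URLs usable by prepending 'http:'. The same idea expressed directly against the stdlib (rather than the internal compat layer):

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    def sanitized_request(url, *args, **kwargs):
        return Request('http:%s' % url if url.startswith('//') else url, *args, **kwargs)

    print(sanitized_request('//example.com/video.mp4').get_full_url())
    # http://example.com/video.mp4
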
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
@@ -385,10 +403,14 @@ def _htmlentity_transform(entity):
numstr = '0%s' % numstr
else:
base = 10
- return compat_chr(int(numstr, base))
+ # See https://github.com/rg3/youtube-dl/issues/7518
+ try:
+ return compat_chr(int(numstr, base))
+ except ValueError:
+ pass
# Unknown entity in name, return its literal representation
- return ('&%s;' % entity)
+ return '&%s;' % entity
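
Numeric character references can exceed the Unicode range, in which case compat_chr raises ValueError and the entity is now kept literally. A trimmed-down sketch (decimal references only, using the first value past the Unicode maximum as the bad input):

    def htmlentity_transform_sketch(entity):
        if entity.startswith('#'):
            try:
                return chr(int(entity[1:]))
            except ValueError:
                pass
        # Unknown or out-of-range entity: return its literal representation
        return '&%s;' % entity

    print(htmlentity_transform_sketch('#65'))       # A
    print(htmlentity_transform_sketch('#1114112'))  # &#1114112;
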
def unescapeHTML(s):
@@ -619,7 +641,7 @@ def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/rg3/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
- kwargs['strict'] = True
+ kwargs[b'strict'] = True
hc = http_class(*args, **kwargs)
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
@@ -641,6 +663,16 @@ def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
return hc
+def handle_youtubedl_headers(headers):
+ filtered_headers = headers
+
+ if 'Youtubedl-no-compression' in filtered_headers:
+ filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
+ del filtered_headers['Youtubedl-no-compression']
+
+ return filtered_headers
+
+
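
handle_youtubedl_headers centralizes the internal-header filtering that was previously inlined in the request handler; a copy of the helper, exercised standalone:

    def handle_youtubedl_headers(headers):
        filtered_headers = headers
        if 'Youtubedl-no-compression' in filtered_headers:
            filtered_headers = dict((k, v) for k, v in filtered_headers.items()
                                    if k.lower() != 'accept-encoding')
            del filtered_headers['Youtubedl-no-compression']
        return filtered_headers

    print(handle_youtubedl_headers(
        {'Accept-Encoding': 'gzip, deflate', 'Youtubedl-no-compression': 'True'}))
    # {} - both the internal marker and Accept-Encoding are gone
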
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
@@ -648,7 +680,7 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
- to include the HTTP header "Youtubedl-No-Compression", which will be
+ to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
@@ -709,10 +741,8 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
- if 'Youtubedl-no-compression' in req.headers:
- if 'Accept-encoding' in req.headers:
- del req.headers['Accept-encoding']
- del req.headers['Youtubedl-no-compression']
+
+ req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
@@ -813,9 +843,11 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
if date_str is None:
return None
+ date_str = re.sub(r'\.[0-9]+', '', date_str)
+
if timezone is None:
m = re.search(
- r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
+ r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
date_str)
if not m:
timezone = datetime.timedelta()
@@ -828,9 +860,12 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
- date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
- dt = datetime.datetime.strptime(date_str, date_format) - timezone
- return calendar.timegm(dt.timetuple())
+ try:
+ date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+ dt = datetime.datetime.strptime(date_str, date_format) - timezone
+ return calendar.timegm(dt.timetuple())
+ except ValueError:
+ pass
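
Fractional seconds would make strptime('%H:%M:%S') raise, so they are now stripped up front, and an unparsable remainder degrades to None instead of crashing. A condensed, runnable approximation of the patched parse_iso8601:

    import calendar
    import datetime
    import re

    def parse_iso8601_sketch(date_str, delimiter='T'):
        date_str = re.sub(r'\.[0-9]+', '', date_str)  # drop fractional seconds
        timezone = datetime.timedelta()
        m = re.search(r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)', date_str)
        if m:
            if m.group('sign'):
                sign = 1 if m.group('sign') == '+' else -1
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
            date_str = date_str[:-len(m.group(0))]
        try:
            dt = datetime.datetime.strptime(date_str, '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)) - timezone
            return calendar.timegm(dt.timetuple())
        except ValueError:
            return None

    print(parse_iso8601_sketch('2015-12-18T12:34:56.123+01:00'))  # 1450438496
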
def unified_strdate(date_str, day_first=True):
@@ -895,7 +930,8 @@ def unified_strdate(date_str, day_first=True):
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
- return upload_date
+ if upload_date is not None:
+ return compat_str(upload_date)
def determine_ext(url, default_ext='unknown_video'):
@@ -904,6 +940,21 @@ def determine_ext(url, default_ext='unknown_video'):
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
+ elif guess.rstrip('/') in (
+ 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
+ 'flv', 'f4v', 'f4a', 'f4b',
+ 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
+ 'mkv', 'mka', 'mk3d',
+ 'avi', 'divx',
+ 'mov',
+ 'asf', 'wmv', 'wma',
+ '3gp', '3g2',
+ 'mp3',
+ 'flac',
+ 'ape',
+ 'wav',
+ 'f4f', 'f4m', 'm3u8', 'smil'):
+ return guess.rstrip('/')
else:
return default_ext
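
determine_ext now also recognizes a known extension followed by a trailing slash (e.g. '.../video.mp4/'). Just that branch, in isolation and with a shortened whitelist:

    import re

    def determine_ext_sketch(url, default_ext='unknown_video'):
        guess = url.partition('?')[0].rpartition('.')[2]
        if re.match(r'^[A-Za-z0-9]+$', guess):
            return guess
        elif guess.rstrip('/') in ('mp4', 'webm', 'mp3', 'm3u8', 'f4m'):
            return guess.rstrip('/')
        return default_ext

    print(determine_ext_sketch('http://example.com/video.mp4/'))  # mp4
    print(determine_ext_sketch('http://example.com/video'))       # unknown_video
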
@@ -1355,6 +1406,15 @@ def remove_end(s, end):
return s
+def remove_quotes(s):
+ if s is None or len(s) < 2:
+ return s
+ for quote in ('"', "'", ):
+ if s[0] == quote and s[-1] == quote:
+ return s[1:-1]
+ return s
+
+
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
@@ -1371,7 +1431,12 @@ def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
v = getattr(v, get_attr, None)
if v == '':
v = None
- return default if v is None else (int(v) * invscale // scale)
+ if v is None:
+ return default
+ try:
+ return int(v) * invscale // scale
+ except ValueError:
+ return default
def str_or_none(v, default=None):
@@ -1387,7 +1452,12 @@ def str_to_int(int_str):
def float_or_none(v, scale=1, invscale=1, default=None):
- return default if v is None else (float(v) * invscale / scale)
+ if v is None:
+ return default
+ try:
+ return float(v) * invscale / scale
+ except ValueError:
+ return default
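
int_or_none and float_or_none now swallow ValueError so that malformed input degrades to the default instead of aborting extraction. The pattern, reduced to its core:

    def int_or_none_sketch(v, default=None):
        if v is None or v == '':
            return default
        try:
            return int(v)
        except ValueError:
            return default

    print(int_or_none_sketch('1080'))   # 1080
    print(int_or_none_sketch('1080p'))  # None
    print(int_or_none_sketch(None, 0))  # 0
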
def parse_duration(s):
@@ -1637,30 +1707,13 @@ def urlencode_postdata(*args, **kargs):
def encode_dict(d, encoding='utf-8'):
- return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())
-
-
-try:
- etree_iter = xml.etree.ElementTree.Element.iter
-except AttributeError: # Python <=2.6
- etree_iter = lambda n: n.findall('.//*')
-
+ def encode(v):
+ return v.encode(encoding) if isinstance(v, compat_basestring) else v
+ return dict((encode(k), encode(v)) for k, v in d.items())
-def parse_xml(s):
- class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
- def doctype(self, name, pubid, system):
- pass # Ignore doctypes
- parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
- kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
- tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
- # Fix up XML parser in Python 2.x
- if sys.version_info < (3, 0):
- for n in etree_iter(tree):
- if n.text is not None:
- if not isinstance(n.text, compat_str):
- n.text = n.text.decode('utf-8')
- return tree
+def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
+ return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
@@ -1690,8 +1743,8 @@ def js_to_json(code):
if v in ('true', 'false', 'null'):
return v
if v.startswith('"'):
- return v
- if v.startswith("'"):
+ v = re.sub(r"\\'", "'", v[1:-1])
+ elif v.startswith("'"):
v = v[1:-1]
v = re.sub(r"\\\\|\\'|\"", lambda m: {
'\\\\': '\\\\',
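
JS string literals may contain the escape \' even inside double quotes, which is invalid JSON; the change above unescapes it before handing the value to the JSON parser. A runnable illustration:

    import json
    import re

    v = '"It\\\'s"'          # the JS token "It\'s"
    try:
        json.loads(v)        # \' is not a legal JSON escape
    except ValueError:
        pass
    fixed = '"%s"' % re.sub(r"\\'", "'", v[1:-1])
    print(json.loads(fixed))  # It's
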
@@ -1757,6 +1810,15 @@ def args_to_str(args):
return ' '.join(shlex_quote(a) for a in args)
+def error_to_compat_str(err):
+ err_str = str(err)
+ # On Python 2 the error byte string must be decoded with the proper
+ # encoding rather than ASCII
+ if sys.version_info[0] < 3:
+ err_str = err_str.decode(preferredencoding())
+ return err_str
+
+
def mimetype2ext(mt):
_, _, res = mt.rpartition('/')
@@ -1785,6 +1847,10 @@ def urlhandle_detect_ext(url_handle):
return mimetype2ext(getheader('Content-Type'))
+def encode_data_uri(data, mime_type):
+ return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
+
+
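
encode_data_uri packs raw bytes into an RFC 2397 data URI; a one-line usage check of the helper above, reproduced standalone:

    import base64

    def encode_data_uri(data, mime_type):
        return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))

    print(encode_data_uri(b'hello', 'text/plain'))
    # data:text/plain;base64,aGVsbG8=
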
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
@@ -1923,15 +1989,15 @@ def match_filter_func(filter_str):
def parse_dfxp_time_expr(time_expr):
if not time_expr:
- return 0.0
+ return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
- mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
+ mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
- return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))
+ return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
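
The widened clock-value regex also accepts 'HH:MM:SS:fraction' timestamps, turning the final ':' into a decimal point. The function above, reproduced standalone with such an input:

    import re

    def parse_dfxp_time_expr_sketch(time_expr):
        if not time_expr:
            return
        mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
        if mobj:
            return float(mobj.group('time_offset'))
        mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
        if mobj:
            return (3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) +
                    float(mobj.group(3).replace(':', '.')))

    print(parse_dfxp_time_expr_sketch('00:00:01:100'))  # 1.1
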
def srt_subtitles_timecode(seconds):
@@ -1959,7 +2025,7 @@ def dfxp2srt(dfxp_data):
return out
- dfxp = xml.etree.ElementTree.fromstring(dfxp_data.encode('utf-8'))
+ dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')
@@ -1967,10 +2033,15 @@ def dfxp2srt(dfxp_data):
raise ValueError('Invalid dfxp/TTML subtitle')
for para, index in zip(paras, itertools.count(1)):
- begin_time = parse_dfxp_time_expr(para.attrib['begin'])
+ begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
+ dur = parse_dfxp_time_expr(para.attrib.get('dur'))
+ if begin_time is None:
+ continue
if not end_time:
- end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
+ if not dur:
+ continue
+ end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 6bc689b75..01607693e 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
from __future__ import unicode_literals
-__version__ = '2015.09.03'
+__version__ = '2015.12.18'