aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md25
-rw-r--r--test/test_download.py2
-rw-r--r--test/test_utils.py16
-rw-r--r--test/test_youtube_lists.py11
-rw-r--r--test/test_youtube_subtitles.py2
-rw-r--r--test/tests.json74
-rw-r--r--youtube_dl/FileDownloader.py29
-rwxr-xr-xyoutube_dl/InfoExtractors.py496
-rw-r--r--youtube_dl/__init__.py48
-rw-r--r--youtube_dl/update.py8
-rw-r--r--youtube_dl/utils.py90
-rw-r--r--youtube_dl/version.py2
12 files changed, 525 insertions, 278 deletions
diff --git a/README.md b/README.md
index 0ab4b660c..c95201c3f 100644
--- a/README.md
+++ b/README.md
@@ -27,8 +27,12 @@ which means you can modify it, redistribute it or use it however you like.
from an initial value of SIZE.
--dump-user-agent display the current browser identification
--user-agent UA specify a custom user agent
+ --referer REF specify a custom referer, use if the video access
+ is restricted to one domain
--list-extractors List all supported extractors and the URLs they
would handle
+ --proxy URL Use the specified HTTP/HTTPS proxy
+ --no-check-certificate Suppress HTTPS certificate validation.
## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)
@@ -42,10 +46,13 @@ which means you can modify it, redistribute it or use it however you like.
(e.g. 50k or 44.6m)
--max-filesize SIZE Do not download any videos larger than SIZE (e.g.
50k or 44.6m)
+ --date DATE download only videos uploaded in this date
+ --datebefore DATE download only videos uploaded before this date
+ --dateafter DATE download only videos uploaded after this date
## Filesystem Options:
- -t, --title use title in file name
- --id use video ID in file name
+ -t, --title use title in file name (default)
+ --id use only video ID in file name
-l, --literal [deprecated] alias of --title
-A, --auto-number number downloaded files starting from 00000
-o, --output TEMPLATE output filename template. Use %(title)s to get
@@ -78,6 +85,7 @@ which means you can modify it, redistribute it or use it however you like.
file modification time
--write-description write video description to a .description file
--write-info-json write video metadata to a .info.json file
+ --write-thumbnail write thumbnail image to disk
## Verbosity / Simulation Options:
-q, --quiet activates quiet mode
@@ -165,6 +173,19 @@ In some cases, you don't want special characters such as 中, spaces, or &, such
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.mp4 # A simple file name
+# VIDEO SELECTION
+
+Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`. They accept dates in two formats:
+
+ - Absolute dates: Dates in the format `YYYYMMDD`.
+ - Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
+
+Examples:
+
+ $ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months
 $ youtube-dl --date 19700101 #will only download the videos uploaded on January 1, 1970
+ $ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010
+
# FAQ
### Can you please put the -b option back?
diff --git a/test/test_download.py b/test/test_download.py
index cf8028718..3eca333f2 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -67,7 +67,7 @@ class TestDownload(unittest.TestCase):
def generator(test_case):
def test_template(self):
- ie = youtube_dl.InfoExtractors.get_info_extractor(test_case['name'])#getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE')
+ ie = youtube_dl.InfoExtractors.get_info_extractor(test_case['name'])
if not ie._WORKING:
print('Skipping: IE marked as not _WORKING')
return
diff --git a/test/test_utils.py b/test/test_utils.py
index eeaaa7fad..c4b71362e 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -14,6 +14,8 @@ from youtube_dl.utils import timeconvert
from youtube_dl.utils import sanitize_filename
from youtube_dl.utils import unescapeHTML
from youtube_dl.utils import orderedSet
+from youtube_dl.utils import DateRange
+from youtube_dl.utils import unified_strdate
if sys.version_info < (3, 0):
_compat_str = lambda b: b.decode('unicode-escape')
@@ -95,6 +97,20 @@ class TestUtil(unittest.TestCase):
def test_unescape_html(self):
self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
+
+ def test_daterange(self):
+ _20century = DateRange("19000101","20000101")
+ self.assertFalse("17890714" in _20century)
+ _ac = DateRange("00010101")
+ self.assertTrue("19690721" in _ac)
+ _firstmilenium = DateRange(end="10000101")
+ self.assertTrue("07110427" in _firstmilenium)
+
+ def test_unified_dates(self):
+ self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
+ self.assertEqual(unified_strdate('8/7/2009'), '20090708')
+ self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
+ self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
if __name__ == '__main__':
unittest.main()
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index c7f00af32..78657b51c 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -29,7 +29,7 @@ class FakeDownloader(FileDownloader):
self.params = parameters
def to_screen(self, s):
print(s)
- def trouble(self, s):
+ def trouble(self, s, tb=None):
raise Exception(s)
def extract_info(self, url):
self.result.append(url)
@@ -71,6 +71,13 @@ class TestYoutubeLists(unittest.TestCase):
ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
self.assertFalse('pElCt5oNDuI' in ytie_results)
self.assertFalse('KdPEApIVdWM' in ytie_results)
+
+ def test_youtube_playlist_empty(self):
+ dl = FakeDownloader()
+ ie = YoutubePlaylistIE(dl)
+ result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')[0]
+ self.assertIsPlaylist(result)
+ self.assertEqual(len(result['entries']), 0)
def test_youtube_course(self):
dl = FakeDownloader()
@@ -90,7 +97,7 @@ class TestYoutubeLists(unittest.TestCase):
self.assertTrue(len(result['entries']) > 90)
#test autogenerated channel
result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')[0]
- self.assertTrue(len(result['entries']) > 20)
+ self.assertTrue(len(result['entries']) >= 18)
def test_youtube_user(self):
dl = FakeDownloader()
diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py
index b4909091b..5b3f289af 100644
--- a/test/test_youtube_subtitles.py
+++ b/test/test_youtube_subtitles.py
@@ -30,7 +30,7 @@ class FakeDownloader(object):
self.params = parameters
def to_screen(self, s):
print(s)
- def trouble(self, s):
+ def trouble(self, s, tb=None):
raise Exception(s)
def download(self, x):
self.result.append(x)
diff --git a/test/tests.json b/test/tests.json
index 2b56738a0..b4d511dd8 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -112,9 +112,8 @@
{
"name": "Escapist",
"url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
- "file": "6618-Breaking-Down-Baldurs-Gate.flv",
- "md5": "c6793dbda81388f4264c1ba18684a74d",
- "skip": "Fails with timeout on Travis"
+ "file": "6618-Breaking-Down-Baldurs-Gate.mp4",
+ "md5": "c6793dbda81388f4264c1ba18684a74d"
},
{
"name": "GooglePlus",
@@ -344,8 +343,73 @@
"file": "17258355236.mp4",
"md5": "7c6a514d691b034ccf8567999e9e88a3",
"info_dict": {
- "title": "A sample video from LeeAnn. (If you need an idea..."
+ "title": "Calling all Pris! - A sample video from LeeAnn. (If you need an idea..."
+ }
+ },
+ {
+ "name": "SoundcloudSet",
+ "url":"https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep",
+ "playlist":[
+ {
+ "file":"30510138.mp3",
+ "md5":"f9136bf103901728f29e419d2c70f55d",
+ "info_dict": {
+ "title":"D-D-Dance"
+ }
+ },
+ {
+ "file":"47127625.mp3",
+ "md5":"09b6758a018470570f8fd423c9453dd8",
+ "info_dict": {
+ "title":"The Royal Concept - Gimme Twice"
+ }
+ },
+ {
+ "file":"47127627.mp3",
+ "md5":"154abd4e418cea19c3b901f1e1306d9c",
+ "info_dict": {
+ "title":"Goldrushed"
+ }
+ },
+ {
+ "file":"47127629.mp3",
+ "md5":"2f5471edc79ad3f33a683153e96a79c1",
+ "info_dict": {
+ "title":"In the End"
+ }
+ },
+ {
+ "file":"47127631.mp3",
+ "md5":"f9ba87aa940af7213f98949254f1c6e2",
+ "info_dict": {
+ "title":"Knocked Up"
+ }
+ },
+ {
+ "file":"75206121.mp3",
+ "md5":"f9d1fe9406717e302980c30de4af9353",
+ "info_dict": {
+ "title":"World On Fire"
+ }
+ }
+ ]
+ },
+ {
+ "name":"Bandcamp",
+ "url":"http://youtube-dl.bandcamp.com/track/youtube-dl-test-song",
+ "file":"1812978515.mp3",
+ "md5":"cdeb30cdae1921719a3cbcab696ef53c",
+ "info_dict": {
+ "title":"youtube-dl test song \"'/\\ä↭"
+ }
+ },
+ {
+ "name": "RedTube",
+ "url": "http://www.redtube.com/66418",
+ "file": "66418.mp4",
+ "md5": "7b8c22b5e7098a3e1c09709df1126d2d",
+ "info_dict":{
+ "title":"Sucked on a toilet"
}
}
-
]
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index d0378fb14..574863e7c 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -7,6 +7,7 @@ import math
import io
import os
import re
+import shutil
import socket
import subprocess
import sys
@@ -79,6 +80,7 @@ class FileDownloader(object):
updatetime: Use the Last-modified header to set output file timestamps.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
+ writethumbnail: Write the thumbnail image to a file
writesubtitles: Write the video subtitles to a file
onlysubtitles: Downloads only the subtitles of the video
allsubtitles: Downloads all the subtitles of the video
@@ -89,6 +91,7 @@ class FileDownloader(object):
keepvideo: Keep the video file after post-processing
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
+ daterange: A DateRange object, download only if the upload_date is in the range.
"""
params = None
@@ -344,12 +347,13 @@ class FileDownloader(object):
"""Report download progress."""
if self.params.get('noprogress', False):
return
+ clear_line = (u'\x1b[K' if sys.stderr.isatty() and os.name != 'nt' else u'')
if self.params.get('progress_with_newline', False):
self.to_screen(u'[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str))
else:
- self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
- (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
+ self.to_screen(u'\r%s[download] %s of %s at %s ETA %s' %
+ (clear_line, percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
@@ -424,6 +428,11 @@ class FileDownloader(object):
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+ date = info_dict.get('upload_date', None)
+ if date is not None:
+ dateRange = self.params.get('daterange', DateRange())
+ if date not in dateRange:
+ return u'[download] %s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
return None
def extract_info(self, url, download = True, ie_name = None):
@@ -449,7 +458,7 @@ class FileDownloader(object):
# Warn if the _WORKING attribute is False
if not ie.working():
- self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
+ self.report_warning(u'the program functionality for this site has been marked as broken, '
u'and will probably not work. If you want to go on, use the -i option.')
# Suitable InfoExtractor found
@@ -651,6 +660,20 @@ class FileDownloader(object):
self.report_error(u'Cannot write metadata to JSON file ' + infofn)
return
+ if self.params.get('writethumbnail', False):
+ if 'thumbnail' in info_dict:
+ thumb_format = info_dict['thumbnail'].rpartition(u'/')[2].rpartition(u'.')[2]
+ if not thumb_format:
+ thumb_format = 'jpg'
+ thumb_filename = filename.rpartition('.')[0] + u'.' + thumb_format
+ self.to_screen(u'[%s] %s: Downloading thumbnail ...' %
+ (info_dict['extractor'], info_dict['id']))
+ uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
+ with open(thumb_filename, 'wb') as thumbf:
+ shutil.copyfileobj(uf, thumbf)
+ self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
+ (info_dict['extractor'], info_dict['id'], thumb_filename))
+
if not self.params.get('skip_download', False):
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
success = True
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 4d145dfa1..36343882b 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -124,8 +124,8 @@ class InfoExtractor(object):
errnote = u'Unable to download webpage'
raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])
- def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
- """ Returns the data of the page as a string """
+ def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
+ """ Returns a tuple (page content as string, URL handle) """
urlh = self._request_webpage(url_or_request, video_id, note, errnote)
content_type = urlh.headers.get('Content-Type', '')
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
@@ -142,7 +142,12 @@ class InfoExtractor(object):
self.to_screen(u'Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
- return webpage_bytes.decode(encoding, 'replace')
+ content = webpage_bytes.decode(encoding, 'replace')
+ return (content, urlh)
+
+ def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+ """ Returns the data of the page as a string """
+ return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
@@ -206,7 +211,7 @@ class YoutubeIE(InfoExtractor):
([0-9A-Za-z_-]+) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
$"""
- _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+ _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
@@ -422,7 +427,7 @@ class YoutubeIE(InfoExtractor):
# Log in
login_form_strs = {
- u'continue': u'http://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
u'Email': username,
u'GALX': galx,
u'Passwd': password,
@@ -482,12 +487,12 @@ class YoutubeIE(InfoExtractor):
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
- url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
+ url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
video_id = self._extract_id(url)
# Get video webpage
self.report_video_webpage_download(video_id)
- url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+ url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
request = compat_urllib_request.Request(url)
try:
video_webpage_bytes = compat_urllib_request.urlopen(request).read()
@@ -562,12 +567,7 @@ class YoutubeIE(InfoExtractor):
mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
if mobj is not None:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
- format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
- for expression in format_expressions:
- try:
- upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
- except:
- pass
+ upload_date = unified_strdate(upload_date)
# description
video_description = get_element_by_id("eow-description", video_webpage)
@@ -784,13 +784,11 @@ class MetacafeIE(InfoExtractor):
'ext': video_extension.decode('utf-8'),
}]
-
class DailymotionIE(InfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
IE_NAME = u'dailymotion'
- _WORKING = False
def _real_extract(self, url):
# Extract id and simplified title from URL
@@ -1075,13 +1073,7 @@ class VimeoIE(InfoExtractor):
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request(url, None, std_headers)
- try:
- self.report_download_webpage(video_id)
- webpage_bytes = compat_urllib_request.urlopen(request).read()
- webpage = webpage_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(request, video_id)
# Now we begin extracting as much information as we can from what we
# retrieved. First we extract the information common to all extractors,
@@ -1093,7 +1085,10 @@ class VimeoIE(InfoExtractor):
config = webpage.split(' = {config:')[1].split(',assets:')[0]
config = json.loads(config)
except:
- self._downloader.report_error(u'unable to extract info section')
+ if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
+ self._downloader.report_error(u'The author has restricted the access to this video, try with the "--referer" option')
+ else:
+ self._downloader.report_error(u'unable to extract info section')
return
# Extract title
@@ -1685,10 +1680,6 @@ class YoutubePlaylistIE(InfoExtractor):
"""Receives a URL and returns True if suitable for this IE."""
return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
- def report_download_page(self, playlist_id, pagenum):
- """Report attempt to download playlist page with given number."""
- self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
-
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
@@ -1702,14 +1693,8 @@ class YoutubePlaylistIE(InfoExtractor):
videos = []
while True:
- self.report_download_page(playlist_id, page_num)
-
url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
- try:
- page = compat_urllib_request.urlopen(url).read().decode('utf8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)
try:
response = json.loads(page)
@@ -1720,12 +1705,11 @@ class YoutubePlaylistIE(InfoExtractor):
if 'feed' not in response:
self._downloader.report_error(u'Got a malformed response from YouTube API')
return
+ playlist_title = response['feed']['title']['$t']
if 'entry' not in response['feed']:
# Number of videos is a multiple of self._MAX_RESULTS
break
- playlist_title = response['feed']['title']['$t']
-
videos += [ (entry['yt$position']['$t'], entry['content']['src'])
for entry in response['feed']['entry']
if 'content' in entry ]
@@ -1749,10 +1733,6 @@ class YoutubeChannelIE(InfoExtractor):
_MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
IE_NAME = u'youtube:channel'
- def report_download_page(self, channel_id, pagenum):
- """Report attempt to download channel page with given number."""
- self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum))
-
def extract_videos_from_page(self, page):
ids_in_page = []
for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
@@ -1772,14 +1752,9 @@ class YoutubeChannelIE(InfoExtractor):
video_ids = []
pagenum = 1
- self.report_download_page(channel_id, pagenum)
url = self._TEMPLATE_URL % (channel_id, pagenum)
- request = compat_urllib_request.Request(url)
- try:
- page = compat_urllib_request.urlopen(request).read().decode('utf8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ page = self._download_webpage(url, channel_id,
+ u'Downloading page #%s' % pagenum)
# Extract video identifiers
ids_in_page = self.extract_videos_from_page(page)
@@ -1790,14 +1765,9 @@ class YoutubeChannelIE(InfoExtractor):
while True:
pagenum = pagenum + 1
- self.report_download_page(channel_id, pagenum)
url = self._MORE_PAGES_URL % (pagenum, channel_id)
- request = compat_urllib_request.Request(url)
- try:
- page = compat_urllib_request.urlopen(request).read().decode('utf8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ page = self._download_webpage(url, channel_id,
+ u'Downloading page #%s' % pagenum)
page = json.loads(page)
@@ -1824,11 +1794,6 @@ class YoutubeUserIE(InfoExtractor):
_VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
IE_NAME = u'youtube:user'
- def report_download_page(self, username, start_index):
- """Report attempt to download user page."""
- self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
- (username, start_index, start_index + self._GDATA_PAGE_SIZE))
-
def _real_extract(self, url):
# Extract username
mobj = re.match(self._VALID_URL, url)
@@ -1848,15 +1813,10 @@ class YoutubeUserIE(InfoExtractor):
while True:
start_index = pagenum * self._GDATA_PAGE_SIZE + 1
- self.report_download_page(username, start_index)
- request = compat_urllib_request.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
-
- try:
- page = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
+ page = self._download_webpage(gdata_url, username,
+ u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))
# Extract video identifiers
ids_in_page = []
@@ -1890,11 +1850,6 @@ class BlipTVUserIE(InfoExtractor):
_PAGE_SIZE = 12
IE_NAME = u'blip.tv:user'
- def report_download_page(self, username, pagenum):
- """Report attempt to download user page."""
- self.to_screen(u'user %s: Downloading video ids from page %d' %
- (username, pagenum))
-
def _real_extract(self, url):
# Extract username
mobj = re.match(self._VALID_URL, url)
@@ -1906,15 +1861,9 @@ class BlipTVUserIE(InfoExtractor):
page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
- request = compat_urllib_request.Request(url)
-
- try:
- page = compat_urllib_request.urlopen(request).read().decode('utf-8')
- mobj = re.search(r'data-users-id="([^"]+)"', page)
- page_base = page_base % mobj.group(1)
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ page = self._download_webpage(url, username, u'Downloading user page')
+ mobj = re.search(r'data-users-id="([^"]+)"', page)
+ page_base = page_base % mobj.group(1)
# Download video ids using BlipTV Ajax calls. Result size per
@@ -1926,14 +1875,9 @@ class BlipTVUserIE(InfoExtractor):
pagenum = 1
while True:
- self.report_download_page(username, pagenum)
url = page_base + "&page=" + str(pagenum)
- request = compat_urllib_request.Request( url )
- try:
- page = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % str(err))
- return
+ page = self._download_webpage(url, username,
+ u'Downloading video ids from page %d' % pagenum)
# Extract video identifiers
ids_in_page = []
@@ -2292,12 +2236,6 @@ class ComedyCentralIE(InfoExtractor):
"""Receives a URL and returns True if suitable for this IE."""
return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
- def report_config_download(self, episode_id, media_id):
- self.to_screen(u'%s: Downloading configuration for %s' % (episode_id, media_id))
-
- def report_index_download(self, episode_id):
- self.to_screen(u'%s: Downloading show index' % episode_id)
-
def _print_formats(self, formats):
print('Available formats:')
for x in formats:
@@ -2331,24 +2269,15 @@ class ComedyCentralIE(InfoExtractor):
else:
epTitle = mobj.group('episode')
- req = compat_urllib_request.Request(url)
self.report_extraction(epTitle)
- try:
- htmlHandle = compat_urllib_request.urlopen(req)
- html = htmlHandle.read()
- webpage = html.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.report_error(u'Invalid redirected URL: ' + url)
- return
+ raise ExtractorError(u'Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
- self._downloader.report_error(u'Redirected URL is still not specific: ' + url)
- return
+ raise ExtractorError(u'Redirected URL is still not specific: ' + url)
epTitle = mobj.group('episode')
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
@@ -2360,19 +2289,15 @@ class ComedyCentralIE(InfoExtractor):
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
- self._downloader.report_error(u'unable to find Flash URL in webpage ' + url)
- return
+ raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
- self.report_index_download(epTitle)
- try:
- indexXml = compat_urllib_request.urlopen(indexUrl).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download episode index: ' + compat_str(err))
- return
+ indexXml = self._download_webpage(indexUrl, epTitle,
+ u'Downloading show index',
+ u'unable to download episode index')
results = []
@@ -2383,17 +2308,12 @@ class ComedyCentralIE(InfoExtractor):
shortMediaId = mediaId.split(':')[-1]
showId = mediaId.split(':')[-2].replace('.com', '')
officialTitle = itemEl.findall('./title')[0].text
- officialDate = itemEl.findall('./pubDate')[0].text
+ officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
compat_urllib_parse.urlencode({'uri': mediaId}))
- configReq = compat_urllib_request.Request(configUrl)
- self.report_config_download(epTitle, shortMediaId)
- try:
- configXml = compat_urllib_request.urlopen(configReq).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ configXml = self._download_webpage(configUrl, epTitle,
+ u'Downloading configuration for %s' % shortMediaId)
cdoc = xml.etree.ElementTree.fromstring(configXml)
turls = []
@@ -2450,9 +2370,6 @@ class EscapistIE(InfoExtractor):
_VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
IE_NAME = u'escapist'
- def report_config_download(self, showName):
- self.to_screen(u'%s: Downloading configuration' % showName)
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
@@ -2462,14 +2379,7 @@ class EscapistIE(InfoExtractor):
videoId = mobj.group('episode')
self.report_extraction(showName)
- try:
- webPage = compat_urllib_request.urlopen(url)
- webPageBytes = webPage.read()
- m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
- webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: ' + compat_str(err))
- return
+ webPage = self._download_webpage(url, showName)
descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
description = unescapeHTML(descMatch.group(1))
@@ -2480,14 +2390,9 @@ class EscapistIE(InfoExtractor):
configUrlMatch = re.search('config=(.*)$', playerUrl)
configUrl = compat_urllib_parse.unquote(configUrlMatch.group(1))
- self.report_config_download(showName)
- try:
- configJSON = compat_urllib_request.urlopen(configUrl)
- m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
- configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download configuration: ' + compat_str(err))
- return
+ configJSON = self._download_webpage(configUrl, showName,
+ u'Downloading configuration',
+ u'unable to download configuration')
# Technically, it's JavaScript, not JSON
configJSON = configJSON.replace("'", '"')
@@ -2665,40 +2570,32 @@ class SoundcloudIE(InfoExtractor):
# extract simple title (uploader + slug of song title)
slug_title = mobj.group(2)
simple_title = uploader + u'-' + slug_title
+ full_title = '%s/%s' % (uploader, slug_title)
- self.report_resolve('%s/%s' % (uploader, slug_title))
+ self.report_resolve(full_title)
url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
- request = compat_urllib_request.Request(resolv_url)
- try:
- info_json_bytes = compat_urllib_request.urlopen(request).read()
- info_json = info_json_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
- return
+ info_json = self._download_webpage(resolv_url, full_title, u'Downloading info JSON')
info = json.loads(info_json)
video_id = info['id']
- self.report_extraction('%s/%s' % (uploader, slug_title))
+ self.report_extraction(full_title)
streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
- request = compat_urllib_request.Request(streams_url)
- try:
- stream_json_bytes = compat_urllib_request.urlopen(request).read()
- stream_json = stream_json_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
- return
+ stream_json = self._download_webpage(streams_url, full_title,
+ u'Downloading stream definitions',
+ u'unable to download stream definitions')
streams = json.loads(stream_json)
mediaURL = streams['http_mp3_128_url']
+ upload_date = unified_strdate(info['created_at'])
return [{
'id': info['id'],
'url': mediaURL,
'uploader': info['user']['username'],
- 'upload_date': info['created_at'],
+ 'upload_date': upload_date,
'title': info['title'],
'ext': u'mp3',
'description': info['description'],
@@ -2714,7 +2611,7 @@ class SoundcloudSetIE(InfoExtractor):
"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)'
- IE_NAME = u'soundcloud'
+ IE_NAME = u'soundcloud:set'
def report_resolve(self, video_id):
"""Report information extraction."""
@@ -2731,18 +2628,13 @@ class SoundcloudSetIE(InfoExtractor):
# extract simple title (uploader + slug of song title)
slug_title = mobj.group(2)
simple_title = uploader + u'-' + slug_title
+ full_title = '%s/sets/%s' % (uploader, slug_title)
- self.report_resolve('%s/sets/%s' % (uploader, slug_title))
+ self.report_resolve(full_title)
url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
- request = compat_urllib_request.Request(resolv_url)
- try:
- info_json_bytes = compat_urllib_request.urlopen(request).read()
- info_json = info_json_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
- return
+ info_json = self._download_webpage(resolv_url, full_title)
videos = []
info = json.loads(info_json)
@@ -2751,19 +2643,14 @@ class SoundcloudSetIE(InfoExtractor):
self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err['error_message']))
return
+ self.report_extraction(full_title)
for track in info['tracks']:
video_id = track['id']
- self.report_extraction('%s/sets/%s' % (uploader, slug_title))
streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
- request = compat_urllib_request.Request(streams_url)
- try:
- stream_json_bytes = compat_urllib_request.urlopen(request).read()
- stream_json = stream_json_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
- return
+ stream_json = self._download_webpage(streams_url, video_id, u'Downloading track info JSON')
+ self.report_extraction(video_id)
streams = json.loads(stream_json)
mediaURL = streams['http_mp3_128_url']
@@ -2771,7 +2658,7 @@ class SoundcloudSetIE(InfoExtractor):
'id': video_id,
'url': mediaURL,
'uploader': track['user']['username'],
- 'upload_date': track['created_at'],
+ 'upload_date': unified_strdate(track['created_at']),
'title': track['title'],
'ext': u'mp3',
'description': track['description'],
@@ -3158,18 +3045,11 @@ class YoukuIE(InfoExtractor):
info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
- request = compat_urllib_request.Request(info_url, None, std_headers)
- try:
- self.report_download_webpage(video_id)
- jsondata = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ jsondata = self._download_webpage(info_url, video_id)
self.report_extraction(video_id)
try:
- jsonstr = jsondata.decode('utf-8')
- config = json.loads(jsonstr)
+ config = json.loads(jsondata)
video_title = config['data'][0]['title']
seed = config['data'][0]['seed']
@@ -3237,15 +3117,8 @@ class XNXXIE(InfoExtractor):
return
video_id = mobj.group(1)
- self.report_download_webpage(video_id)
-
# Get webpage content
- try:
- webpage_bytes = compat_urllib_request.urlopen(url).read()
- webpage = webpage_bytes.decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video webpage: %s' % err)
- return
+ webpage = self._download_webpage(url, video_id)
result = re.search(self.VIDEO_URL_RE, webpage)
if result is None:
@@ -3317,12 +3190,7 @@ class GooglePlusIE(InfoExtractor):
# Step 1, Retrieve post webpage to extract further information
self.report_extract_entry(post_url)
- request = compat_urllib_request.Request(post_url)
- try:
- webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve entry webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
# Extract update date
upload_date = None
@@ -3359,12 +3227,7 @@ class GooglePlusIE(InfoExtractor):
self._downloader.report_error(u'unable to extract video page URL')
video_page = mobj.group(1)
- request = compat_urllib_request.Request(video_page)
- try:
- webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
self.report_extract_vid_page(video_page)
@@ -3441,7 +3304,13 @@ class JustinTVIE(InfoExtractor):
# starts at 1 and increases. Can we treat all parts as one video?
_VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
- ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$"""
+ (?:
+ (?P<channelid>[^/]+)|
+ (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
+ (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
+ )
+ /?(?:\#.*)?$
+ """
_JUSTIN_PAGE_LIMIT = 100
IE_NAME = u'justin.tv'
@@ -3451,20 +3320,15 @@ class JustinTVIE(InfoExtractor):
(channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
# Return count of items, list of *valid* items
- def _parse_page(self, url):
- try:
- urlh = compat_urllib_request.urlopen(url)
- webpage_bytes = urlh.read()
- webpage = webpage_bytes.decode('utf-8', 'ignore')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download video info JSON: %s' % compat_str(err))
- return
+ def _parse_page(self, url, video_id):
+ webpage = self._download_webpage(url, video_id,
+ u'Downloading video info JSON',
+ u'unable to download video info JSON')
response = json.loads(webpage)
if type(response) != list:
error_text = response.get('error', 'unknown error')
- self._downloader.report_error(u'Justin.tv API: %s' % error_text)
- return
+ raise ExtractorError(u'Justin.tv API: %s' % error_text)
info = []
for clip in response:
video_url = clip['video_file_url']
@@ -3488,18 +3352,67 @@ class JustinTVIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'invalid URL: %s' % url)
- api = 'http://api.justin.tv'
- video_id = mobj.group(mobj.lastindex)
+ api_base = 'http://api.justin.tv'
paged = False
- if mobj.lastindex == 1:
+ if mobj.group('channelid'):
paged = True
- api += '/channel/archives/%s.json'
+ video_id = mobj.group('channelid')
+ api = api_base + '/channel/archives/%s.json' % video_id
+ elif mobj.group('chapterid'):
+ chapter_id = mobj.group('chapterid')
+
+ webpage = self._download_webpage(url, chapter_id)
+ m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
+ if not m:
+ raise ExtractorError(u'Cannot find archive of a chapter')
+ archive_id = m.group(1)
+
+ api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
+ chapter_info_xml = self._download_webpage(api, chapter_id,
+ note=u'Downloading chapter information',
+ errnote=u'Chapter information download failed')
+ doc = xml.etree.ElementTree.fromstring(chapter_info_xml)
+ for a in doc.findall('.//archive'):
+ if archive_id == a.find('./id').text:
+ break
+ else:
+ raise ExtractorError(u'Could not find chapter in chapter information')
+
+ video_url = a.find('./video_file_url').text
+ video_ext = video_url.rpartition('.')[2] or u'flv'
+
+ chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
+ chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
+ note='Downloading chapter metadata',
+ errnote='Download of chapter metadata failed')
+ chapter_info = json.loads(chapter_info_json)
+
+ bracket_start = int(doc.find('.//bracket_start').text)
+ bracket_end = int(doc.find('.//bracket_end').text)
+
+ # TODO determine start (and probably fix up file)
+ # youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
+ #video_url += u'?start=' + TODO:start_timestamp
+ # bracket_start is 13290, but we want 51670615
+ self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
+ u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))
+
+ info = {
+ 'id': u'c' + chapter_id,
+ 'url': video_url,
+ 'ext': video_ext,
+ 'title': chapter_info['title'],
+ 'thumbnail': chapter_info['preview'],
+ 'description': chapter_info['description'],
+ 'uploader': chapter_info['channel']['display_name'],
+ 'uploader_id': chapter_info['channel']['name'],
+ }
+ return [info]
else:
- api += '/broadcast/by_archive/%s.json'
- api = api % (video_id,)
+ video_id = mobj.group('videoid')
+ api = api_base + '/broadcast/by_archive/%s.json' % video_id
self.report_extraction(video_id)
@@ -3510,7 +3423,7 @@ class JustinTVIE(InfoExtractor):
if paged:
self.report_download_page(video_id, offset)
page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
- page_count, page_info = self._parse_page(page_url)
+ page_count, page_info = self._parse_page(page_url, video_id)
info.extend(page_info)
if not paged or page_count != limit:
break
@@ -3523,8 +3436,7 @@ class FunnyOrDieIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.report_error(u'invalid URL: %s' % url)
- return
+ raise ExtractorError(u'invalid URL: %s' % url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
@@ -3557,7 +3469,8 @@ class FunnyOrDieIE(InfoExtractor):
return [info]
class SteamIE(InfoExtractor):
- _VALID_URL = r"""http://store.steampowered.com/
+ _VALID_URL = r"""http://store\.steampowered\.com/
+ (agecheck/)?
(?P<urltype>video|app)/ #If the page is only for videos or for a game
(?P<gameID>\d+)/?
(?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
@@ -3623,42 +3536,35 @@ class UstreamIE(InfoExtractor):
return [info]
class WorldStarHipHopIE(InfoExtractor):
- _VALID_URL = r'http://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
+ _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
IE_NAME = u'WorldStarHipHop'
def _real_extract(self, url):
- _src_url = r"""(http://hw-videos.*(?:mp4|flv))"""
-
- webpage_src = compat_urllib_request.urlopen(url).read()
- webpage_src = webpage_src.decode('utf-8')
-
- mobj = re.search(_src_url, webpage_src)
+ _src_url = r'so\.addVariable\("file","(.*?)"\)'
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
+ webpage_src = self._download_webpage(url, video_id)
+
+ mobj = re.search(_src_url, webpage_src)
+
if mobj is not None:
- video_url = mobj.group()
+ video_url = mobj.group(1)
if 'mp4' in video_url:
ext = 'mp4'
else:
ext = 'flv'
else:
- self._downloader.report_error(u'Cannot find video url for %s' % video_id)
- return
+ raise ExtractorError(u'Cannot find video url for %s' % video_id)
- _title = r"""<title>(.*)</title>"""
+ mobj = re.search(r"<title>(.*)</title>", webpage_src)
- mobj = re.search(_title, webpage_src)
-
- if mobj is not None:
- title = mobj.group(1)
- else:
- title = 'World Start Hip Hop - %s' % time.ctime()
-
- _thumbnail = r"""rel="image_src" href="(.*)" />"""
- mobj = re.search(_thumbnail, webpage_src)
+ if mobj is None:
+ raise ExtractorError(u'Cannot determine title')
+ title = mobj.group(1)
+ mobj = re.search(r'rel="image_src" href="(.*)" />', webpage_src)
# Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
if mobj is not None:
thumbnail = mobj.group(1)
@@ -3756,7 +3662,7 @@ class YouPornIE(InfoExtractor):
self._downloader.report_warning(u'unable to extract video date')
upload_date = None
else:
- upload_date = result.group('date').strip()
+ upload_date = unified_strdate(result.group('date').strip())
# Get the video uploader
result = re.search(r'Submitted:</label>(?P<uploader>.*)</li>', webpage)
@@ -3863,7 +3769,7 @@ class PornotubeIE(InfoExtractor):
if result is None:
self._downloader.report_error(u'unable to extract video title')
return
- upload_date = result.group('date')
+ upload_date = unified_strdate(result.group('date'))
info = {'id': video_id,
'url': video_url,
@@ -3990,12 +3896,13 @@ class KeekIE(InfoExtractor):
return [info]
class TEDIE(InfoExtractor):
- _VALID_URL=r'''http://www.ted.com/
+ _VALID_URL=r'''http://www\.ted\.com/
(
((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
|
((?P<type_talk>talks)) # We have a simple talk
)
+ (/lang/(.*?))? # The url may contain the language
/(?P<name>\w+) # Here goes the name and then ".html"
'''
@@ -4246,7 +4153,7 @@ class ARDIE(InfoExtractor):
return [info]
class TumblrIE(InfoExtractor):
- _VALID_URL = r'http://(?P<blog_name>.*?).tumblr.com/((post)|(video))/(?P<id>\d*)/(.*?)'
+ _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
@@ -4256,7 +4163,7 @@ class TumblrIE(InfoExtractor):
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage = self._download_webpage(url, video_id)
- re_video = r'src=\\x22(?P<video_url>http://%s.tumblr.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
+ re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
video = re.search(re_video, webpage)
if video is None:
self.to_screen("No video founded")
@@ -4269,8 +4176,8 @@ class TumblrIE(InfoExtractor):
# The only place where you can get a title, it's not complete,
# but searching in other places doesn't work for all videos
- re_title = r'<title>(.*?) - (?P<title>.*?)</title>'
- title = unescapeHTML(re.search(re_title, webpage).group('title'))
+ re_title = r'<title>(?P<title>.*?)</title>'
+ title = unescapeHTML(re.search(re_title, webpage, re.DOTALL).group('title'))
return [{'id': video_id,
'url': video_url,
@@ -4279,6 +4186,83 @@ class TumblrIE(InfoExtractor):
'ext': ext
}]
+class BandcampIE(InfoExtractor):
+ _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ title = mobj.group('title')
+ webpage = self._download_webpage(url, title)
+ # We get the link to the free download page
+ m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
+ if m_download is None:
+ self._downloader.report_error('No free songs founded')
+ return
+ download_link = m_download.group(1)
+ id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
+ webpage, re.MULTILINE|re.DOTALL).group('id')
+
+ download_webpage = self._download_webpage(download_link, id,
+ 'Downloading free downloads page')
+ # We get the dictionary of the track from some javascrip code
+ info = re.search(r'items: (.*?),$',
+ download_webpage, re.MULTILINE).group(1)
+ info = json.loads(info)[0]
+ # We pick mp3-320 for now, until format selection can be easily implemented.
+ mp3_info = info[u'downloads'][u'mp3-320']
+ # If we try to use this url it says the link has expired
+ initial_url = mp3_info[u'url']
+ re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
+ m_url = re.match(re_url, initial_url)
+ #We build the url we will use to get the final track url
+ # This url is build in Bandcamp in the script download_bunde_*.js
+ request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
+ final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
+ # If we could correctly generate the .rand field the url would be
+ #in the "download_url" key
+ final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)
+
+ track_info = {'id':id,
+ 'title' : info[u'title'],
+ 'ext' : 'mp3',
+ 'url' : final_url,
+ 'thumbnail' : info[u'thumb_url'],
+ 'uploader' : info[u'artist']
+ }
+
+ return [track_info]
+
+class RedTubeIE(InfoExtractor):
+ """Information Extractor for redtube"""
+ _VALID_URL = r'(?:http://)?(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
+
+ def _real_extract(self,url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+
+ video_id = mobj.group('id')
+ video_extension = 'mp4'
+ webpage = self._download_webpage(url, video_id)
+ self.report_extraction(video_id)
+ mobj = re.search(r'<source src="'+'(.+)'+'" type="video/mp4">',webpage)
+
+ if mobj is None:
+ raise ExtractorError(u'Unable to extract media URL')
+
+ video_url = mobj.group(1)
+ mobj = re.search('<h1 class="videoTitle slidePanelMovable">(.+)</h1>',webpage)
+ if mobj is None:
+ raise ExtractorError(u'Unable to extract title')
+ video_title = mobj.group(1)
+
+ return [{
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': video_extension,
+ 'title': video_title,
+ }]
+
def gen_extractors():
""" Return a list of an instance of every supported extractor.
@@ -4334,6 +4318,8 @@ def gen_extractors():
LiveLeakIE(),
ARDIE(),
TumblrIE(),
+ BandcampIE(),
+ RedTubeIE(),
GenericIE()
]
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 74375175d..05cb6e36a 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -25,10 +25,12 @@ __authors__ = (
'Jeff Crouse',
'Osama Khalid',
'Michael Walter',
+ 'M. Yasoob Ullah Khalid',
)
__license__ = 'Public Domain'
+import codecs
import getpass
import optparse
import os
@@ -140,9 +142,14 @@ def parseOpts(overrideArguments=None):
help='display the current browser identification', default=False)
general.add_option('--user-agent',
dest='user_agent', help='specify a custom user agent', metavar='UA')
+ general.add_option('--referer',
+ dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
+ metavar='REF', default=None)
general.add_option('--list-extractors',
action='store_true', dest='list_extractors',
help='List all supported extractors and the URLs they would handle', default=False)
+ general.add_option('--proxy', dest='proxy', default=None, help='Use the specified HTTP/HTTPS proxy', metavar='URL')
+ general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
general.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
selection.add_option('--playlist-start',
@@ -154,6 +161,9 @@ def parseOpts(overrideArguments=None):
selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)
selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
+ selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
+ selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None)
+ selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None)
authentication.add_option('-u', '--username',
@@ -230,9 +240,9 @@ def parseOpts(overrideArguments=None):
help='print downloaded pages to debug problems(very verbose)')
filesystem.add_option('-t', '--title',
- action='store_true', dest='usetitle', help='use title in file name', default=False)
+ action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
filesystem.add_option('--id',
- action='store_true', dest='useid', help='use video ID in file name', default=False)
+ action='store_true', dest='useid', help='use only video ID in file name', default=False)
filesystem.add_option('-l', '--literal',
action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
filesystem.add_option('-A', '--auto-number',
@@ -277,6 +287,9 @@ def parseOpts(overrideArguments=None):
filesystem.add_option('--write-info-json',
action='store_true', dest='writeinfojson',
help='write video metadata to a .info.json file', default=False)
+ filesystem.add_option('--write-thumbnail',
+ action='store_true', dest='writethumbnail',
+ help='write thumbnail image to disk', default=False)
postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
@@ -324,6 +337,11 @@ def parseOpts(overrideArguments=None):
return parser, opts, args
def _real_main(argv=None):
+ # Compatibility fixes for Windows
+ if sys.platform == 'win32':
+ # https://github.com/rg3/youtube-dl/issues/820
+ codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
+
parser, opts, args = parseOpts(argv)
# Open appropriate CookieJar
@@ -342,6 +360,10 @@ def _real_main(argv=None):
# Set user agent
if opts.user_agent is not None:
std_headers['User-Agent'] = opts.user_agent
+
+ # Set referer
+ if opts.referer is not None:
+ std_headers['Referer'] = opts.referer
# Dump user agent
if opts.dump_user_agent:
@@ -366,8 +388,16 @@ def _real_main(argv=None):
# General configuration
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
- proxy_handler = compat_urllib_request.ProxyHandler()
- opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
+ if opts.proxy:
+ proxies = {'http': opts.proxy, 'https': opts.proxy}
+ else:
+ proxies = compat_urllib_request.getproxies()
+ # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
+ if 'http' in proxies and 'https' not in proxies:
+ proxies['https'] = proxies['http']
+ proxy_handler = compat_urllib_request.ProxyHandler(proxies)
+ https_handler = make_HTTPS_handler(opts)
+ opener = compat_urllib_request.build_opener(https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
@@ -440,6 +470,10 @@ def _real_main(argv=None):
if opts.recodevideo is not None:
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg']:
parser.error(u'invalid video recode format specified')
+ if opts.date is not None:
+ date = DateRange.day(opts.date)
+ else:
+ date = DateRange(opts.dateafter, opts.datebefore)
if sys.version_info < (3,):
# In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
@@ -452,7 +486,7 @@ def _real_main(argv=None):
or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
or (opts.useid and u'%(id)s.%(ext)s')
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
- or u'%(id)s.%(ext)s')
+ or u'%(title)s-%(id)s.%(ext)s')
# File downloader
fd = FileDownloader({
@@ -491,6 +525,7 @@ def _real_main(argv=None):
'updatetime': opts.updatetime,
'writedescription': opts.writedescription,
'writeinfojson': opts.writeinfojson,
+ 'writethumbnail': opts.writethumbnail,
'writesubtitles': opts.writesubtitles,
'onlysubtitles': opts.onlysubtitles,
'allsubtitles': opts.allsubtitles,
@@ -506,7 +541,8 @@ def _real_main(argv=None):
'test': opts.test,
'keepvideo': opts.keepvideo,
'min_filesize': opts.min_filesize,
- 'max_filesize': opts.max_filesize
+ 'max_filesize': opts.max_filesize,
+ 'daterange': date,
})
if opts.verbose:
diff --git a/youtube_dl/update.py b/youtube_dl/update.py
index d6e293875..eab8417a5 100644
--- a/youtube_dl/update.py
+++ b/youtube_dl/update.py
@@ -78,7 +78,7 @@ def update_self(to_screen, verbose, filename):
to_screen(u'Updating to version ' + versions_info['latest'] + '...')
version = versions_info['versions'][versions_info['latest']]
- print_notes(versions_info['versions'])
+ print_notes(to_screen, versions_info['versions'])
if not os.access(filename, os.W_OK):
to_screen(u'ERROR: no write permissions on %s' % filename)
@@ -157,11 +157,15 @@ del "%s"
to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
-def print_notes(versions, fromVersion=__version__):
+def get_notes(versions, fromVersion):
notes = []
for v,vdata in sorted(versions.items()):
if v > fromVersion:
notes.extend(vdata.get('notes', []))
+ return notes
+
+def print_notes(to_screen, versions, fromVersion=__version__):
+ notes = get_notes(versions, fromVersion)
if notes:
to_screen(u'PLEASE NOTE:')
for note in notes:
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 017f06c42..f2342b10a 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -12,6 +12,7 @@ import traceback
import zlib
import email.utils
import json
+import datetime
try:
import urllib.request as compat_urllib_request
@@ -429,6 +430,28 @@ def decodeOption(optval):
assert isinstance(optval, compat_str)
return optval
+def formatSeconds(secs):
+ if secs > 3600:
+ return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
+ elif secs > 60:
+ return '%d:%02d' % (secs // 60, secs % 60)
+ else:
+ return '%d' % secs
+
+def make_HTTPS_handler(opts):
+ if sys.version_info < (3,2):
+ # Python's 2.x handler is very simplistic
+ return compat_urllib_request.HTTPSHandler()
+ else:
+ import ssl
+ context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ context.set_default_verify_paths()
+
+ context.verify_mode = (ssl.CERT_NONE
+ if opts.no_check_certificate
+ else ssl.CERT_REQUIRED)
+ return compat_urllib_request.HTTPSHandler(context=context)
+
class ExtractorError(Exception):
"""Error during info extraction."""
def __init__(self, msg, tb=None):
@@ -568,3 +591,70 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
https_request = http_request
https_response = http_response
+
+def unified_strdate(date_str):
+ """Return a string with the date in the format YYYYMMDD"""
+ upload_date = None
+ #Replace commas
+ date_str = date_str.replace(',',' ')
+ # %z (UTC offset) is only supported in python>=3.2
+ date_str = re.sub(r' (\+|-)[\d]*$', '', date_str)
+ format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S']
+ for expression in format_expressions:
+ try:
+ upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
+ except:
+ pass
+ return upload_date
+
+def date_from_str(date_str):
+ """
+ Return a datetime object from a string in the format YYYYMMDD or
+ (now|today)[+-][0-9](day|week|month|year)(s)?"""
+ today = datetime.date.today()
+ if date_str == 'now'or date_str == 'today':
+ return today
+ match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
+ if match is not None:
+ sign = match.group('sign')
+ time = int(match.group('time'))
+ if sign == '-':
+ time = -time
+ unit = match.group('unit')
+ #A bad aproximation?
+ if unit == 'month':
+ unit = 'day'
+ time *= 30
+ elif unit == 'year':
+ unit = 'day'
+ time *= 365
+ unit += 's'
+ delta = datetime.timedelta(**{unit: time})
+ return today + delta
+ return datetime.datetime.strptime(date_str, "%Y%m%d").date()
+
+class DateRange(object):
+ """Represents a time interval between two dates"""
+ def __init__(self, start=None, end=None):
+ """start and end must be strings in the format accepted by date"""
+ if start is not None:
+ self.start = date_from_str(start)
+ else:
+ self.start = datetime.datetime.min.date()
+ if end is not None:
+ self.end = date_from_str(end)
+ else:
+ self.end = datetime.datetime.max.date()
+ if self.start > self.end:
+ raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
+ @classmethod
+ def day(cls, day):
+ """Returns a range that only contains the given day"""
+ return cls(day,day)
+ def __contains__(self, date):
+ """Check if the date is in the range"""
+ if not isinstance(date, datetime.date):
+ date = date_from_str(date)
+ return self.start <= date <= self.end
+ def __str__(self):
+ return '%s - %s' % ( self.start.isoformat(), self.end.isoformat())
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index ac8a05ab5..f7227d0ad 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,2 +1,2 @@
-__version__ = '2013.04.22'
+__version__ = '2013.05.05'