#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import base64
import datetime
import itertools
import netrc
import os
import re
import socket
import time
import email.utils
import xml.etree.ElementTree
import random
import math
import operator
import hashlib
import binascii
import urllib

from .utils import *
from .extractor.common import InfoExtractor, SearchInfoExtractor

from .extractor.ard import ARDIE
from .extractor.arte import ArteTvIE
from .extractor.dailymotion import DailymotionIE
from .extractor.metacafe import MetacafeIE
from .extractor.statigram import StatigramIE
from .extractor.photobucket import PhotobucketIE
from .extractor.vimeo import VimeoIE
from .extractor.yahoo import YahooIE
from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeUserIE, YoutubeChannelIE
from .extractor.zdf import ZDFIE


class GenericIE(InfoExtractor):
    """Generic last-resort information extractor."""
    _VALID_URL = r'.*'
    IE_NAME = u'generic'

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        if not self._downloader.params.get('test', False):
            self._downloader.report_warning(u'Falling back on generic information extractor.')
        super(GenericIE, self).report_download_webpage(video_id)

    def report_following_redirect(self, new_url):
        """Report information extraction."""
        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)

    def _test_redirect(self, url):
        """Check if it is a redirect, like url shorteners, in case return the new url."""
        class HeadRequest(compat_urllib_request.Request):
            def get_method(self):
                return "HEAD"

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HeadRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k, v) for k, v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HeadRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=req.get_origin_req_host(),
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k, v) for k, v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))

        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
            opener.add_handler(handler())

        response = opener.open(HeadRequest(url))
        if response is None:
            raise ExtractorError(u'Invalid URL protocol')
        new_url = response.geturl()

        if url == new_url:
            return False

        self.report_following_redirect(new_url)
        return new_url

    def _real_extract(self, url):
        new_url = self._test_redirect(url)
        if new_url:
            return [self.url_result(new_url)]

        video_id = url.split('/')[-1]
        try:
            webpage = self._download_webpage(url, video_id)
        except ValueError as err:
            # since this is the last-resort InfoExtractor, if
            # this error is thrown, it'll be thrown here
            raise ExtractorError(u'Invalid URL: %s' % url)

        self.report_extraction(video_id)
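        # What follows is a cascade of increasingly broad heuristics:
        # a JW Player embedded via SWFObject, bare file=/source= URL
        # parameters, the JWPlayer JS loader, Twitter card metadata,
        # and finally Open Graph og:video tags; the first match wins.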
        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit
            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit: JWPlayer JS loader
            mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
        if mobj is None:
            # Try to find twitter cards info
            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
        if mobj is None:
            # We look for Open Graph info:
            # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
            if m_video_type is not None:
                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)

        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # It's possible that one of the regexes
        # matched, but returned an empty group:
        if mobj.group(1) is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_url = compat_urllib_parse.unquote(mobj.group(1))
        video_id = os.path.basename(video_url)

        # the file extension, taken from the basename of the URL
        video_extension = os.path.splitext(video_id)[1][1:]
        video_id = os.path.splitext(video_id)[0]

        # it's tempting to parse this further, but you would
        # have to take into account all the header
        # that would otherwise be lost:
        # "title" may be nice, but maybe not needed.
        video_title = self._html_search_regex(r'<title>(.*)</title>',
            webpage, u'video title')

        # video uploader is domain name
        video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
            url, u'video uploader')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension,
        }]


class YoutubeSearchIE(SearchInfoExtractor):
    """Information Extractor for YouTube search queries."""
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def report_download_page(self, query, pagenum):
        """Report attempt to download search page with given number."""
        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n

        while (50 * pagenum) < limit:
            self.report_download_page(query, pagenum + 1)
            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50 * pagenum) + 1)
            request = compat_urllib_request.Request(result_url)
            try:
                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
            api_response = json.loads(data)['data']

            if 'items' not in api_response:
                raise ExtractorError(u'[youtube] No video results')

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
        return self.playlist_result(videos, query)


class GoogleSearchIE(SearchInfoExtractor):
    """Information Extractor for Google Video search queries."""
    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
    _MAX_RESULTS = 1000
    IE_NAME = u'video.google:search'
    _SEARCH_KEY = 'gvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        res = {
            '_type': 'playlist',
            'id': query,
            'entries': []
        }

        for pagenum in itertools.count(1):
            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum * 10)
            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
                                             note='Downloading result page ' + str(pagenum))

            # Each result links its video page from an <h3 class="r"> heading
            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
                e = {
                    '_type': 'url',
                    'url': mobj.group(1)
                }
                res['entries'].append(e)

            # Stop once we have enough results or there is no "next page" link
            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
                return res
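
# A minimal usage sketch (illustrative only, not part of this module):
# these extractors are normally driven by a FileDownloader instance,
# which supplies the `self._downloader` they report through. The import
# path and the params dict below are assumptions about the surrounding
# youtube-dl package, not an API this file defines.
#
#   from youtube_dl.FileDownloader import FileDownloader
#
#   fd = FileDownloader({'outtmpl': u'%(id)s.%(ext)s'})
#   fd.add_info_extractor(GenericIE())
#   fd.download([u'http://example.com/some/page/with/a/video'])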