| author | Philipp Hagemeister <phihag@phihag.de> | 2015-01-07 11:21:40 +0100 |
|---|---|---|
| committer | Philipp Hagemeister <phihag@phihag.de> | 2015-01-08 16:14:49 +0100 |
| commit | 99673f04bc5b179241d121cf2aa50f25b6b1c7d0 | |
| tree | 5cea7ad8a6f320349e02bf3d0bc12724b9fa3f81 | |
| parent | e9a537774d8671fd476fd5930bfe3c6f1b17c307 | |
[washingtonpost] Modernize and correct test case
-rw-r--r-- youtube_dl/extractor/washingtonpost.py | 11
1 file changed, 5 insertions(+), 6 deletions(-)
```diff
diff --git a/youtube_dl/extractor/washingtonpost.py b/youtube_dl/extractor/washingtonpost.py
index 88bbbb219..c17bebd6e 100644
--- a/youtube_dl/extractor/washingtonpost.py
+++ b/youtube_dl/extractor/washingtonpost.py
@@ -10,14 +10,14 @@ from ..utils import (
 
 
 class WashingtonPostIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:www\.)?washingtonpost\.com/.*?/(?P<id>[^/]+)/(?:$|[?#])'
+    _VALID_URL = r'https?://(?:www\.)?washingtonpost\.com/.*?/(?P<id>[^/]+)/(?:$|[?#])'
     _TEST = {
         'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/',
         'info_dict': {
             'title': 'Sinkhole of bureaucracy',
         },
         'playlist': [{
-            'md5': 'c3f4b4922ffa259243f68e928db2db8c',
+            'md5': '79132cc09ec5309fa590ae46e4cc31bc',
             'info_dict': {
                 'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f',
                 'ext': 'mp4',
@@ -29,7 +29,7 @@ class WashingtonPostIE(InfoExtractor):
                 'upload_date': '20140322',
             },
         }, {
-            'md5': 'f645a07652c2950cd9134bb852c5f5eb',
+            'md5': 'e1d5734c06865cc504ad99dc2de0d443',
             'info_dict': {
                 'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f',
                 'ext': 'mp4',
@@ -44,10 +44,9 @@ class WashingtonPostIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        page_id = mobj.group('id')
-
+        page_id = self._match_id(url)
         webpage = self._download_webpage(url, page_id)
+
         title = self._og_search_title(webpage)
         uuids = re.findall(r'data-video-uuid="([^"]+)"', webpage)
         entries = []
```
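The modernization here is twofold: the manual `re.match(self._VALID_URL, url)` / `mobj.group('id')` pair is replaced by the `_match_id()` helper inherited from `InfoExtractor`, and the leading `^` is dropped from `_VALID_URL` because `re.match()` already anchors at the start of the string. The sketch below illustrates roughly how such a helper behaves; the real implementation lives in youtube_dl/extractor/common.py and may differ in detail, and `InfoExtractorSketch` is a hypothetical stand-in, not the actual class.

```python
import re


class InfoExtractorSketch(object):
    # Subclasses define _VALID_URL with a named group called 'id'.
    _VALID_URL = r'https?://(?:www\.)?washingtonpost\.com/.*?/(?P<id>[^/]+)/(?:$|[?#])'

    @classmethod
    def _match_id(cls, url):
        # re.match() anchors at the start of the string, so the pattern
        # no longer needs an explicit leading '^'.
        m = re.match(cls._VALID_URL, url)
        assert m is not None, 'URL did not match _VALID_URL'
        return m.group('id')


# Usage: extracting the page id from the test URL in the diff above.
print(InfoExtractorSketch._match_id(
    'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/'))
# -> 'sinkhole-of-bureaucracy'
```

With the helper in place, `_real_extract()` only needs `page_id = self._match_id(url)` before downloading the webpage, which is exactly what the hunk at line 44 switches to.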
