about summary refs log tree commit diff
path: root/youtube_dl/extractor/lrt.py
diff options
context:
space:
mode:
Diffstat (limited to 'youtube_dl/extractor/lrt.py')
-rw-r--r-- youtube_dl/extractor/lrt.py | 93
1 file changed, 52 insertions(+), 41 deletions(-)
diff --git a/youtube_dl/extractor/lrt.py b/youtube_dl/extractor/lrt.py
index 1072405b3..89d549858 100644
--- a/youtube_dl/extractor/lrt.py
+++ b/youtube_dl/extractor/lrt.py
@@ -1,64 +1,75 @@
# coding: utf-8
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
- int_or_none,
- parse_duration,
- remove_end,
+ clean_html,
+ merge_dicts,
)
class LRTIE(InfoExtractor):
IE_NAME = 'lrt.lt'
- _VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)'
- _TEST = {
- 'url': 'http://www.lrt.lt/mediateka/irasas/54391/',
+ _VALID_URL = r'https?://(?:www\.)?lrt\.lt(?P<path>/mediateka/irasas/(?P<id>[0-9]+))'
+ _TESTS = [{
+ # m3u8 download
+ 'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
+ 'md5': '85cb2bb530f31d91a9c65b479516ade4',
'info_dict': {
- 'id': '54391',
+ 'id': '2000127261',
'ext': 'mp4',
- 'title': 'Septynios Kauno dienos',
- 'description': 'md5:24d84534c7dc76581e59f5689462411a',
- 'duration': 1783,
+ 'title': 'Greita ir gardu: Sicilijos įkvėpta klasikinių makaronų su baklažanais vakarienė',
+ 'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
+ 'duration': 3035,
+ 'timestamp': 1604079000,
+ 'upload_date': '20201030',
+ },
+ }, {
+ # direct mp3 download
+ 'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/',
+ 'md5': '389da8ca3cad0f51d12bed0c844f6a0a',
+ 'info_dict': {
+ 'id': '1013074524',
+ 'ext': 'mp3',
+ 'title': 'Kita tema 2016-09-05 15:05',
+ 'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5',
+ 'duration': 3008,
'view_count': int,
'like_count': int,
},
- 'params': {
- 'skip_download': True, # m3u8 download
- },
- }
+ }]
+
+ def _extract_js_var(self, webpage, var_name, default):
+ return self._search_regex(
+ r'%s\s*=\s*(["\'])((?:(?!\1).)+)\1' % var_name,
+ webpage, var_name.replace('_', ' '), default, group=2)
def _real_extract(self, url):
- video_id = self._match_id(url)
+ path, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, video_id)
- title = remove_end(self._og_search_title(webpage), ' - LRT')
- m3u8_url = self._search_regex(
- r'file\s*:\s*(["\'])(?P<url>.+?)\1\s*\+\s*location\.hash\.substring\(1\)',
- webpage, 'm3u8 url', group='url')
- formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')
- self._sort_formats(formats)
+ media_url = self._extract_js_var(webpage, 'main_url', path)
+ media = self._download_json(self._extract_js_var(
+ webpage, 'media_info_url',
+ 'https://www.lrt.lt/servisai/stream_url/vod/media_info/'),
+ video_id, query={'url': media_url})
+ jw_data = self._parse_jwplayer_data(
+ media['playlist_item'], video_id, base_url=url)
- thumbnail = self._og_search_thumbnail(webpage)
- description = self._og_search_description(webpage)
- duration = parse_duration(self._search_regex(
- r'var\s+record_len\s*=\s*(["\'])(?P<duration>[0-9]+:[0-9]+:[0-9]+)\1',
- webpage, 'duration', default=None, group='duration'))
+ json_ld_data = self._search_json_ld(webpage, video_id)
- view_count = int_or_none(self._html_search_regex(
- r'<div[^>]+class=(["\']).*?record-desc-seen.*?\1[^>]*>(?P<count>.+?)</div>',
- webpage, 'view count', fatal=False, group='count'))
- like_count = int_or_none(self._search_regex(
- r'<span[^>]+id=(["\'])flikesCount.*?\1>(?P<count>\d+)<',
- webpage, 'like count', fatal=False, group='count'))
+ tags = []
+ for tag in (media.get('tags') or []):
+ tag_name = tag.get('name')
+ if not tag_name:
+ continue
+ tags.append(tag_name)
- return {
- 'id': video_id,
- 'title': title,
- 'formats': formats,
- 'thumbnail': thumbnail,
- 'description': description,
- 'duration': duration,
- 'view_count': view_count,
- 'like_count': like_count,
+ clean_info = {
+ 'description': clean_html(media.get('content')),
+ 'tags': tags,
}
+
+ return merge_dicts(clean_info, jw_data, json_ld_data)