repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tagesschau.py
youtube_dl/extractor/tagesschau.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, js_to_json, parse_iso8601, parse_filesize, ) class TagesschauPlayerIE(InfoExtractor): IE_NAME = 'tagesschau:player' _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html' _TESTS = [{ 'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html', 'md5': '8d09548d5c15debad38bee3a4d15ca21', 'info_dict': { 'id': '179517', 'ext': 'mp4', 'title': 'Marie Kristin Boese, ARD Berlin, über den zukünftigen Kurs der AfD', 'thumbnail': r're:^https?:.*\.jpg$', 'formats': 'mincount:6', }, }, { 'url': 'https://www.tagesschau.de/multimedia/audio/audio-29417~player.html', 'md5': '76e6eec6ebd40740671cf0a2c88617e5', 'info_dict': { 'id': '29417', 'ext': 'mp3', 'title': 'Trabi - Bye, bye Rennpappe', 'thumbnail': r're:^https?:.*\.jpg$', 'formats': 'mincount:2', }, }, { 'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417~player_autoplay-true.html', 'only_matching': True, }] _FORMATS = { 'xs': {'quality': 0}, 's': {'width': 320, 'height': 180, 'quality': 1}, 'm': {'width': 512, 'height': 288, 'quality': 2}, 'l': {'width': 960, 'height': 540, 'quality': 3}, 'xl': {'width': 1280, 'height': 720, 'quality': 4}, 'xxl': {'quality': 5}, } def _extract_via_api(self, kind, video_id): info = self._download_json( 'https://www.tagesschau.de/api/multimedia/{0}/{0}-{1}.json'.format(kind, video_id), video_id) title = info['headline'] formats = [] for media in info['mediadata']: for format_id, format_url in media.items(): if determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')) else: formats.append({ 'url': format_url, 'format_id': format_id, 'vcodec': 'none' if kind == 'audio' else None, }) self._sort_formats(formats) timestamp = parse_iso8601(info.get('date')) return { 
'id': video_id, 'title': title, 'timestamp': timestamp, 'formats': formats, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') # kind = mobj.group('kind').lower() # if kind == 'video': # return self._extract_via_api(kind, video_id) # JSON api does not provide some audio formats (e.g. ogg) thus # extracting audio via webpage webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage).strip() formats = [] for media_json in re.findall(r'({src\s*:\s*["\']http[^}]+type\s*:[^}]+})', webpage): media = self._parse_json(js_to_json(media_json), video_id, fatal=False) if not media: continue src = media.get('src') if not src: return quality = media.get('quality') kind = media.get('type', '').split('/')[0] ext = determine_ext(src) f = { 'url': src, 'format_id': '%s_%s' % (quality, ext) if quality else ext, 'ext': ext, 'vcodec': 'none' if kind == 'audio' else None, } f.update(self._FORMATS.get(quality, {})) formats.append(f) self._sort_formats(formats) thumbnail = self._og_search_thumbnail(webpage) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'formats': formats, } class TagesschauIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html' _TESTS = [{ 'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html', 'md5': 'f7c27a0eff3bfe8c7727e65f8fe1b1e6', 'info_dict': { 'id': 'video-102143', 'ext': 'mp4', 'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt', 'description': '18.07.2015 20:10 Uhr', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html', 'md5': '3c54c1f6243d279b706bde660ceec633', 'info_dict': { 'id': 'ts-5727', 'ext': 'mp4', 'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr', 'description': 'md5:695c01bfd98b7e313c501386327aea59', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { # exclusive audio 
'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417.html', 'md5': '76e6eec6ebd40740671cf0a2c88617e5', 'info_dict': { 'id': 'audio-29417', 'ext': 'mp3', 'title': 'Trabi - Bye, bye Rennpappe', 'description': 'md5:8687dda862cbbe2cfb2df09b56341317', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { # audio in article 'url': 'http://www.tagesschau.de/inland/bnd-303.html', 'md5': 'e0916c623e85fc1d2b26b78f299d3958', 'info_dict': { 'id': 'bnd-303', 'ext': 'mp3', 'title': 'Viele Baustellen für neuen BND-Chef', 'description': 'md5:1e69a54be3e1255b2b07cdbce5bcd8b4', 'thumbnail': r're:^https?:.*\.jpg$', }, }, { 'url': 'http://www.tagesschau.de/inland/afd-parteitag-135.html', 'info_dict': { 'id': 'afd-parteitag-135', 'title': 'Möchtegern-Underdog mit Machtanspruch', }, 'playlist_count': 2, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/tt-3827.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/nm-3475.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/weltspiegel-3167.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/tsvorzwanzig-959.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html', 'only_matching': True, }, { 'url': 'http://www.tagesschau.de/100sekunden/index.html', 'only_matching': True, }, { # playlist article with collapsing sections 'url': 'http://www.tagesschau.de/wirtschaft/faq-freihandelszone-eu-usa-101.html', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if TagesschauPlayerIE.suitable(url) else super(TagesschauIE, cls).suitable(url) def _extract_formats(self, download_text, media_kind): links = re.finditer( r'<div class="button" 
title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>', download_text) formats = [] for l in links: link_url = l.group('url') if not link_url: continue format_id = self._search_regex( r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID', default=determine_ext(link_url)) format = { 'format_id': format_id, 'url': l.group('url'), 'format_name': l.group('name'), } title = l.group('title') if title: if media_kind.lower() == 'video': m = re.match( r'''(?x) Video:\s*(?P<vcodec>[a-zA-Z0-9/._-]+)\s*&\#10; (?P<width>[0-9]+)x(?P<height>[0-9]+)px&\#10; (?P<vbr>[0-9]+)kbps&\#10; Audio:\s*(?P<abr>[0-9]+)kbps,\s*(?P<audio_desc>[A-Za-z\.0-9]+)&\#10; Gr&ouml;&szlig;e:\s*(?P<filesize_approx>[0-9.,]+\s+[a-zA-Z]*B)''', title) if m: format.update({ 'format_note': m.group('audio_desc'), 'vcodec': m.group('vcodec'), 'width': int(m.group('width')), 'height': int(m.group('height')), 'abr': int(m.group('abr')), 'vbr': int(m.group('vbr')), 'filesize_approx': parse_filesize(m.group('filesize_approx')), }) else: m = re.match( r'(?P<format>.+?)-Format\s*:\s*(?P<abr>\d+)kbps\s*,\s*(?P<note>.+)', title) if m: format.update({ 'format_note': '%s, %s' % (m.group('format'), m.group('note')), 'vcodec': 'none', 'abr': int(m.group('abr')), }) formats.append(format) self._sort_formats(formats) return formats def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('path') display_id = video_id.lstrip('-') webpage = self._download_webpage(url, display_id) title = self._html_search_regex( r'<span[^>]*class="headline"[^>]*>(.+?)</span>', webpage, 'title', default=None) or self._og_search_title(webpage) DOWNLOAD_REGEX = r'(?s)<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>' webpage_type = self._og_search_property('type', webpage, default=None) if webpage_type == 'website': # Article entries = [] for num, (entry_title, media_kind, download_text) in 
enumerate(re.findall( r'(?s)<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX, webpage), 1): entries.append({ 'id': '%s-%d' % (display_id, num), 'title': '%s' % entry_title, 'formats': self._extract_formats(download_text, media_kind), }) if len(entries) > 1: return self.playlist_result(entries, display_id, title) formats = entries[0]['formats'] else: # Assume single video download_text = self._search_regex( DOWNLOAD_REGEX, webpage, 'download links', group='links') media_kind = self._search_regex( DOWNLOAD_REGEX, webpage, 'media kind', default='Video', group='kind') formats = self._extract_formats(download_text, media_kind) thumbnail = self._og_search_thumbnail(webpage) description = self._html_search_regex( r'(?s)<p class="teasertext">(.*?)</p>', webpage, 'description', default=None) self._sort_formats(formats) return { 'id': display_id, 'title': title, 'thumbnail': thumbnail, 'formats': formats, 'description': description, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tf1.py
youtube_dl/extractor/tf1.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, try_get, ) class TF1IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tf1\.fr/[^/]+/(?P<program_slug>[^/]+)/videos/(?P<id>[^/?&#]+)\.html' _TESTS = [{ 'url': 'https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/quotidien-premiere-partie-11-juin-2019.html', 'info_dict': { 'id': '13641379', 'ext': 'mp4', 'title': 'md5:f392bc52245dc5ad43771650c96fb620', 'description': 'md5:a02cdb217141fb2d469d6216339b052f', 'upload_date': '20190611', 'timestamp': 1560273989, 'duration': 1738, 'series': 'Quotidien avec Yann Barthès', 'tags': ['intégrale', 'quotidien', 'Replay'], }, 'params': { # Sometimes wat serves the whole file with the --test option 'skip_download': True, 'format': 'bestvideo', }, }, { 'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html', 'only_matching': True, }, { 'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html', 'only_matching': True, }] def _real_extract(self, url): program_slug, slug = re.match(self._VALID_URL, url).groups() video = self._download_json( 'https://www.tf1.fr/graphql/web', slug, query={ 'id': '9b80783950b85247541dd1d851f9cc7fa36574af015621f853ab111a679ce26f', 'variables': json.dumps({ 'programSlug': program_slug, 'slug': slug, }) })['data']['videoBySlug'] wat_id = video['streamId'] tags = [] for tag in (video.get('tags') or []): label = tag.get('label') if not label: continue tags.append(label) decoration = video.get('decoration') or {} thumbnails = [] for source in (try_get(decoration, lambda x: x['image']['sources'], list) or []): source_url = source.get('url') if not source_url: continue thumbnails.append({ 'url': source_url, 'width': int_or_none(source.get('width')), }) return { '_type': 'url_transparent', 'id': wat_id, 'url': 'wat:' + wat_id, 'title': video.get('title'), 'thumbnails': thumbnails, 'description': 
decoration.get('description'), 'timestamp': parse_iso8601(video.get('date')), 'duration': int_or_none(try_get(video, lambda x: x['publicPlayingInfos']['duration'])), 'tags': tags, 'series': decoration.get('programLabel'), 'season_number': int_or_none(video.get('season')), 'episode_number': int_or_none(video.get('episode')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cbsnews.py
youtube_dl/extractor/cbsnews.py
# coding: utf-8 from __future__ import unicode_literals import re import zlib from .common import InfoExtractor from .cbs import CBSIE from ..compat import ( compat_b64decode, compat_urllib_parse_unquote, ) from ..utils import ( parse_duration, ) class CBSNewsEmbedIE(CBSIE): IE_NAME = 'cbsnews:embed' _VALID_URL = r'https?://(?:www\.)?cbsnews\.com/embed/video[^#]*#(?P<id>.+)' _TESTS = [{ 'url': 'https://www.cbsnews.com/embed/video/?v=1.c9b5b61492913d6660db0b2f03579ef25e86307a#1Vb7b9s2EP5XBAHbT6Gt98PAMKTJ0se6LVjWYWtdGBR1stlIpEBSTtwi%2F%2FvuJNkNhmHdGxgM2NL57vjd6zt%2B8PngdN%2Fyg79qeGvhzN%2FLGrS%2F%2BuBLB531V28%2B%2BO7Qg7%2Fy97r2z3xZ42NW8yLhDbA0S0KWlHnIijwKWJBHZZnHBa8Cgbpdf%2F89NM9Hi9fXifhpr8sr%2FlP848tn%2BTdXycX25zh4cdX%2FvHl6PmmPqnWQv9w8Ed%2B9GjYRim07bFEqdG%2BZVHuwTm65A7bVRrYtR5lAyMox7pigF6W4k%2By91mjspGsJ%2BwVae4%2BsvdnaO1p73HkXs%2FVisUDTGm7R8IcdnOROeq%2B19qT1amhA1VJtPenoTUgrtfKc9m7Rq8dP7nnjwOB7wg7ADdNt7VX64DWAWlKhPtmDEq22g4GF99x6Dk9E8OSsankHXqPNKDxC%2FdK7MLKTircTDgsI3mmj4OBdSq64dy7fd1x577RU1rt4cvMtOaulFYOd%2FLewRWvDO9lIgXFpZSnkZmjbv5SxKTPoQXClFbpsf%2Fhbbpzs0IB3vb8KkyzJQ%2BywOAgCrMpgRrz%2BKk4fvb7kFbR4XJCu0gAdtNO7woCwZTu%2BBUs9bam%2Fds71drVerpeisgrubLjAB4nnOSkWQnfr5W6o1ku5Xpr1MgrCbL0M0vUyDtfLLK15WiYp47xKWSLyjFVpwVmVJSLIoCjSOFkv3W7oKsVliwZJcB9nwXpZ5GEQQwY8jNKqKCBrgjTLeFxgdCIpazojDgnRtn43J6kG7nZ6cAbxh0EeFFk4%2B1u867cY5u4344n%2FxXjCqAjucdTHgLKojNKmSfO8KRsOFY%2FzKEYCKEJBzv90QA9nfm9gL%2BHulaFqUkz9ULUYxl62B3U%2FRVNLA8IhggaPycOoBuwOCESciDQVSSUgiOMsROB%2FhKfwCKOzEk%2B4k6rWd4uuT%2FwTDz7K7t3d3WLO8ISD95jSPQbayBacthbz86XVgxHwhex5zawzgDOmtp%2F3GPcXn0VXHdSS029%2Fj99UC%2FwJUvyKQ%2FzKyixIEVlYJOn4RxxuaH43Ty9fbJ5OObykHH435XAzJTHeOF4hhEUXD8URe%2FQ%2FBT%2BMpf8d5GN02Ox%2FfiGsl7TA7POu1xZ5%2BbTzcAVKMe48mqcC21hkacVEVScM26liVVBnrKkC4CLKyzAvHu0lhEaTKMFwI3a4SN9MsrfYzdBLq2vkwRD1gVviLT8kY9h2CHH6Y%2Bix6609weFtey4ESp60WtyeWMy%2BsmBuhsoKIyuoT%2Bq2R%2FrW5qi3g%2FvzS2j40DoixDP8%2BKP0yUdpXJ4l6Vla%2Bg9vce%2BC4yM5YlUcbA%2F0jLKdpmTwvsdN5z88nAIe08%2F0HgxeG1iv%2B6Hlhjh7uiW0SDzYNI92L401uha3JKYk268UVRzdOzNQvAaJ
qoXzAc80dAV440NZ1WVVAAMRYQ2KrGJFmDUsq8saWSnjvIj8t78y%2FRa3JRnbHVfyFpfwoDiGpPgjzekyUiKNlU3OMlwuLMmzgvEojllYVE2Z1HhImvsnk%2BuhusTEoB21PAtSFodeFK3iYhXEH9WOG2%2FkOE833sfeG%2Ff5cfHtEFNXgYes0%2FXj7aGivUgJ9XpusCtoNcNYVVnJVrrDo0OmJAutHCpuZul4W9lLcfy7BnuLPT02%2ByXsCTk%2B9zhzswIN04YueNSK%2BPtM0jS88QdLqSLJDTLsuGZJNolm2yO0PXh3UPnz9Ix5bfIAqxPjvETQsDCEiPG4QbqNyhBZISxybLnZYCrW5H3Axp690%2F0BJdXtDZ5ITuM4xj3f4oUHGzc5JeJmZKpp%2FjwKh4wMV%2FV1yx3emLoR0MwbG4K%2F%2BZgVep3PnzXGDHZ6a3i%2Fk%2BJrONDN13%2Bnq6tBTYk4o7cLGhBtqCC4KwacGHpEVuoH5JNro%2FE6JfE6d5RydbiR76k%2BW5wioDHBIjw1euhHjUGRB0y5A97KoaPx6MlL%2BwgboUVtUFRI%2FLemgTpdtF59ii7pab08kuPcfWzs0l%2FRI5takWnFpka0zOgWRtYcuf9aIxZMxlwr6IiGpsb6j2DQUXPl%2FimXI599Ev7fWjoPD78A', 'only_matching': True, }] def _real_extract(self, url): item = self._parse_json(zlib.decompress(compat_b64decode( compat_urllib_parse_unquote(self._match_id(url))), -zlib.MAX_WBITS).decode('utf-8'), None)['video']['items'][0] return self._extract_video_info(item['mpxRefId'], 'cbsnews') class CBSNewsIE(CBSIE): IE_NAME = 'cbsnews' IE_DESC = 'CBS News' _VALID_URL = r'https?://(?:www\.)?cbsnews\.com/(?:news|video)/(?P<id>[\da-z_-]+)' _TESTS = [ { # 60 minutes 'url': 'http://www.cbsnews.com/news/artificial-intelligence-positioned-to-be-a-game-changer/', 'info_dict': { 'id': 'Y_nf_aEg6WwO9OLAq0MpKaPgfnBUxfW4', 'ext': 'flv', 'title': 'Artificial Intelligence, real-life applications', 'description': 'md5:a7aaf27f1b4777244de8b0b442289304', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 317, 'uploader': 'CBSI-NEW', 'timestamp': 1476046464, 'upload_date': '20161009', }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'https://www.cbsnews.com/video/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/', 'info_dict': { 'id': 'SNJBOYzXiWBOvaLsdzwH8fmtP1SCd91Y', 'ext': 'mp4', 'title': 'Fort Hood shooting: Army downplays mental illness as cause of attack', 'description': 'md5:4a6983e480542d8b333a947bfc64ddc7', 'upload_date': '20140404', 'timestamp': 
1396650660, 'uploader': 'CBSI-NEW', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 205, 'subtitles': { 'en': [{ 'ext': 'ttml', }], }, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # 48 hours 'url': 'http://www.cbsnews.com/news/maria-ridulph-murder-will-the-nations-oldest-cold-case-to-go-to-trial-ever-get-solved/', 'info_dict': { 'title': 'Cold as Ice', 'description': 'Can a childhood memory solve the 1957 murder of 7-year-old Maria Ridulph?', }, 'playlist_mincount': 7, }, ] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) entries = [] for embed_url in re.findall(r'<iframe[^>]+data-src="(https?://(?:www\.)?cbsnews\.com/embed/video/[^#]*#[^"]+)"', webpage): entries.append(self.url_result(embed_url, CBSNewsEmbedIE.ie_key())) if entries: return self.playlist_result( entries, playlist_title=self._html_search_meta(['og:title', 'twitter:title'], webpage), playlist_description=self._html_search_meta(['og:description', 'twitter:description', 'description'], webpage)) item = self._parse_json(self._html_search_regex( r'CBSNEWS\.defaultPayload\s*=\s*({.+})', webpage, 'video JSON info'), display_id)['items'][0] return self._extract_video_info(item['mpxRefId'], 'cbsnews') class CBSNewsLiveVideoIE(InfoExtractor): IE_NAME = 'cbsnews:livevideo' IE_DESC = 'CBS News Live Videos' _VALID_URL = r'https?://(?:www\.)?cbsnews\.com/live/video/(?P<id>[^/?#]+)' # Live videos get deleted soon. 
See http://www.cbsnews.com/live/ for the latest examples _TEST = { 'url': 'http://www.cbsnews.com/live/video/clinton-sanders-prepare-to-face-off-in-nh/', 'info_dict': { 'id': 'clinton-sanders-prepare-to-face-off-in-nh', 'ext': 'mp4', 'title': 'Clinton, Sanders Prepare To Face Off In NH', 'duration': 334, }, 'skip': 'Video gone', } def _real_extract(self, url): display_id = self._match_id(url) video_info = self._download_json( 'http://feeds.cbsn.cbsnews.com/rundown/story', display_id, query={ 'device': 'desktop', 'dvr_slug': display_id, }) formats = self._extract_akamai_formats(video_info['url'], display_id) self._sort_formats(formats) return { 'id': display_id, 'display_id': display_id, 'title': video_info['headline'], 'thumbnail': video_info.get('thumbnail_url_hd') or video_info.get('thumbnail_url_sd'), 'duration': parse_duration(video_info.get('segmentDur')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lenta.py
youtube_dl/extractor/lenta.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class LentaIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?lenta\.ru/[^/]+/\d+/\d+/\d+/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://lenta.ru/news/2018/03/22/savshenko_go/', 'info_dict': { 'id': '964400', 'ext': 'mp4', 'title': 'Надежду Савченко задержали', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 61, 'view_count': int, }, 'params': { 'skip_download': True, }, }, { # EaglePlatform iframe embed 'url': 'http://lenta.ru/news/2015/03/06/navalny/', 'info_dict': { 'id': '227304', 'ext': 'mp4', 'title': 'Навальный вышел на свободу', 'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 87, 'view_count': int, 'age_limit': 0, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._search_regex( r'vid\s*:\s*["\']?(\d+)', webpage, 'eagleplatform id', default=None) if video_id: return self.url_result( 'eagleplatform:lentaru.media.eagleplatform.com:%s' % video_id, ie='EaglePlatform', video_id=video_id) return self.url_result(url, ie='Generic')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tmz.py
youtube_dl/extractor/tmz.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .jwplatform import JWPlatformIE from .kaltura import KalturaIE from ..utils import ( int_or_none, unified_timestamp, ) class TMZIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.tmz.com/videos/0-cegprt2p/', 'md5': '31f9223e20eef55954973359afa61a20', 'info_dict': { 'id': 'P6YjLBLk', 'ext': 'mp4', 'title': "No Charges Against Hillary Clinton? Harvey Says It Ain't Over Yet", 'description': 'md5:b714359fc18607715ebccbd2da8ff488', 'timestamp': 1467831837, 'upload_date': '20160706', }, 'add_ie': [JWPlatformIE.ie_key()], }, { 'url': 'http://www.tmz.com/videos/0_okj015ty/', 'only_matching': True, }, { 'url': 'https://www.tmz.com/videos/071119-chris-morgan-women-4590005-0-zcsejvcr/', 'only_matching': True, }, { 'url': 'https://www.tmz.com/videos/2021-02-19-021921-floyd-mayweather-1043872/', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url).replace('-', '_') webpage = self._download_webpage(url, video_id, fatal=False) if webpage: tmz_video_id = self._search_regex( r'nodeRef\s*:\s*["\']tmz:video:([\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12})', webpage, 'video id', default=None) video = self._download_json( 'https://www.tmz.com/_/video/%s' % tmz_video_id, video_id, fatal=False) if video: message = video['message'] info = { '_type': 'url_transparent', 'title': message.get('title'), 'description': message.get('description'), 'timestamp': unified_timestamp(message.get('published_at')), 'duration': int_or_none(message.get('duration')), } jwplatform_id = message.get('jwplayer_media_id') if jwplatform_id: info.update({ 'url': 'jwplatform:%s' % jwplatform_id, 'ie_key': JWPlatformIE.ie_key(), }) else: kaltura_entry_id = message.get('kaltura_entry_id') or video_id kaltura_partner_id = message.get('kaltura_partner_id') or '591531' info.update({ 'url': 
'kaltura:%s:%s' % (kaltura_partner_id, kaltura_entry_id), 'ie_key': KalturaIE.ie_key(), }) return info return self.url_result( 'kaltura:591531:%s' % video_id, KalturaIE.ie_key(), video_id) class TMZArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tmz\.com/\d{4}/\d{2}/\d{2}/(?P<id>[^/?#&]+)' _TEST = { 'url': 'http://www.tmz.com/2015/04/19/bobby-brown-bobbi-kristina-awake-video-concert', 'info_dict': { 'id': 'PAKZa97W', 'ext': 'mp4', 'title': 'Bobby Brown Tells Crowd ... Bobbi Kristina is Awake', 'description': 'Bobby Brown stunned his audience during a concert Saturday night, when he told the crowd, "Bobbi is awake. She\'s watching me."', 'timestamp': 1429466400, 'upload_date': '20150419', }, 'params': { 'skip_download': True, }, 'add_ie': [JWPlatformIE.ie_key()], } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) tmz_url = self._search_regex( r'clickLink\s*\(\s*["\'](?P<url>%s)' % TMZIE._VALID_URL, webpage, 'video id', default=None, group='url') if tmz_url: return self.url_result(tmz_url, ie=TMZIE.ie_key()) embedded_video_info = self._parse_json(self._html_search_regex( r'tmzVideoEmbed\(({.+?})\);', webpage, 'embedded video info'), video_id) return self.url_result( 'http://www.tmz.com/videos/%s/' % embedded_video_info['id'], ie=TMZIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/uplynk.py
youtube_dl/extractor/uplynk.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( float_or_none, ExtractorError, ) class UplynkIE(InfoExtractor): IE_NAME = 'uplynk' _VALID_URL = r'https?://.*?\.uplynk\.com/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.(?:m3u8|json)(?:.*?\bpbs=(?P<session_id>[^&]+))?' _TEST = { 'url': 'http://content.uplynk.com/e89eaf2ce9054aa89d92ddb2d817a52e.m3u8', 'info_dict': { 'id': 'e89eaf2ce9054aa89d92ddb2d817a52e', 'ext': 'mp4', 'title': '030816-kgo-530pm-solar-eclipse-vid_web.mp4', 'uploader_id': '4413701bf5a1488db55b767f8ae9d4fa', }, 'params': { # m3u8 download 'skip_download': True, }, } def _extract_uplynk_info(self, uplynk_content_url): path, external_id, video_id, session_id = re.match(UplynkIE._VALID_URL, uplynk_content_url).groups() display_id = video_id or external_id formats = self._extract_m3u8_formats( 'http://content.uplynk.com/%s.m3u8' % path, display_id, 'mp4', 'm3u8_native') if session_id: for f in formats: f['extra_param_to_segment_url'] = 'pbs=' + session_id self._sort_formats(formats) asset = self._download_json('http://content.uplynk.com/player/assetinfo/%s.json' % path, display_id) if asset.get('error') == 1: raise ExtractorError('% said: %s' % (self.IE_NAME, asset['msg']), expected=True) return { 'id': asset['asset'], 'title': asset['desc'], 'thumbnail': asset.get('default_poster_url'), 'duration': float_or_none(asset.get('duration')), 'uploader_id': asset.get('owner'), 'formats': formats, } def _real_extract(self, url): return self._extract_uplynk_info(url) class UplynkPreplayIE(UplynkIE): IE_NAME = 'uplynk:preplay' _VALID_URL = r'https?://.*?\.uplynk\.com/preplay2?/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.json' _TEST = None def _real_extract(self, url): path, external_id, video_id = re.match(self._VALID_URL, url).groups() display_id = video_id or external_id preplay = self._download_json(url, display_id) content_url = 
'http://content.uplynk.com/%s.m3u8' % path session_id = preplay.get('sid') if session_id: content_url += '?pbs=' + session_id return self._extract_uplynk_info(content_url)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tv5unis.py
youtube_dl/extractor/tv5unis.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_age_limit, smuggle_url, try_get, ) class TV5UnisBaseIE(InfoExtractor): _GEO_COUNTRIES = ['CA'] def _real_extract(self, url): groups = re.match(self._VALID_URL, url).groups() product = self._download_json( 'https://api.tv5unis.ca/graphql', groups[0], query={ 'query': '''{ %s(%s) { collection { title } episodeNumber rating { name } seasonNumber tags title videoElement { ... on Video { mediaId } } } }''' % (self._GQL_QUERY_NAME, self._gql_args(groups)), })['data'][self._GQL_QUERY_NAME] media_id = product['videoElement']['mediaId'] return { '_type': 'url_transparent', 'id': media_id, 'title': product.get('title'), 'url': smuggle_url('limelight:media:' + media_id, {'geo_countries': self._GEO_COUNTRIES}), 'age_limit': parse_age_limit(try_get(product, lambda x: x['rating']['name'])), 'tags': product.get('tags'), 'series': try_get(product, lambda x: x['collection']['title']), 'season_number': int_or_none(product.get('seasonNumber')), 'episode_number': int_or_none(product.get('episodeNumber')), 'ie_key': 'LimelightMedia', } class TV5UnisVideoIE(TV5UnisBaseIE): IE_NAME = 'tv5unis:video' _VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'https://www.tv5unis.ca/videos/bande-annonces/71843', 'md5': '3d794164928bda97fb87a17e89923d9b', 'info_dict': { 'id': 'a883684aecb2486cad9bdc7bbe17f861', 'ext': 'mp4', 'title': 'Watatatow', 'duration': 10.01, } } _GQL_QUERY_NAME = 'productById' @staticmethod def _gql_args(groups): return 'id: %s' % groups class TV5UnisIE(TV5UnisBaseIE): IE_NAME = 'tv5unis' _VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/(?P<id>[^/]+)(?:/saisons/(?P<season_number>\d+)/episodes/(?P<episode_number>\d+))?/?(?:[?#&]|$)' _TESTS = [{ 'url': 'https://www.tv5unis.ca/videos/watatatow/saisons/6/episodes/1', 'md5': 'a479907d2e531a73e1f8dc48d6388d02', 'info_dict': { 'id': 
'e5ee23a586c44612a56aad61accf16ef', 'ext': 'mp4', 'title': 'Je ne peux pas lui résister', 'description': "Atys, le nouveau concierge de l'école, a réussi à ébranler la confiance de Mado en affirmant qu\'une médaille, ce n'est que du métal. Comme Mado essaie de lui prouver que ses valeurs sont solides, il veut la mettre à l'épreuve...", 'subtitles': { 'fr': 'count:1', }, 'duration': 1370, 'age_limit': 8, 'tags': 'count:3', 'series': 'Watatatow', 'season_number': 6, 'episode_number': 1, }, }, { 'url': 'https://www.tv5unis.ca/videos/le-voyage-de-fanny', 'md5': '9ca80ebb575c681d10cae1adff3d4774', 'info_dict': { 'id': '726188eefe094d8faefb13381d42bc06', 'ext': 'mp4', 'title': 'Le voyage de Fanny', 'description': "Fanny, 12 ans, cachée dans un foyer loin de ses parents, s'occupe de ses deux soeurs. Devant fuir, Fanny prend la tête d'un groupe de huit enfants et s'engage dans un dangereux périple à travers la France occupée pour rejoindre la frontière suisse.", 'subtitles': { 'fr': 'count:1', }, 'duration': 5587.034, 'tags': 'count:4', }, }] _GQL_QUERY_NAME = 'productByRootProductSlug' @staticmethod def _gql_args(groups): args = 'rootProductSlug: "%s"' % groups[0] if groups[1]: args += ', seasonNumber: %s, episodeNumber: %s' % groups[1:] return args
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/channel9.py
youtube_dl/extractor/channel9.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, ExtractorError, int_or_none, parse_iso8601, qualities, unescapeHTML, ) class Channel9IE(InfoExtractor): IE_DESC = 'Channel 9' IE_NAME = 'channel9' _VALID_URL = r'https?://(?:www\.)?(?:channel9\.msdn\.com|s\.ch9\.ms)/(?P<contentpath>.+?)(?P<rss>/RSS)?/?(?:[?#&]|$)' _TESTS = [{ 'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002', 'md5': '32083d4eaf1946db6d454313f44510ca', 'info_dict': { 'id': '6c413323-383a-49dc-88f9-a22800cab024', 'ext': 'wmv', 'title': 'Developer Kick-Off Session: Stuff We Love', 'description': 'md5:b80bf9355a503c193aff7ec6cd5a7731', 'duration': 4576, 'thumbnail': r're:https?://.*\.jpg', 'timestamp': 1377717420, 'upload_date': '20130828', 'session_code': 'KOS002', 'session_room': 'Arena 1A', 'session_speakers': 'count:5', }, }, { 'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing', 'md5': 'dcf983ee6acd2088e7188c3cf79b46bc', 'info_dict': { 'id': 'fe8e435f-bb93-4e01-8e97-a28c01887024', 'ext': 'wmv', 'title': 'Self-service BI with Power BI - nuclear testing', 'description': 'md5:2d17fec927fc91e9e17783b3ecc88f54', 'duration': 1540, 'thumbnail': r're:https?://.*\.jpg', 'timestamp': 1386381991, 'upload_date': '20131207', 'authors': ['Mike Wilmot'], }, }, { # low quality mp4 is best 'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library', 'info_dict': { 'id': '33ad69d2-6a4e-4172-83a1-a523013dec76', 'ext': 'mp4', 'title': 'Ranges for the Standard Library', 'description': 'md5:9895e0a9fd80822d2f01c454b8f4a372', 'duration': 5646, 'thumbnail': r're:https?://.*\.jpg', 'upload_date': '20150930', 'timestamp': 1443640735, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://channel9.msdn.com/Events/DEVintersection/DEVintersection-2016/RSS', 'info_dict': { 'id': 'Events/DEVintersection/DEVintersection-2016', 'title': 'DEVintersection 2016 Orlando 
Sessions', }, 'playlist_mincount': 14, }, { 'url': 'https://channel9.msdn.com/Niners/Splendid22/Queue/76acff796e8f411184b008028e0d492b/RSS', 'only_matching': True, }, { 'url': 'https://channel9.msdn.com/Events/Speakers/scott-hanselman/RSS?UrlSafeName=scott-hanselman', 'only_matching': True, }] _RSS_URL = 'http://channel9.msdn.com/%s/RSS' @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+src=["\'](https?://channel9\.msdn\.com/(?:[^/]+/)+)player\b', webpage) def _extract_list(self, video_id, rss_url=None): if not rss_url: rss_url = self._RSS_URL % video_id rss = self._download_xml(rss_url, video_id, 'Downloading RSS') entries = [self.url_result(session_url.text, 'Channel9') for session_url in rss.findall('./channel/item/link')] title_text = rss.find('./channel/title').text return self.playlist_result(entries, video_id, title_text) def _real_extract(self, url): content_path, rss = re.match(self._VALID_URL, url).groups() if rss: return self._extract_list(content_path, url) webpage = self._download_webpage( url, content_path, 'Downloading web page') episode_data = self._search_regex( r"data-episode='([^']+)'", webpage, 'episode data', default=None) if episode_data: episode_data = self._parse_json(unescapeHTML( episode_data), content_path) content_id = episode_data['contentId'] is_session = '/Sessions(' in episode_data['api'] content_url = 'https://channel9.msdn.com/odata' + episode_data['api'] + '?$select=Captions,CommentCount,MediaLengthInSeconds,PublishedDate,Rating,RatingCount,Title,VideoMP4High,VideoMP4Low,VideoMP4Medium,VideoPlayerPreviewImage,VideoWMV,VideoWMVHQ,Views,' if is_session: content_url += 'Code,Description,Room,Slides,Speakers,ZipFile&$expand=Speakers' else: content_url += 'Authors,Body&$expand=Authors' content_data = self._download_json(content_url, content_id) title = content_data['Title'] QUALITIES = ( 'mp3', 'wmv', 'mp4', 'wmv-low', 'mp4-low', 'wmv-mid', 'mp4-mid', 'wmv-high', 'mp4-high', ) quality_key = qualities(QUALITIES) 
def quality(quality_id, format_url): return (len(QUALITIES) if '_Source.' in format_url else quality_key(quality_id)) formats = [] urls = set() SITE_QUALITIES = { 'MP3': 'mp3', 'MP4': 'mp4', 'Low Quality WMV': 'wmv-low', 'Low Quality MP4': 'mp4-low', 'Mid Quality WMV': 'wmv-mid', 'Mid Quality MP4': 'mp4-mid', 'High Quality WMV': 'wmv-high', 'High Quality MP4': 'mp4-high', } formats_select = self._search_regex( r'(?s)<select[^>]+name=["\']format[^>]+>(.+?)</select', webpage, 'formats select', default=None) if formats_select: for mobj in re.finditer( r'<option\b[^>]+\bvalue=(["\'])(?P<url>(?:(?!\1).)+)\1[^>]*>\s*(?P<format>[^<]+?)\s*<', formats_select): format_url = mobj.group('url') if format_url in urls: continue urls.add(format_url) format_id = mobj.group('format') quality_id = SITE_QUALITIES.get(format_id, format_id) formats.append({ 'url': format_url, 'format_id': quality_id, 'quality': quality(quality_id, format_url), 'vcodec': 'none' if quality_id == 'mp3' else None, }) API_QUALITIES = { 'VideoMP4Low': 'mp4-low', 'VideoWMV': 'wmv-mid', 'VideoMP4Medium': 'mp4-mid', 'VideoMP4High': 'mp4-high', 'VideoWMVHQ': 'wmv-hq', } for format_id, q in API_QUALITIES.items(): q_url = content_data.get(format_id) if not q_url or q_url in urls: continue urls.add(q_url) formats.append({ 'url': q_url, 'format_id': q, 'quality': quality(q, q_url), }) self._sort_formats(formats) slides = content_data.get('Slides') zip_file = content_data.get('ZipFile') if not formats and not slides and not zip_file: raise ExtractorError( 'None of recording, slides or zip are available for %s' % content_path) subtitles = {} for caption in content_data.get('Captions', []): caption_url = caption.get('Url') if not caption_url: continue subtitles.setdefault(caption.get('Language', 'en'), []).append({ 'url': caption_url, 'ext': 'vtt', }) common = { 'id': content_id, 'title': title, 'description': clean_html(content_data.get('Description') or content_data.get('Body')), 'thumbnail': 
content_data.get('VideoPlayerPreviewImage'), 'duration': int_or_none(content_data.get('MediaLengthInSeconds')), 'timestamp': parse_iso8601(content_data.get('PublishedDate')), 'avg_rating': int_or_none(content_data.get('Rating')), 'rating_count': int_or_none(content_data.get('RatingCount')), 'view_count': int_or_none(content_data.get('Views')), 'comment_count': int_or_none(content_data.get('CommentCount')), 'subtitles': subtitles, } if is_session: speakers = [] for s in content_data.get('Speakers', []): speaker_name = s.get('FullName') if not speaker_name: continue speakers.append(speaker_name) common.update({ 'session_code': content_data.get('Code'), 'session_room': content_data.get('Room'), 'session_speakers': speakers, }) else: authors = [] for a in content_data.get('Authors', []): author_name = a.get('DisplayName') if not author_name: continue authors.append(author_name) common['authors'] = authors contents = [] if slides: d = common.copy() d.update({'title': title + '-Slides', 'url': slides}) contents.append(d) if zip_file: d = common.copy() d.update({'title': title + '-Zip', 'url': zip_file}) contents.append(d) if formats: d = common.copy() d.update({'title': title, 'formats': formats}) contents.append(d) return self.playlist_result(contents) else: return self._extract_list(content_path)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/telemb.py
youtube_dl/extractor/telemb.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import remove_start class TeleMBIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html' _TESTS = [ { 'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html', 'md5': 'f45ea69878516ba039835794e0f8f783', 'info_dict': { 'id': '13466', 'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-', 'ext': 'mp4', 'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages', 'description': 'md5:bc5225f47b17c309761c856ad4776265', 'thumbnail': r're:^http://.*\.(?:jpg|png)$', } }, { # non-ASCII characters in download URL 'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html', 'md5': '6e9682736e5ccd4eab7f21e855350733', 'info_dict': { 'id': '13514', 'display_id': 'les-reportages-havre-incendie-mortel', 'ext': 'mp4', 'title': 'Havré - Incendie mortel - Les reportages', 'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a', 'thumbnail': r're:^http://.*\.(?:jpg|png)$', } }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) formats = [] for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage): fmt = { 'url': video_url, 'format_id': video_url.split(':')[0] } rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url) if rtmp: fmt.update({ 'play_path': rtmp.group('playpath'), 'app': rtmp.group('app'), 'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf', 'page_url': 'http://www.telemb.be', 'preference': -1, }) formats.append(fmt) self._sort_formats(formats) title = remove_start(self._og_search_title(webpage), 'TéléMB : ') description = self._html_search_regex( r'<meta property="og:description" content="(.+?)" />', webpage, 'description', 
fatal=False) thumbnail = self._og_search_thumbnail(webpage) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/beeg.py
youtube_dl/extractor/beeg.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( int_or_none, unified_timestamp, ) class BeegIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?beeg\.(?:com|porn(?:/video)?)/(?P<id>\d+)' _TESTS = [{ # api/v6 v1 'url': 'http://beeg.com/5416503', 'md5': 'a1a1b1a8bc70a89e49ccfd113aed0820', 'info_dict': { 'id': '5416503', 'ext': 'mp4', 'title': 'Sultry Striptease', 'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2', 'timestamp': 1391813355, 'upload_date': '20140207', 'duration': 383, 'tags': list, 'age_limit': 18, } }, { # api/v6 v2 'url': 'https://beeg.com/1941093077?t=911-1391', 'only_matching': True, }, { # api/v6 v2 w/o t 'url': 'https://beeg.com/1277207756', 'only_matching': True, }, { 'url': 'https://beeg.porn/video/5416503', 'only_matching': True, }, { 'url': 'https://beeg.porn/5416503', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) beeg_version = self._search_regex( r'beeg_version\s*=\s*([\da-zA-Z_-]+)', webpage, 'beeg version', default='1546225636701') if len(video_id) >= 10: query = { 'v': 2, } qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) t = qs.get('t', [''])[0].split('-') if len(t) > 1: query.update({ 's': t[0], 'e': t[1], }) else: query = {'v': 1} for api_path in ('', 'api.'): video = self._download_json( 'https://%sbeeg.com/api/v6/%s/video/%s' % (api_path, beeg_version, video_id), video_id, fatal=api_path == 'api.', query=query) if video: break formats = [] for format_id, video_url in video.items(): if not video_url: continue height = self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None) if not height: continue formats.append({ 'url': self._proto_relative_url( video_url.replace('{DATA_MARKERS}', 'data=pc_XX__%s_0' % beeg_version), 'https:'), 'format_id': format_id, 'height': int(height), }) self._sort_formats(formats) title = 
video['title'] video_id = compat_str(video.get('id') or video_id) display_id = video.get('code') description = video.get('desc') series = video.get('ps_name') timestamp = unified_timestamp(video.get('date')) duration = int_or_none(video.get('duration')) tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'series': series, 'timestamp': timestamp, 'duration': duration, 'tags': tags, 'formats': formats, 'age_limit': self._rta_search(webpage), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vidio.py
youtube_dl/extractor/vidio.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, str_or_none, strip_or_none, try_get, ) class VidioIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?vidio\.com/watch/(?P<id>\d+)-(?P<display_id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015', 'md5': 'cd2801394afc164e9775db6a140b91fe', 'info_dict': { 'id': '165683', 'display_id': 'dj_ambred-booyah-live-2015', 'ext': 'mp4', 'title': 'DJ_AMBRED - Booyah (Live 2015)', 'description': 'md5:27dc15f819b6a78a626490881adbadf8', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 149, 'like_count': int, 'uploader': 'TWELVE Pic', 'timestamp': 1444902800, 'upload_date': '20151015', 'uploader_id': 'twelvepictures', 'channel': 'Cover Music Video', 'channel_id': '280236', 'view_count': int, 'dislike_count': int, 'comment_count': int, 'tags': 'count:4', }, }, { 'url': 'https://www.vidio.com/watch/77949-south-korea-test-fires-missile-that-can-strike-all-of-the-north', 'only_matching': True, }] def _real_initialize(self): self._api_key = self._download_json( 'https://www.vidio.com/auth', None, data=b'')['api_key'] def _real_extract(self, url): video_id, display_id = re.match(self._VALID_URL, url).groups() data = self._download_json( 'https://api.vidio.com/videos/' + video_id, display_id, headers={ 'Content-Type': 'application/vnd.api+json', 'X-API-KEY': self._api_key, }) video = data['videos'][0] title = video['title'].strip() formats = self._extract_m3u8_formats( data['clips'][0]['hls_url'], display_id, 'mp4', 'm3u8_native') self._sort_formats(formats) get_first = lambda x: try_get(data, lambda y: y[x + 's'][0], dict) or {} channel = get_first('channel') user = get_first('user') username = user.get('username') get_count = lambda x: int_or_none(video.get('total_' + x)) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': strip_or_none(video.get('description')), 
'thumbnail': video.get('image_url_medium'), 'duration': int_or_none(video.get('duration')), 'like_count': get_count('likes'), 'formats': formats, 'uploader': user.get('name'), 'timestamp': parse_iso8601(video.get('created_at')), 'uploader_id': username, 'uploader_url': 'https://www.vidio.com/@' + username if username else None, 'channel': channel.get('name'), 'channel_id': str_or_none(channel.get('id')), 'view_count': get_count('view_count'), 'dislike_count': get_count('dislikes'), 'comment_count': get_count('comments'), 'tags': video.get('tag_list'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hornbunny.py
youtube_dl/extractor/hornbunny.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_duration, ) class HornBunnyIE(InfoExtractor): _VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html' _TEST = { 'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html', 'md5': 'e20fd862d1894b67564c96f180f43924', 'info_dict': { 'id': '5227', 'ext': 'mp4', 'title': 'panty slut jerk off instruction', 'duration': 550, 'age_limit': 18, 'view_count': int, 'thumbnail': r're:^https?://.*\.jpg$', } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0] duration = parse_duration(self._search_regex( r'<strong>Runtime:</strong>\s*([0-9:]+)</div>', webpage, 'duration', fatal=False)) view_count = int_or_none(self._search_regex( r'<strong>Views:</strong>\s*(\d+)</div>', webpage, 'view count', fatal=False)) info_dict.update({ 'id': video_id, 'title': title, 'duration': duration, 'view_count': view_count, 'age_limit': 18, }) return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/helsinki.py
youtube_dl/extractor/helsinki.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import js_to_json class HelsinkiIE(InfoExtractor): IE_DESC = 'helsinki.fi' _VALID_URL = r'https?://video\.helsinki\.fi/Arkisto/flash\.php\?id=(?P<id>\d+)' _TEST = { 'url': 'http://video.helsinki.fi/Arkisto/flash.php?id=20258', 'info_dict': { 'id': '20258', 'ext': 'mp4', 'title': 'Tietotekniikkafoorumi-iltapäivä', 'description': 'md5:f5c904224d43c133225130fe156a5ee0', }, 'params': { 'skip_download': True, # RTMP } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) params = self._parse_json(self._html_search_regex( r'(?s)jwplayer\("player"\).setup\((\{.*?\})\);', webpage, 'player code'), video_id, transform_source=js_to_json) formats = [{ 'url': s['file'], 'ext': 'mp4', } for s in params['sources']] self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage).replace('Video: ', ''), 'description': self._og_search_description(webpage), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/reddit.py
youtube_dl/extractor/reddit.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, float_or_none, try_get, unescapeHTML, url_or_none, ) class RedditIE(InfoExtractor): _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)' _TEST = { # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/ 'url': 'https://v.redd.it/zv89llsvexdz', 'md5': '0a070c53eba7ec4534d95a5a1259e253', 'info_dict': { 'id': 'zv89llsvexdz', 'ext': 'mp4', 'title': 'zv89llsvexdz', }, 'params': { 'format': 'bestvideo', }, } def _real_extract(self, url): video_id = self._match_id(url) formats = self._extract_m3u8_formats( 'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) formats.extend(self._extract_mpd_formats( 'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id, mpd_id='dash', fatal=False)) self._sort_formats(formats) return { 'id': video_id, 'title': video_id, 'formats': formats, } class RedditRIE(InfoExtractor): _VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))' _TESTS = [{ 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/', 'info_dict': { 'id': 'zv89llsvexdz', 'ext': 'mp4', 'title': 'That small heart attack.', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'thumbnails': 'count:4', 'timestamp': 1501941939, 'upload_date': '20170805', 'uploader': 'Antw87', 'duration': 12, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'age_limit': 0, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, }, { 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj', 'only_matching': True, }, { # imgur 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 'only_matching': True, }, { # imgur @ old reddit 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 'only_matching': True, }, { # streamable 'url': 
'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/', 'only_matching': True, }, { # youtube 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/', 'only_matching': True, }, { # reddit video @ nm reddit 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) url, video_id = mobj.group('url', 'id') video_id = self._match_id(url) data = self._download_json( url + '/.json', video_id)[0]['data']['children'][0]['data'] video_url = data['url'] # Avoid recursing into the same reddit URL if 'reddit.com/' in video_url and '/%s/' % video_id in video_url: raise ExtractorError('No media found', expected=True) over_18 = data.get('over_18') if over_18 is True: age_limit = 18 elif over_18 is False: age_limit = 0 else: age_limit = None thumbnails = [] def add_thumbnail(src): if not isinstance(src, dict): return thumbnail_url = url_or_none(src.get('url')) if not thumbnail_url: return thumbnails.append({ 'url': unescapeHTML(thumbnail_url), 'width': int_or_none(src.get('width')), 'height': int_or_none(src.get('height')), }) for image in try_get(data, lambda x: x['preview']['images']) or []: if not isinstance(image, dict): continue add_thumbnail(image.get('source')) resolutions = image.get('resolutions') if isinstance(resolutions, list): for resolution in resolutions: add_thumbnail(resolution) return { '_type': 'url_transparent', 'url': video_url, 'title': data.get('title'), 'thumbnails': thumbnails, 'timestamp': float_or_none(data.get('created_utc')), 'uploader': data.get('author'), 'duration': int_or_none(try_get( data, (lambda x: x['media']['reddit_video']['duration'], lambda x: x['secure_media']['reddit_video']['duration']))), 'like_count': int_or_none(data.get('ups')), 'dislike_count': int_or_none(data.get('downs')), 'comment_count': 
int_or_none(data.get('num_comments')), 'age_limit': age_limit, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/europa.py
youtube_dl/extractor/europa.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( int_or_none, orderedSet, parse_duration, qualities, unified_strdate, xpath_text ) class EuropaIE(InfoExtractor): _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)' _TESTS = [{ 'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758', 'md5': '574f080699ddd1e19a675b0ddf010371', 'info_dict': { 'id': 'I107758', 'ext': 'mp4', 'title': 'TRADE - Wikileaks on TTIP', 'description': 'NEW LIVE EC Midday press briefing of 11/08/2015', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20150811', 'duration': 34, 'view_count': int, 'formats': 'mincount:3', } }, { 'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786', 'only_matching': True, }, { 'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) playlist = self._download_xml( 'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id) def get_item(type_, preference): items = {} for item in playlist.findall('./info/%s/item' % type_): lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None) if lang and label: items[lang] = label.strip() for p in preference: if items.get(p): return items[p] query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) preferred_lang = query.get('sitelang', ('en', ))[0] preferred_langs = orderedSet((preferred_lang, 'en', 'int')) title = get_item('title', preferred_langs) or video_id description = get_item('description', preferred_langs) thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail') upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date')) duration = parse_duration(xpath_text(playlist, './info/duration', 
'duration')) view_count = int_or_none(xpath_text(playlist, './info/views', 'views')) language_preference = qualities(preferred_langs[::-1]) formats = [] for file_ in playlist.findall('./files/file'): video_url = xpath_text(file_, './url') if not video_url: continue lang = xpath_text(file_, './lg') formats.append({ 'url': video_url, 'format_id': lang, 'format_note': xpath_text(file_, './lglabel'), 'language_preference': language_preference(lang) }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'upload_date': upload_date, 'duration': duration, 'view_count': view_count, 'formats': formats }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/appleconnect.py
youtube_dl/extractor/appleconnect.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( str_to_int, ExtractorError ) class AppleConnectIE(InfoExtractor): _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/(?:id)?sa\.(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3', 'md5': 'c1d41f72c8bcaf222e089434619316e4', 'info_dict': { 'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3', 'ext': 'm4v', 'title': 'Energy', 'uploader': 'Drake', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20150710', 'timestamp': 1436545535, }, }, { 'url': 'https://itunes.apple.com/us/post/sa.0fe0229f-2457-11e5-9f40-1bb645f2d5d9', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) try: video_json = self._html_search_regex( r'class="auc-video-data">(\{.*?\})', webpage, 'json') except ExtractorError: raise ExtractorError('This post doesn\'t contain a video', expected=True) video_data = self._parse_json(video_json, video_id) timestamp = str_to_int(self._html_search_regex(r'data-timestamp="(\d+)"', webpage, 'timestamp')) like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count', default=None)) return { 'id': video_id, 'url': video_data['sslSrc'], 'title': video_data['title'], 'description': video_data['description'], 'uploader': video_data['artistName'], 'thumbnail': video_data['artworkUrl'], 'timestamp': timestamp, 'like_count': like_count, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hitrecord.py
youtube_dl/extractor/hitrecord.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( clean_html, float_or_none, int_or_none, try_get, ) class HitRecordIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hitrecord\.org/records/(?P<id>\d+)' _TEST = { 'url': 'https://hitrecord.org/records/2954362', 'md5': 'fe1cdc2023bce0bbb95c39c57426aa71', 'info_dict': { 'id': '2954362', 'ext': 'mp4', 'title': 'A Very Different World (HITRECORD x ACLU)', 'description': 'md5:e62defaffab5075a5277736bead95a3d', 'duration': 139.327, 'timestamp': 1471557582, 'upload_date': '20160818', 'uploader': 'Zuzi.C12', 'uploader_id': '362811', 'view_count': int, 'like_count': int, 'comment_count': int, 'tags': list, } } def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://hitrecord.org/api/web/records/%s' % video_id, video_id) title = video['title'] video_url = video['source_url']['mp4_url'] tags = None tags_list = try_get(video, lambda x: x['tags'], list) if tags_list: tags = [ t['text'] for t in tags_list if isinstance(t, dict) and t.get('text') and isinstance(t['text'], compat_str)] return { 'id': video_id, 'url': video_url, 'title': title, 'description': clean_html(video.get('body')), 'duration': float_or_none(video.get('duration'), 1000), 'timestamp': int_or_none(video.get('created_at_i')), 'uploader': try_get( video, lambda x: x['user']['username'], compat_str), 'uploader_id': try_get( video, lambda x: compat_str(x['user']['id'])), 'view_count': int_or_none(video.get('total_views_count')), 'like_count': int_or_none(video.get('hearts_count')), 'comment_count': int_or_none(video.get('comments_count')), 'tags': tags, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/lrt.py
youtube_dl/extractor/lrt.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, merge_dicts, ) class LRTIE(InfoExtractor): IE_NAME = 'lrt.lt' _VALID_URL = r'https?://(?:www\.)?lrt\.lt(?P<path>/mediateka/irasas/(?P<id>[0-9]+))' _TESTS = [{ # m3u8 download 'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene', 'md5': '85cb2bb530f31d91a9c65b479516ade4', 'info_dict': { 'id': '2000127261', 'ext': 'mp4', 'title': 'Greita ir gardu: Sicilijos įkvėpta klasikinių makaronų su baklažanais vakarienė', 'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa', 'duration': 3035, 'timestamp': 1604079000, 'upload_date': '20201030', }, }, { # direct mp3 download 'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/', 'md5': '389da8ca3cad0f51d12bed0c844f6a0a', 'info_dict': { 'id': '1013074524', 'ext': 'mp3', 'title': 'Kita tema 2016-09-05 15:05', 'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5', 'duration': 3008, 'view_count': int, 'like_count': int, }, }] def _extract_js_var(self, webpage, var_name, default): return self._search_regex( r'%s\s*=\s*(["\'])((?:(?!\1).)+)\1' % var_name, webpage, var_name.replace('_', ' '), default, group=2) def _real_extract(self, url): path, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, video_id) media_url = self._extract_js_var(webpage, 'main_url', path) media = self._download_json(self._extract_js_var( webpage, 'media_info_url', 'https://www.lrt.lt/servisai/stream_url/vod/media_info/'), video_id, query={'url': media_url}) jw_data = self._parse_jwplayer_data( media['playlist_item'], video_id, base_url=url) json_ld_data = self._search_json_ld(webpage, video_id) tags = [] for tag in (media.get('tags') or []): tag_name = tag.get('name') if not tag_name: continue tags.append(tag_name) clean_info = { 'description': clean_html(media.get('content')), 'tags': tags, } return 
merge_dicts(clean_info, jw_data, json_ld_data)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/adobetv.py
youtube_dl/extractor/adobetv.py
from __future__ import unicode_literals import functools import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( float_or_none, int_or_none, ISO639Utils, OnDemandPagedList, parse_duration, str_or_none, str_to_int, unified_strdate, ) class AdobeTVBaseIE(InfoExtractor): def _call_api(self, path, video_id, query, note=None): return self._download_json( 'http://tv.adobe.com/api/v4/' + path, video_id, note, query=query)['data'] def _parse_subtitles(self, video_data, url_key): subtitles = {} for translation in video_data.get('translations', []): vtt_path = translation.get(url_key) if not vtt_path: continue lang = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium']) subtitles.setdefault(lang, []).append({ 'ext': 'vtt', 'url': vtt_path, }) return subtitles def _parse_video_data(self, video_data): video_id = compat_str(video_data['id']) title = video_data['title'] s3_extracted = False formats = [] for source in video_data.get('videos', []): source_url = source.get('url') if not source_url: continue f = { 'format_id': source.get('quality_level'), 'fps': int_or_none(source.get('frame_rate')), 'height': int_or_none(source.get('height')), 'tbr': int_or_none(source.get('video_data_rate')), 'width': int_or_none(source.get('width')), 'url': source_url, } original_filename = source.get('original_filename') if original_filename: if not (f.get('height') and f.get('width')): mobj = re.search(r'_(\d+)x(\d+)', original_filename) if mobj: f.update({ 'height': int(mobj.group(2)), 'width': int(mobj.group(1)), }) if original_filename.startswith('s3://') and not s3_extracted: formats.append({ 'format_id': 'original', 'preference': 1, 'url': original_filename.replace('s3://', 'https://s3.amazonaws.com/'), }) s3_extracted = True formats.append(f) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'thumbnail': video_data.get('thumbnail'), 'upload_date': 
unified_strdate(video_data.get('start_date')), 'duration': parse_duration(video_data.get('duration')), 'view_count': str_to_int(video_data.get('playcount')), 'formats': formats, 'subtitles': self._parse_subtitles(video_data, 'vtt'), } class AdobeTVEmbedIE(AdobeTVBaseIE): IE_NAME = 'adobetv:embed' _VALID_URL = r'https?://tv\.adobe\.com/embed/\d+/(?P<id>\d+)' _TEST = { 'url': 'https://tv.adobe.com/embed/22/4153', 'md5': 'c8c0461bf04d54574fc2b4d07ac6783a', 'info_dict': { 'id': '4153', 'ext': 'flv', 'title': 'Creating Graphics Optimized for BlackBerry', 'description': 'md5:eac6e8dced38bdaae51cd94447927459', 'thumbnail': r're:https?://.*\.jpg$', 'upload_date': '20091109', 'duration': 377, 'view_count': int, }, } def _real_extract(self, url): video_id = self._match_id(url) video_data = self._call_api( 'episode/' + video_id, video_id, {'disclosure': 'standard'})[0] return self._parse_video_data(video_data) class AdobeTVIE(AdobeTVBaseIE): IE_NAME = 'adobetv' _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?watch/(?P<show_urlname>[^/]+)/(?P<id>[^/]+)' _TEST = { 'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/', 'md5': '9bc5727bcdd55251f35ad311ca74fa1e', 'info_dict': { 'id': '10981', 'ext': 'mp4', 'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop', 'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311', 'thumbnail': r're:https?://.*\.jpg$', 'upload_date': '20110914', 'duration': 60, 'view_count': int, }, } def _real_extract(self, url): language, show_urlname, urlname = re.match(self._VALID_URL, url).groups() if not language: language = 'en' video_data = self._call_api( 'episode/get', urlname, { 'disclosure': 'standard', 'language': language, 'show_urlname': show_urlname, 'urlname': urlname, })[0] return self._parse_video_data(video_data) class AdobeTVPlaylistBaseIE(AdobeTVBaseIE): _PAGE_SIZE = 25 def _fetch_page(self, display_id, query, page): 
page += 1 query['page'] = page for element_data in self._call_api( self._RESOURCE, display_id, query, 'Download Page %d' % page): yield self._process_data(element_data) def _extract_playlist_entries(self, display_id, query): return OnDemandPagedList(functools.partial( self._fetch_page, display_id, query), self._PAGE_SIZE) class AdobeTVShowIE(AdobeTVPlaylistBaseIE): IE_NAME = 'adobetv:show' _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?show/(?P<id>[^/]+)' _TEST = { 'url': 'http://tv.adobe.com/show/the-complete-picture-with-julieanne-kost', 'info_dict': { 'id': '36', 'title': 'The Complete Picture with Julieanne Kost', 'description': 'md5:fa50867102dcd1aa0ddf2ab039311b27', }, 'playlist_mincount': 136, } _RESOURCE = 'episode' _process_data = AdobeTVBaseIE._parse_video_data def _real_extract(self, url): language, show_urlname = re.match(self._VALID_URL, url).groups() if not language: language = 'en' query = { 'disclosure': 'standard', 'language': language, 'show_urlname': show_urlname, } show_data = self._call_api( 'show/get', show_urlname, query)[0] return self.playlist_result( self._extract_playlist_entries(show_urlname, query), str_or_none(show_data.get('id')), show_data.get('show_name'), show_data.get('show_description')) class AdobeTVChannelIE(AdobeTVPlaylistBaseIE): IE_NAME = 'adobetv:channel' _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?channel/(?P<id>[^/]+)(?:/(?P<category_urlname>[^/]+))?' 
_TEST = { 'url': 'http://tv.adobe.com/channel/development', 'info_dict': { 'id': 'development', }, 'playlist_mincount': 96, } _RESOURCE = 'show' def _process_data(self, show_data): return self.url_result( show_data['url'], 'AdobeTVShow', str_or_none(show_data.get('id'))) def _real_extract(self, url): language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups() if not language: language = 'en' query = { 'channel_urlname': channel_urlname, 'language': language, } if category_urlname: query['category_urlname'] = category_urlname return self.playlist_result( self._extract_playlist_entries(channel_urlname, query), channel_urlname) class AdobeTVVideoIE(AdobeTVBaseIE): IE_NAME = 'adobetv:video' _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)' _TEST = { # From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners 'url': 'https://video.tv.adobe.com/v/2456/', 'md5': '43662b577c018ad707a63766462b1e87', 'info_dict': { 'id': '2456', 'ext': 'mp4', 'title': 'New experience with Acrobat DC', 'description': 'New experience with Acrobat DC', 'duration': 248.667, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) video_data = self._parse_json(self._search_regex( r'var\s+bridge\s*=\s*([^;]+);', webpage, 'bridged data'), video_id) title = video_data['title'] formats = [] sources = video_data.get('sources') or [] for source in sources: source_src = source.get('src') if not source_src: continue formats.append({ 'filesize': int_or_none(source.get('kilobytes') or None, invscale=1000), 'format_id': '-'.join(filter(None, [source.get('format'), source.get('label')])), 'height': int_or_none(source.get('height') or None), 'tbr': int_or_none(source.get('bitrate') or None), 'width': int_or_none(source.get('width') or None), 'url': source_src, }) self._sort_formats(formats) # For both metadata and downloaded files the duration varies 
among # formats. I just pick the max one duration = max(filter(None, [ float_or_none(source.get('duration'), scale=1000) for source in sources])) return { 'id': video_id, 'formats': formats, 'title': title, 'description': video_data.get('description'), 'thumbnail': video_data.get('video', {}).get('poster'), 'duration': duration, 'subtitles': self._parse_subtitles(video_data, 'vttPath'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tube8.py
youtube_dl/extractor/tube8.py
from __future__ import unicode_literals import re from ..utils import ( int_or_none, str_to_int, ) from .keezmovies import KeezMoviesIE class Tube8IE(KeezMoviesIE): _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.tube8.com/teen/kasia-music-video/229795/', 'md5': '65e20c48e6abff62ed0c3965fff13a39', 'info_dict': { 'id': '229795', 'display_id': 'kasia-music-video', 'ext': 'mp4', 'description': 'hot teen Kasia grinding', 'uploader': 'unknown', 'title': 'Kasia music video', 'age_limit': 18, 'duration': 230, 'categories': ['Teen'], 'tags': ['dancing'], }, }, { 'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?tube8\.com/embed/(?:[^/]+/)+\d+)', webpage) def _real_extract(self, url): webpage, info = self._extract_info(url) if not info['title']: info['title'] = self._html_search_regex( r'videoTitle\s*=\s*"([^"]+)', webpage, 'title') description = self._html_search_regex( r'(?s)Description:</dt>\s*<dd>(.+?)</dd>', webpage, 'description', fatal=False) uploader = self._html_search_regex( r'<span class="username">\s*(.+?)\s*<', webpage, 'uploader', fatal=False) like_count = int_or_none(self._search_regex( r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False)) dislike_count = int_or_none(self._search_regex( r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False)) view_count = str_to_int(self._search_regex( r'Views:\s*</dt>\s*<dd>([\d,\.]+)', webpage, 'view count', fatal=False)) comment_count = str_to_int(self._search_regex( r'<span id="allCommentsCount">(\d+)</span>', webpage, 'comment count', fatal=False)) category = self._search_regex( r'Category:\s*</dt>\s*<dd>\s*<a[^>]+href=[^>]+>([^<]+)', webpage, 'category', fatal=False) categories = [category] if category else None 
tags_str = self._search_regex( r'(?s)Tags:\s*</dt>\s*<dd>(.+?)</(?!a)', webpage, 'tags', fatal=False) tags = [t for t in re.findall( r'<a[^>]+href=[^>]+>([^<]+)', tags_str)] if tags_str else None info.update({ 'description': description, 'uploader': uploader, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'comment_count': comment_count, 'categories': categories, 'tags': tags, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vidme.py
youtube_dl/extractor/vidme.py
from __future__ import unicode_literals import itertools from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( ExtractorError, int_or_none, float_or_none, parse_iso8601, url_or_none, ) class VidmeIE(InfoExtractor): IE_NAME = 'vidme' _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{,5})(?:[^\da-zA-Z]|$)' _TESTS = [{ 'url': 'https://vid.me/QNB', 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82', 'info_dict': { 'id': 'QNB', 'ext': 'mp4', 'title': 'Fishing for piranha - the easy way', 'description': 'source: https://www.facebook.com/photo.php?v=312276045600871', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1406313244, 'upload_date': '20140725', 'age_limit': 0, 'duration': 119.92, 'view_count': int, 'like_count': int, 'comment_count': int, }, }, { 'url': 'https://vid.me/Gc6M', 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82', 'info_dict': { 'id': 'Gc6M', 'ext': 'mp4', 'title': 'O Mere Dil ke chain - Arnav and Khushi VM', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1441211642, 'upload_date': '20150902', 'uploader': 'SunshineM', 'uploader_id': '3552827', 'age_limit': 0, 'duration': 223.72, 'view_count': int, 'like_count': int, 'comment_count': int, }, 'params': { 'skip_download': True, }, }, { # tests uploader field 'url': 'https://vid.me/4Iib', 'info_dict': { 'id': '4Iib', 'ext': 'mp4', 'title': 'The Carver', 'description': 'md5:e9c24870018ae8113be936645b93ba3c', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1433203629, 'upload_date': '20150602', 'uploader': 'Thomas', 'uploader_id': '109747', 'age_limit': 0, 'duration': 97.859999999999999, 'view_count': int, 'like_count': int, 'comment_count': int, }, 'params': { 'skip_download': True, }, }, { # nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching 'url': 'https://vid.me/e/Wmur', 'info_dict': { 'id': 'Wmur', 'ext': 'mp4', 'title': 'naked smoking & stretching', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1430931613, 'upload_date': 
'20150506', 'uploader': 'naked-yogi', 'uploader_id': '1638622', 'age_limit': 18, 'duration': 653.26999999999998, 'view_count': int, 'like_count': int, 'comment_count': int, }, 'params': { 'skip_download': True, }, }, { # nsfw, user-disabled 'url': 'https://vid.me/dzGJ', 'only_matching': True, }, { # suspended 'url': 'https://vid.me/Ox3G', 'only_matching': True, }, { # deleted 'url': 'https://vid.me/KTPm', 'only_matching': True, }, { # no formats in the API response 'url': 'https://vid.me/e5g', 'info_dict': { 'id': 'e5g', 'ext': 'mp4', 'title': 'Video upload (e5g)', 'thumbnail': r're:^https?://.*\.jpg', 'timestamp': 1401480195, 'upload_date': '20140530', 'uploader': None, 'uploader_id': None, 'age_limit': 0, 'duration': 483, 'view_count': int, 'like_count': int, 'comment_count': int, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) try: response = self._download_json( 'https://api.vid.me/videoByUrl/%s' % video_id, video_id) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: response = self._parse_json(e.cause.read(), video_id) else: raise error = response.get('error') if error: raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error), expected=True) video = response['video'] if video.get('state') == 'deleted': raise ExtractorError( 'Vidme said: Sorry, this video has been deleted.', expected=True) if video.get('state') in ('user-disabled', 'suspended'): raise ExtractorError( 'Vidme said: This video has been suspended either due to a copyright claim, ' 'or for violating the terms of use.', expected=True) formats = [] for f in video.get('formats', []): format_url = url_or_none(f.get('uri')) if not format_url: continue format_type = f.get('type') if format_type == 'dash': formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id='dash', fatal=False)) elif format_type == 'hls': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', 
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'format_id': f.get('type'), 'url': format_url, 'width': int_or_none(f.get('width')), 'height': int_or_none(f.get('height')), 'preference': 0 if f.get('type', '').endswith( 'clip') else 1, }) if not formats and video.get('complete_url'): formats.append({ 'url': video.get('complete_url'), 'width': int_or_none(video.get('width')), 'height': int_or_none(video.get('height')), }) self._sort_formats(formats) title = video['title'] description = video.get('description') thumbnail = video.get('thumbnail_url') timestamp = parse_iso8601(video.get('date_created'), ' ') uploader = video.get('user', {}).get('username') uploader_id = video.get('user', {}).get('user_id') age_limit = 18 if video.get('nsfw') is True else 0 duration = float_or_none(video.get('duration')) view_count = int_or_none(video.get('view_count')) like_count = int_or_none(video.get('likes_count')) comment_count = int_or_none(video.get('comment_count')) return { 'id': video_id, 'title': title or 'Video upload (%s)' % video_id, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_id': uploader_id, 'age_limit': age_limit, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'like_count': like_count, 'comment_count': comment_count, 'formats': formats, } class VidmeListBaseIE(InfoExtractor): # Max possible limit according to https://docs.vid.me/#api-Videos-List _LIMIT = 100 def _entries(self, user_id, user_name): for page_num in itertools.count(1): page = self._download_json( 'https://api.vid.me/videos/%s?user=%s&limit=%d&offset=%d' % (self._API_ITEM, user_id, self._LIMIT, (page_num - 1) * self._LIMIT), user_name, 'Downloading user %s page %d' % (self._API_ITEM, page_num)) videos = page.get('videos', []) if not videos: break for video in videos: video_url = video.get('full_url') or video.get('embed_url') if video_url: yield self.url_result(video_url, VidmeIE.ie_key()) total = 
int_or_none(page.get('page', {}).get('total')) if total and self._LIMIT * page_num >= total: break def _real_extract(self, url): user_name = self._match_id(url) user_id = self._download_json( 'https://api.vid.me/userByUsername?username=%s' % user_name, user_name)['user']['user_id'] return self.playlist_result( self._entries(user_id, user_name), user_id, '%s - %s' % (user_name, self._TITLE)) class VidmeUserIE(VidmeListBaseIE): IE_NAME = 'vidme:user' _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z_-]{6,})(?!/likes)(?:[^\da-zA-Z_-]|$)' _API_ITEM = 'list' _TITLE = 'Videos' _TESTS = [{ 'url': 'https://vid.me/MasakoX', 'info_dict': { 'id': '16112341', 'title': 'MasakoX - %s' % _TITLE, }, 'playlist_mincount': 191, }, { 'url': 'https://vid.me/unsQuare_netWork', 'only_matching': True, }] class VidmeUserLikesIE(VidmeListBaseIE): IE_NAME = 'vidme:user:likes' _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z_-]{6,})/likes' _API_ITEM = 'likes' _TITLE = 'Likes' _TESTS = [{ 'url': 'https://vid.me/ErinAlexis/likes', 'info_dict': { 'id': '6483530', 'title': 'ErinAlexis - %s' % _TITLE, }, 'playlist_mincount': 415, }, { 'url': 'https://vid.me/Kaleidoscope-Ish/likes', 'only_matching': True, }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/commonprotocols.py
youtube_dl/extractor/commonprotocols.py
from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urlparse, ) class RtmpIE(InfoExtractor): IE_DESC = False # Do not list _VALID_URL = r'(?i)rtmp[est]?://.+' _TESTS = [{ 'url': 'rtmp://cp44293.edgefcs.net/ondemand?auth=daEcTdydfdqcsb8cZcDbAaCbhamacbbawaS-bw7dBb-bWG-GqpGFqCpNCnGoyL&aifp=v001&slist=public/unsecure/audio/2c97899446428e4301471a8cb72b4b97--audio--pmg-20110908-0900a_flv_aac_med_int.mp4', 'only_matching': True, }, { 'url': 'rtmp://edge.live.hitbox.tv/live/dimak', 'only_matching': True, }] def _real_extract(self, url): video_id = self._generic_id(url) title = self._generic_title(url) return { 'id': video_id, 'title': title, 'formats': [{ 'url': url, 'ext': 'flv', 'format_id': compat_urlparse.urlparse(url).scheme, }], } class MmsIE(InfoExtractor): IE_DESC = False # Do not list _VALID_URL = r'(?i)mms://.+' _TEST = { # Direct MMS link 'url': 'mms://kentro.kaist.ac.kr/200907/MilesReid(0709).wmv', 'info_dict': { 'id': 'MilesReid(0709)', 'ext': 'wmv', 'title': 'MilesReid(0709)', }, 'params': { 'skip_download': True, # rtsp downloads, requiring mplayer or mpv }, } def _real_extract(self, url): video_id = self._generic_id(url) title = self._generic_title(url) return { 'id': video_id, 'title': title, 'url': url, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/vice.py
youtube_dl/extractor/vice.py
# coding: utf-8 from __future__ import unicode_literals import functools import hashlib import json import random import re import time from .adobepass import AdobePassIE from .common import InfoExtractor from .youtube import YoutubeIE from ..compat import ( compat_HTTPError, compat_str, ) from ..utils import ( clean_html, ExtractorError, int_or_none, OnDemandPagedList, parse_age_limit, str_or_none, try_get, ) class ViceBaseIE(InfoExtractor): def _call_api(self, resource, resource_key, resource_id, locale, fields, args=''): return self._download_json( 'https://video.vice.com/api/v1/graphql', resource_id, query={ 'query': '''{ %s(locale: "%s", %s: "%s"%s) { %s } }''' % (resource, locale, resource_key, resource_id, args, fields), })['data'][resource] class ViceIE(ViceBaseIE, AdobePassIE): IE_NAME = 'vice' _VALID_URL = r'https?://(?:(?:video|vms)\.vice|(?:www\.)?vice(?:land|tv))\.com/(?P<locale>[^/]+)/(?:video/[^/]+|embed)/(?P<id>[\da-f]{24})' _TESTS = [{ 'url': 'https://video.vice.com/en_us/video/pet-cremator/58c69e38a55424f1227dc3f7', 'info_dict': { 'id': '58c69e38a55424f1227dc3f7', 'ext': 'mp4', 'title': '10 Questions You Always Wanted To Ask: Pet Cremator', 'description': 'md5:fe856caacf61fe0e74fab15ce2b07ca5', 'uploader': 'vice', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1489664942, 'upload_date': '20170316', 'age_limit': 14, }, 'params': { # m3u8 download 'skip_download': True, }, }, { # geo restricted to US 'url': 'https://video.vice.com/en_us/video/the-signal-from-tolva/5816510690b70e6c5fd39a56', 'info_dict': { 'id': '5816510690b70e6c5fd39a56', 'ext': 'mp4', 'uploader': 'vice', 'title': 'The Signal From Tölva', 'description': 'md5:3927e3c79f9e8094606a2b3c5b5e55d5', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1477941983, 'upload_date': '20161031', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'https://video.vice.com/alps/video/ulfs-wien-beruchtigste-grafitti-crew-part-1/581b12b60a0e1f4c0fb6ea2f', 'info_dict': { 
'id': '581b12b60a0e1f4c0fb6ea2f', 'ext': 'mp4', 'title': 'ULFs - Wien berüchtigste Grafitti Crew - Part 1', 'description': 'Zwischen Hinterzimmer-Tattoos und U-Bahnschächten erzählen uns die Ulfs, wie es ist, "süchtig nach Sachbeschädigung" zu sein.', 'uploader': 'vice', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1485368119, 'upload_date': '20170125', 'age_limit': 14, }, 'params': { # AES-encrypted m3u8 'skip_download': True, }, }, { 'url': 'https://video.vice.com/en_us/video/pizza-show-trailer/56d8c9a54d286ed92f7f30e4', 'only_matching': True, }, { 'url': 'https://video.vice.com/en_us/embed/57f41d3556a0a80f54726060', 'only_matching': True, }, { 'url': 'https://vms.vice.com/en_us/video/preplay/58c69e38a55424f1227dc3f7', 'only_matching': True, }, { 'url': 'https://www.viceland.com/en_us/video/thursday-march-1-2018/5a8f2d7ff1cdb332dd446ec1', 'only_matching': True, }] @staticmethod def _extract_urls(webpage): return re.findall( r'<iframe\b[^>]+\bsrc=["\']((?:https?:)?//video\.vice\.com/[^/]+/embed/[\da-f]{24})', webpage) @staticmethod def _extract_url(webpage): urls = ViceIE._extract_urls(webpage) return urls[0] if urls else None def _real_extract(self, url): locale, video_id = re.match(self._VALID_URL, url).groups() video = self._call_api('videos', 'id', video_id, locale, '''body locked rating thumbnail_url title''')[0] title = video['title'].strip() rating = video.get('rating') query = {} if video.get('locked'): resource = self._get_mvpd_resource( 'VICELAND', title, video_id, rating) query['tvetoken'] = self._extract_mvpd_auth( url, video_id, 'VICELAND', resource) # signature generation algorithm is reverse engineered from signatureGenerator in # webpack:///../shared/~/vice-player/dist/js/vice-player.js in # https://www.viceland.com/assets/common/js/web.vendor.bundle.js # new JS is located here https://vice-web-statics-cdn.vice.com/vice-player/player-embed.js exp = int(time.time()) + 1440 query.update({ 'exp': exp, 'sign': hashlib.sha512(('%s:GET:%d' % 
(video_id, exp)).encode()).hexdigest(), 'skipadstitching': 1, 'platform': 'desktop', 'rn': random.randint(10000, 100000), }) try: preplay = self._download_json( 'https://vms.vice.com/%s/video/preplay/%s' % (locale, video_id), video_id, query=query) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401): error = json.loads(e.cause.read().decode()) error_message = error.get('error_description') or error['details'] raise ExtractorError('%s said: %s' % ( self.IE_NAME, error_message), expected=True) raise video_data = preplay['video'] formats = self._extract_m3u8_formats( preplay['playURL'], video_id, 'mp4', 'm3u8_native') self._sort_formats(formats) episode = video_data.get('episode') or {} channel = video_data.get('channel') or {} season = video_data.get('season') or {} subtitles = {} for subtitle in preplay.get('subtitleURLs', []): cc_url = subtitle.get('url') if not cc_url: continue language_code = try_get(subtitle, lambda x: x['languages'][0]['language_code'], compat_str) or 'en' subtitles.setdefault(language_code, []).append({ 'url': cc_url, }) return { 'formats': formats, 'id': video_id, 'title': title, 'description': clean_html(video.get('body')), 'thumbnail': video.get('thumbnail_url'), 'duration': int_or_none(video_data.get('video_duration')), 'timestamp': int_or_none(video_data.get('created_at'), 1000), 'age_limit': parse_age_limit(video_data.get('video_rating') or rating), 'series': try_get(video_data, lambda x: x['show']['base']['display_title'], compat_str), 'episode_number': int_or_none(episode.get('episode_number')), 'episode_id': str_or_none(episode.get('id') or video_data.get('episode_id')), 'season_number': int_or_none(season.get('season_number')), 'season_id': str_or_none(season.get('id') or video_data.get('season_id')), 'uploader': channel.get('name'), 'uploader_id': str_or_none(channel.get('id')), 'subtitles': subtitles, } class ViceShowIE(ViceBaseIE): IE_NAME = 'vice:show' _VALID_URL = 
r'https?://(?:video\.vice|(?:www\.)?vice(?:land|tv))\.com/(?P<locale>[^/]+)/show/(?P<id>[^/?#&]+)' _PAGE_SIZE = 25 _TESTS = [{ 'url': 'https://video.vice.com/en_us/show/fck-thats-delicious', 'info_dict': { 'id': '57a2040c8cb727dec794c901', 'title': 'F*ck, That’s Delicious', 'description': 'The life and eating habits of rap’s greatest bon vivant, Action Bronson.', }, 'playlist_mincount': 64, }, { 'url': 'https://www.vicetv.com/en_us/show/fck-thats-delicious', 'only_matching': True, }] def _fetch_page(self, locale, show_id, page): videos = self._call_api('videos', 'show_id', show_id, locale, '''body id url''', ', page: %d, per_page: %d' % (page + 1, self._PAGE_SIZE)) for video in videos: yield self.url_result( video['url'], ViceIE.ie_key(), video.get('id')) def _real_extract(self, url): locale, display_id = re.match(self._VALID_URL, url).groups() show = self._call_api('shows', 'slug', display_id, locale, '''dek id title''')[0] show_id = show['id'] entries = OnDemandPagedList( functools.partial(self._fetch_page, locale, show_id), self._PAGE_SIZE) return self.playlist_result( entries, show_id, show.get('title'), show.get('dek')) class ViceArticleIE(ViceBaseIE): IE_NAME = 'vice:article' _VALID_URL = r'https://(?:www\.)?vice\.com/(?P<locale>[^/]+)/article/(?:[0-9a-z]{6}/)?(?P<id>[^?#]+)' _TESTS = [{ 'url': 'https://www.vice.com/en_us/article/on-set-with-the-woman-making-mormon-porn-in-utah', 'info_dict': { 'id': '58dc0a3dee202d2a0ccfcbd8', 'ext': 'mp4', 'title': 'Mormon War on Porn', 'description': 'md5:1c5d91fe25fa8aa304f9def118b92dbf', 'uploader': 'vice', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1491883129, 'upload_date': '20170411', 'age_limit': 17, }, 'params': { # AES-encrypted m3u8 'skip_download': True, }, 'add_ie': [ViceIE.ie_key()], }, { 'url': 'https://www.vice.com/en_us/article/how-to-hack-a-car', 'md5': '13010ee0bc694ea87ec40724397c2349', 'info_dict': { 'id': '3jstaBeXgAs', 'ext': 'mp4', 'title': 'How to Hack a Car: Phreaked Out (Episode 2)', 
'description': 'md5:ee95453f7ff495db8efe14ae8bf56f30', 'uploader': 'Motherboard', 'uploader_id': 'MotherboardTV', 'upload_date': '20140529', }, 'add_ie': [YoutubeIE.ie_key()], }, { 'url': 'https://www.vice.com/en_us/article/znm9dx/karley-sciortino-slutever-reloaded', 'md5': 'a7ecf64ee4fa19b916c16f4b56184ae2', 'info_dict': { 'id': '57f41d3556a0a80f54726060', 'ext': 'mp4', 'title': "Making The World's First Male Sex Doll", 'description': 'md5:19b00b215b99961cf869c40fbe9df755', 'uploader': 'vice', 'uploader_id': '57a204088cb727dec794c67b', 'timestamp': 1476919911, 'upload_date': '20161019', 'age_limit': 17, }, 'params': { 'skip_download': True, 'format': 'bestvideo', }, 'add_ie': [ViceIE.ie_key()], }, { 'url': 'https://www.vice.com/en_us/article/cowboy-capitalists-part-1', 'only_matching': True, }, { 'url': 'https://www.vice.com/ru/article/big-night-out-ibiza-clive-martin-229', 'only_matching': True, }] def _real_extract(self, url): locale, display_id = re.match(self._VALID_URL, url).groups() article = self._call_api('articles', 'slug', display_id, locale, '''body embed_code''')[0] body = article['body'] def _url_res(video_url, ie_key): return { '_type': 'url_transparent', 'url': video_url, 'display_id': display_id, 'ie_key': ie_key, } vice_url = ViceIE._extract_url(body) if vice_url: return _url_res(vice_url, ViceIE.ie_key()) embed_code = self._search_regex( r'embedCode=([^&\'"]+)', body, 'ooyala embed code', default=None) if embed_code: return _url_res('ooyala:%s' % embed_code, 'Ooyala') youtube_url = YoutubeIE._extract_url(body) if youtube_url: return _url_res(youtube_url, YoutubeIE.ie_key()) video_url = self._html_search_regex( r'data-video-url="([^"]+)"', article['embed_code'], 'video URL') return _url_res(video_url, ViceIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/youjizz.py
youtube_dl/extractor/youjizz.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_duration, url_or_none, ) class YouJizzIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/(?:[^/#?]*-(?P<id>\d+)\.html|embed/(?P<embed_id>\d+))' _TESTS = [{ 'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html', 'md5': 'b1e1dfaa8bb9537d8b84eeda9cf4acf4', 'info_dict': { 'id': '2189178', 'ext': 'mp4', 'title': 'Zeichentrick 1', 'age_limit': 18, 'duration': 2874, } }, { 'url': 'http://www.youjizz.com/videos/-2189178.html', 'only_matching': True, }, { 'url': 'https://www.youjizz.com/videos/embed/31991001', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('embed_id') webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<title>(.+?)</title>', webpage, 'title') formats = [] encodings = self._parse_json( self._search_regex( r'[Ee]ncodings\s*=\s*(\[.+?\]);\n', webpage, 'encodings', default='[]'), video_id, fatal=False) for encoding in encodings: if not isinstance(encoding, dict): continue format_url = url_or_none(encoding.get('filename')) if not format_url: continue if determine_ext(format_url) == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: format_id = encoding.get('name') or encoding.get('quality') height = int_or_none(self._search_regex( r'^(\d+)[pP]', format_id, 'height', default=None)) formats.append({ 'url': format_url, 'format_id': format_id, 'height': height, }) if formats: info_dict = { 'formats': formats, } else: # YouJizz's HTML5 player has invalid HTML webpage = webpage.replace('"controls', '" controls') info_dict = self._parse_html5_media_entries( url, webpage, video_id)[0] duration = parse_duration(self._search_regex( r'<strong>Runtime:</strong>([^<]+)', webpage, 'duration', 
default=None)) uploader = self._search_regex( r'<strong>Uploaded By:.*?<a[^>]*>([^<]+)', webpage, 'uploader', default=None) info_dict.update({ 'id': video_id, 'title': title, 'age_limit': self._rta_search(webpage), 'duration': duration, 'uploader': uploader, }) return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/weibo.py
youtube_dl/extractor/weibo.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor

import json
import random
import re

from ..compat import (
    compat_parse_qs,
    compat_str,
)
from ..utils import (
    js_to_json,
    strip_jsonp,
    urlencode_postdata,
)


class WeiboIE(InfoExtractor):
    """Extractor for desktop weibo.com status pages containing a video."""
    _VALID_URL = r'https?://(?:www\.)?weibo\.com/[0-9]+/(?P<id>[a-zA-Z0-9]+)'
    _TEST = {
        'url': 'https://weibo.com/6275294458/Fp6RGfbff?type=comment',
        'info_dict': {
            'id': 'Fp6RGfbff',
            'ext': 'mp4',
            'title': 'You should have servants to massage you,... 来自Hosico_猫 - 微博',
        }
    }

    def _real_extract(self, url):
        """Extract video formats from a weibo.com status page.

        On a first visit weibo.com redirects to passport.weibo.com; in that
        case we run the site's "genvisitor" handshake to obtain visitor
        cookies, then re-fetch the original page before scraping it.
        """
        video_id = self._match_id(url)
        # to get Referer url for genvisitor
        webpage, urlh = self._download_webpage_handle(url, video_id)

        visitor_url = urlh.geturl()

        if 'passport.weibo.com' in visitor_url:
            # first visit: no visitor cookies yet, so we were redirected to
            # the passport host; perform the visitor handshake.
            # The 'fp' field mimics a browser fingerprint; the values are
            # fixed and apparently accepted by the endpoint as-is.
            visitor_data = self._download_json(
                'https://passport.weibo.com/visitor/genvisitor', video_id,
                note='Generating first-visit data',
                transform_source=strip_jsonp,
                headers={'Referer': visitor_url},
                data=urlencode_postdata({
                    'cb': 'gen_callback',
                    'fp': json.dumps({
                        'os': '2',
                        'browser': 'Gecko57,0,0,0',
                        'fonts': 'undefined',
                        'screenInfo': '1440*900*24',
                        'plugins': '',
                    }),
                }))

            tid = visitor_data['data']['tid']
            # confidence is zero-padded to three digits as the site expects
            cnfd = '%03d' % visitor_data['data']['confidence']

            # Second handshake step ("incarnate") sets the visitor cookies.
            self._download_webpage(
                'https://passport.weibo.com/visitor/visitor', video_id,
                note='Running first-visit callback',
                query={
                    'a': 'incarnate',
                    't': tid,
                    'w': 2,
                    'c': cnfd,
                    'cb': 'cross_domain',
                    'from': 'weibo',
                    '_rand': random.random(),
                })

            # Now that cookies are set, fetch the real status page.
            webpage = self._download_webpage(
                url, video_id, note='Revisiting webpage')

        title = self._html_search_regex(
            r'<title>(.+?)</title>', webpage, 'title')

        # The player markup carries a backslash-escaped, query-string-encoded
        # map of resolution -> URL list in its video-sources attribute.
        video_formats = compat_parse_qs(self._search_regex(
            r'video-sources=\\\"(.+?)\"', webpage, 'video_sources'))

        formats = []
        supported_resolutions = (480, 720)
        for res in supported_resolutions:
            vid_urls = video_formats.get(compat_str(res))
            if not vid_urls or not isinstance(vid_urls, list):
                continue

            vid_url = vid_urls[0]
            formats.append({
                'url': vid_url,
                'height': res,
            })

        self._sort_formats(formats)

        uploader = self._og_search_property(
            'nick-name', webpage, 'uploader', default=None)

        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'formats': formats
        }


class WeiboMobileIE(InfoExtractor):
    """Extractor for mobile m.weibo.cn status pages."""
    _VALID_URL = r'https?://m\.weibo\.cn/status/(?P<id>[0-9]+)(\?.+)?'
    _TEST = {
        'url': 'https://m.weibo.cn/status/4189191225395228?wm=3333_2001&sourcetype=weixin&featurecode=newtitle&from=singlemessage&isappinstalled=0',
        'info_dict': {
            'id': '4189191225395228',
            'ext': 'mp4',
            'title': '午睡当然是要甜甜蜜蜜的啦',
            'uploader': '柴犬柴犬'
        }
    }

    def _real_extract(self, url):
        """Extract the single stream URL embedded in the page's $render_data
        JS object (mobile pages expose it directly, no visitor handshake)."""
        video_id = self._match_id(url)
        # to get Referer url for genvisitor
        webpage = self._download_webpage(url, video_id, note='visit the page')

        # $render_data is a JS array literal; take its first element.
        weibo_info = self._parse_json(self._search_regex(
            r'var\s+\$render_data\s*=\s*\[({.*})\]\[0\]\s*\|\|\s*{};', webpage,
            'js_code', flags=re.DOTALL),
            video_id, transform_source=js_to_json)

        status_data = weibo_info.get('status', {})
        page_info = status_data.get('page_info')
        title = status_data['status_title']
        uploader = status_data.get('user', {}).get('screen_name')

        return {
            'id': video_id,
            'title': title,
            'uploader': uploader,
            'url': page_info['media_info']['stream_url']
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/atresplayer.py
youtube_dl/extractor/atresplayer.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
    ExtractorError,
    int_or_none,
    urlencode_postdata,
)


class AtresPlayerIE(InfoExtractor):
    """Extractor for atresplayer.com episodes; supports account login."""
    _VALID_URL = r'https?://(?:www\.)?atresplayer\.com/[^/]+/[^/]+/[^/]+/[^/]+/(?P<display_id>.+?)_(?P<id>[0-9a-f]{24})'
    _NETRC_MACHINE = 'atresplayer'
    _TESTS = [
        {
            'url': 'https://www.atresplayer.com/antena3/series/pequenas-coincidencias/temporada-1/capitulo-7-asuntos-pendientes_5d4aa2c57ed1a88fc715a615/',
            'info_dict': {
                'id': '5d4aa2c57ed1a88fc715a615',
                'ext': 'mp4',
                'title': 'Capítulo 7: Asuntos pendientes',
                'description': 'md5:7634cdcb4d50d5381bedf93efb537fbc',
                'duration': 3413,
            },
            'params': {
                'format': 'bestvideo',
            },
            'skip': 'This video is only available for registered users'
        },
        {
            'url': 'https://www.atresplayer.com/lasexta/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_5ad08edf986b2855ed47adc4/',
            'only_matching': True,
        },
        {
            'url': 'https://www.atresplayer.com/antena3/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_5ad51046986b2886722ccdea/',
            'only_matching': True,
        },
    ]
    _API_BASE = 'https://api.atresplayer.com/'

    def _real_initialize(self):
        # Log in (when credentials were configured) before any extraction.
        self._login()

    def _handle_error(self, e, code):
        """Translate an API HTTP error into a user-facing ExtractorError.

        When *e* wraps an HTTP response with status *code*, parse its JSON
        body: a 'required_registered' error becomes a login prompt, anything
        else surfaces the API's error_description. Other errors (and the
        fall-through) are re-raised unchanged via the bare ``raise``.
        """
        if isinstance(e.cause, compat_HTTPError) and e.cause.code == code:
            error = self._parse_json(e.cause.read(), None)
            if error.get('error') == 'required_registered':
                self.raise_login_required()
            raise ExtractorError(error['error_description'], expected=True)
        raise

    def _login(self):
        """Authenticate against account.atresmedia.com, then follow the
        returned targetUrl to pick up the session cookies."""
        username, password = self._get_login_info()
        if username is None:
            return

        # Prime cookies before posting credentials.
        self._request_webpage(
            self._API_BASE + 'login', None, 'Downloading login page')

        try:
            target_url = self._download_json(
                'https://account.atresmedia.com/api/login', None,
                'Logging in', headers={
                    'Content-Type': 'application/x-www-form-urlencoded'
                }, data=urlencode_postdata({
                    'username': username,
                    'password': password,
                }))['targetUrl']
        except ExtractorError as e:
            # 400 carries a JSON error body (e.g. bad credentials).
            self._handle_error(e, 400)

        self._request_webpage(target_url, None, 'Following Target URL')

    def _real_extract(self, url):
        """Fetch episode metadata from the player API and collect HLS/DASH
        formats from its 'sources' list."""
        display_id, video_id = re.match(self._VALID_URL, url).groups()

        try:
            episode = self._download_json(
                self._API_BASE + 'client/v1/player/episode/' + video_id, video_id)
        except ExtractorError as e:
            # 403 means registration/entitlement required.
            self._handle_error(e, 403)

        title = episode['titulo']

        formats = []
        for source in episode.get('sources', []):
            src = source.get('src')
            if not src:
                continue
            src_type = source.get('type')
            if src_type == 'application/vnd.apple.mpegurl':
                formats.extend(self._extract_m3u8_formats(
                    src, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            elif src_type == 'application/dash+xml':
                formats.extend(self._extract_mpd_formats(
                    src, video_id, mpd_id='dash', fatal=False))
        self._sort_formats(formats)

        # Metadata may live in either the heartbeat or omniture blobs;
        # prefer heartbeat when both are present.
        heartbeat = episode.get('heartbeat') or {}
        omniture = episode.get('omniture') or {}
        get_meta = lambda x: heartbeat.get(x) or omniture.get(x)

        return {
            'display_id': display_id,
            'id': video_id,
            'title': title,
            'description': episode.get('descripcion'),
            'thumbnail': episode.get('imgPoster'),
            'duration': int_or_none(episode.get('duration')),
            'formats': formats,
            'channel': get_meta('channel'),
            'season': get_meta('season'),
            'episode_number': int_or_none(get_meta('episodeNumber')),
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/syfy.py
youtube_dl/extractor/syfy.py
from __future__ import unicode_literals

from .adobepass import AdobePassIE
from ..utils import (
    update_url_query,
    smuggle_url,
)


class SyfyIE(AdobePassIE):
    """Extractor for syfy.com; delegates playback to ThePlatform, adding
    Adobe Pass (TV-provider) auth when the video requires entitlement."""
    _VALID_URL = r'https?://(?:www\.)?syfy\.com/(?:[^/]+/)?videos/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://www.syfy.com/theinternetruinedmylife/videos/the-internet-ruined-my-life-season-1-trailer',
        'info_dict': {
            'id': '2968097',
            'ext': 'mp4',
            'title': 'The Internet Ruined My Life: Season 1 Trailer',
            'description': 'One tweet, one post, one click, can destroy everything.',
            'uploader': 'NBCU-MPAT',
            'upload_date': '20170113',
            'timestamp': 1484345640,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['ThePlatform'],
    }]

    def _real_extract(self, url):
        """Scrape the page's Drupal settings for the mpx metadata, then hand
        the (possibly auth-decorated) release URL to the ThePlatform IE."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # syfy_mpx is a one-entry mapping inside Drupal.settings; take the
        # single value.
        syfy_mpx = list(self._parse_json(self._search_regex(
            r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage,
            'drupal settings'), display_id)['syfy']['syfy_mpx'].values())[0]
        video_id = syfy_mpx['mpxGUID']
        title = syfy_mpx['episodeTitle']
        query = {
            'mbr': 'true',
            'manifest': 'm3u',
        }
        if syfy_mpx.get('entitlement') == 'auth':
            # TV-provider protected content: attach an Adobe Pass token.
            resource = self._get_mvpd_resource(
                'syfy', title, video_id,
                syfy_mpx.get('mpxRating', 'TV-14'))
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, 'syfy', resource)

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(update_url_query(
                self._proto_relative_url(syfy_mpx['releaseURL']), query),
                {'force_smil_url': True}),
            'title': title,
            'id': video_id,
            'display_id': display_id,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/twitcasting.py
youtube_dl/extractor/twitcasting.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    float_or_none,
    get_element_by_class,
    get_element_by_id,
    parse_duration,
    str_to_int,
    unified_timestamp,
    urlencode_postdata,
)


class TwitCastingIE(InfoExtractor):
    """Extractor for twitcasting.tv recorded movies (password-protected
    movies are supported via the --video-password option)."""
    _VALID_URL = r'https?://(?:[^/]+\.)?twitcasting\.tv/(?P<uploader_id>[^/]+)/movie/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://twitcasting.tv/ivetesangalo/movie/2357609',
        'md5': '745243cad58c4681dc752490f7540d7f',
        'info_dict': {
            'id': '2357609',
            'ext': 'mp4',
            'title': 'Live #2357609',
            'uploader_id': 'ivetesangalo',
            'description': 'Twitter Oficial da cantora brasileira Ivete Sangalo.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20110822',
            'timestamp': 1314010824,
            'duration': 32,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://twitcasting.tv/mttbernardini/movie/3689740',
        'info_dict': {
            'id': '3689740',
            'ext': 'mp4',
            'title': 'Live playing something #3689740',
            'uploader_id': 'mttbernardini',
            'description': 'Salve, io sono Matto (ma con la e). Questa è la mia presentazione, in quanto sono letteralmente matto (nel senso di strano), con qualcosa in più.',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20120212',
            'timestamp': 1329028024,
            'duration': 681,
            'view_count': int,
        },
        'params': {
            'skip_download': True,
            'videopassword': 'abc',
        },
    }]

    def _real_extract(self, url):
        """Extract HLS formats plus page metadata.

        The m3u8 URL comes from the player's data-movie-url attribute when
        present, otherwise from the first entry of the data-movie-playlist
        JSON (which then also supplies thumbnail/duration fallbacks).
        """
        uploader_id, video_id = re.match(self._VALID_URL, url).groups()

        video_password = self._downloader.params.get('videopassword')
        request_data = None
        if video_password:
            # Protected movies expect the password POSTed with the page
            # request itself.
            request_data = urlencode_postdata({
                'password': video_password,
            })
        webpage = self._download_webpage(url, video_id, data=request_data)

        title = clean_html(get_element_by_id(
            'movietitle', webpage)) or self._html_search_meta(
            ['og:title', 'twitter:title'], webpage, fatal=True)

        video_js_data = {}
        m3u8_url = self._search_regex(
            r'data-movie-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'm3u8 url', group='url', default=None)
        if not m3u8_url:
            video_js_data = self._parse_json(self._search_regex(
                r"data-movie-playlist='(\[[^']+\])'",
                webpage, 'movie playlist'), video_id)[0]
            m3u8_url = video_js_data['source']['url']

        # use `m3u8` entry_protocol until EXT-X-MAP is properly supported by `m3u8_native` entry_protocol
        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, 'mp4', m3u8_id='hls')

        thumbnail = video_js_data.get('thumbnailUrl') or self._og_search_thumbnail(webpage)
        description = clean_html(get_element_by_id(
            'authorcomment', webpage)) or self._html_search_meta(
            ['description', 'og:description', 'twitter:description'], webpage)
        # JS duration is in milliseconds; fall back to the visible
        # player-duration element.
        duration = float_or_none(video_js_data.get(
            'duration'), 1000) or parse_duration(clean_html(
                get_element_by_class('tw-player-duration-time', webpage)))
        view_count = str_to_int(self._search_regex(
            r'Total\s*:\s*([\d,]+)\s*Views', webpage, 'views', None))
        timestamp = unified_timestamp(self._search_regex(
            r'data-toggle="true"[^>]+datetime="([^"]+)"',
            webpage, 'datetime', None))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'uploader_id': uploader_id,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/radiofrance.py
youtube_dl/extractor/radiofrance.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class RadioFranceIE(InfoExtractor): _VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)' IE_NAME = 'radiofrance' _TEST = { 'url': 'http://maison.radiofrance.fr/radiovisions/one-one', 'md5': 'bdbb28ace95ed0e04faab32ba3160daf', 'info_dict': { 'id': 'one-one', 'ext': 'ogg', 'title': 'One to one', 'description': "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.", 'uploader': 'Thomas Hercouët', }, } def _real_extract(self, url): m = re.match(self._VALID_URL, url) video_id = m.group('id') webpage = self._download_webpage(url, video_id) title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title') description = self._html_search_regex( r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>', webpage, 'description', fatal=False) uploader = self._html_search_regex( r'<div class="credit">&nbsp;&nbsp;&copy;&nbsp;(.*?)</div>', webpage, 'uploader', fatal=False) formats_str = self._html_search_regex( r'class="jp-jplayer[^"]*" data-source="([^"]+)">', webpage, 'audio URLs') formats = [ { 'format_id': fm[0], 'url': fm[1], 'vcodec': 'none', 'preference': i, } for i, fm in enumerate(re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str)) ] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'uploader': uploader, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/byutv.py
youtube_dl/extractor/byutv.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, merge_dicts, parse_duration, url_or_none, ) class BYUtvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?byutv\.org/(?:watch|player)/(?!event/)(?P<id>[0-9a-f-]+)(?:/(?P<display_id>[^/?#&]+))?' _TESTS = [{ # ooyalaVOD 'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5', 'info_dict': { 'id': 'ZvanRocTpW-G5_yZFeltTAMv6jxOU9KH', 'display_id': 'studio-c-season-5-episode-5', 'ext': 'mp4', 'title': 'Season 5 Episode 5', 'description': 'md5:1d31dc18ef4f075b28f6a65937d22c65', 'thumbnail': r're:^https?://.*', 'duration': 1486.486, }, 'params': { 'skip_download': True, }, 'add_ie': ['Ooyala'], }, { # dvr 'url': 'https://www.byutv.org/player/8f1dab9b-b243-47c8-b525-3e2d021a3451/byu-softball-pacific-vs-byu-41219---game-2', 'info_dict': { 'id': '8f1dab9b-b243-47c8-b525-3e2d021a3451', 'display_id': 'byu-softball-pacific-vs-byu-41219---game-2', 'ext': 'mp4', 'title': 'Pacific vs. 
BYU (4/12/19)', 'description': 'md5:1ac7b57cb9a78015910a4834790ce1f3', 'duration': 11645, }, 'params': { 'skip_download': True }, }, { 'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d', 'only_matching': True, }, { 'url': 'https://www.byutv.org/player/27741493-dc83-40b0-8420-e7ae38a2ae98/byu-football-toledo-vs-byu-93016?listid=4fe0fee5-0d3c-4a29-b725-e4948627f472&listindex=0&q=toledo', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id video = self._download_json( 'https://api.byutv.org/api3/catalog/getvideosforcontent', display_id, query={ 'contentid': video_id, 'channel': 'byutv', 'x-byutv-context': 'web$US', }, headers={ 'x-byutv-context': 'web$US', 'x-byutv-platformkey': 'xsaaw9c7y5', }) ep = video.get('ooyalaVOD') if ep: return { '_type': 'url_transparent', 'ie_key': 'Ooyala', 'url': 'ooyala:%s' % ep['providerId'], 'id': video_id, 'display_id': display_id, 'title': ep.get('title'), 'description': ep.get('description'), 'thumbnail': ep.get('imageThumbnail'), } info = {} formats = [] for format_id, ep in video.items(): if not isinstance(ep, dict): continue video_url = url_or_none(ep.get('videoUrl')) if not video_url: continue ext = determine_ext(video_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) else: formats.append({ 'url': video_url, 'format_id': format_id, }) merge_dicts(info, { 'title': ep.get('title'), 'description': ep.get('description'), 'thumbnail': ep.get('imageThumbnail'), 'duration': parse_duration(ep.get('length')), }) self._sort_formats(formats) return merge_dicts(info, { 'id': video_id, 'display_id': display_id, 'title': display_id, 'formats': formats, })
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/libraryofcongress.py
youtube_dl/extractor/libraryofcongress.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    float_or_none,
    int_or_none,
    parse_filesize,
)


class LibraryOfCongressIE(InfoExtractor):
    """Extractor for loc.gov media items and webcasts."""
    IE_NAME = 'loc'
    IE_DESC = 'Library of Congress'
    _VALID_URL = r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9a-z_.]+)'
    _TESTS = [{
        # embedded via <div class="media-player"
        'url': 'http://loc.gov/item/90716351/',
        'md5': '6ec0ae8f07f86731b1b2ff70f046210a',
        'info_dict': {
            'id': '90716351',
            'ext': 'mp4',
            'title': "Pa's trip to Mars",
            'duration': 0,
            'view_count': int,
        },
    }, {
        # webcast embedded via mediaObjectId
        'url': 'https://www.loc.gov/today/cyberlc/feature_wdesc.php?rec=5578',
        'info_dict': {
            'id': '5578',
            'ext': 'mp4',
            'title': 'Help! Preservation Training Needs Here, There & Everywhere',
            'duration': 3765,
            'view_count': int,
            'subtitles': 'mincount:1',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # with direct download links
        'url': 'https://www.loc.gov/item/78710669/',
        'info_dict': {
            'id': '78710669',
            'ext': 'mp4',
            'title': 'La vie et la passion de Jesus-Christ',
            'duration': 0,
            'view_count': int,
            'formats': 'mincount:4',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.loc.gov/item/ihas.200197114/',
        'only_matching': True,
    }, {
        'url': 'https://www.loc.gov/item/afc1981005_afs20503/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Resolve the page's media id, query the loc.gov media service and
        assemble HLS, progressive HTTP and direct-download formats."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The media id can be embedded in several different markup variants.
        media_id = self._search_regex(
            (r'id=(["\'])media-player-(?P<id>.+?)\1',
             r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1',
             r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1',
             r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1',
             r'data-tab="share-media-(?P<id>[0-9A-F]{32})"'),
            webpage, 'media id', group='id')

        data = self._download_json(
            'https://media.loc.gov/services/v1/media?id=%s&context=json' % media_id,
            media_id)['mediaObject']

        derivative = data['derivatives'][0]
        media_url = derivative['derivativeUrl']

        title = derivative.get('shortName') or data.get('shortName') or self._og_search_title(
            webpage)

        # Following algorithm was extracted from setAVSource js function
        # found in webpage
        media_url = media_url.replace('rtmp', 'https')

        is_video = data.get('mediaType', 'v').lower() == 'v'
        ext = determine_ext(media_url)
        if ext not in ('mp4', 'mp3'):
            media_url += '.mp4' if is_video else '.mp3'

        formats = []
        if '/vod/mp4:' in media_url:
            # RTMP-style VOD path maps onto an HLS endpoint.
            formats.append({
                'url': media_url.replace('/vod/mp4:', '/hls-vod/media/') + '.m3u8',
                'format_id': 'hls',
                'ext': 'mp4',
                'protocol': 'm3u8_native',
                'quality': 1,
            })
        http_format = {
            'url': re.sub(r'(://[^/]+/)(?:[^/]+/)*(?:mp4|mp3):', r'\1', media_url),
            'format_id': 'http',
            'quality': 1,
        }
        if not is_video:
            http_format['vcodec'] = 'none'
        formats.append(http_format)

        # Pick up any direct-download <option> links, skipping images and
        # de-duplicating repeated URLs.
        download_urls = set()
        for m in re.finditer(
                r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?:&nbsp;|\s+)\((?P<size>.+?)\))?\s*<', webpage):
            format_id = m.group('id').lower()
            if format_id in ('gif', 'jpeg'):
                continue
            download_url = m.group('url')
            if download_url in download_urls:
                continue
            download_urls.add(download_url)
            formats.append({
                'url': download_url,
                'format_id': format_id,
                'filesize_approx': parse_filesize(m.group('size')),
            })

        self._sort_formats(formats)

        duration = float_or_none(data.get('duration'))
        view_count = int_or_none(data.get('viewCount'))

        subtitles = {}
        cc_url = data.get('ccUrl')
        if cc_url:
            subtitles.setdefault('en', []).append({
                'url': cc_url,
                'ext': 'ttml',
            })

        return {
            'id': video_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
            'subtitles': subtitles,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/xvideos.py
youtube_dl/extractor/xvideos.py
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    clean_html,
    determine_ext,
    ExtractorError,
    int_or_none,
    parse_duration,
)


class XVideosIE(InfoExtractor):
    """Extractor for xvideos.com videos, including mirror domains
    (xvideos2.com, xvideos.es, localized subdomains) and the
    flashservice embed / swf-player URL forms."""

    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:[^/]+\.)?xvideos2?\.com/video|
                            (?:www\.)?xvideos\.es/video|
                            flashservice\.xvideos\.com/embedframe/|
                            static-hw\.xvideos\.com/swf/xv-player\.swf\?.*?\bid_video=
                        )
                        (?P<id>[0-9]+)
                    '''
    _TESTS = [{
        'url': 'http://www.xvideos.com/video4588838/biker_takes_his_girl',
        'md5': '14cea69fcb84db54293b1e971466c2e1',
        'info_dict': {
            'id': '4588838',
            'ext': 'mp4',
            'title': 'Biker Takes his Girl',
            'duration': 108,
            'age_limit': 18,
        }
    }, {
        'url': 'https://flashservice.xvideos.com/embedframe/4588838',
        'only_matching': True,
    }, {
        'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=4588838',
        'only_matching': True,
    }, {
        'url': 'http://xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'https://xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'https://xvideos.es/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'https://www.xvideos.es/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'http://xvideos.es/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'http://www.xvideos.es/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'http://fr.xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'https://fr.xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'http://it.xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'https://it.xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'http://de.xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }, {
        'url': 'https://de.xvideos.com/video4588838/biker_takes_his_girl',
        'only_matching': True
    }]

    def _real_extract(self, url):
        """Fetch the canonical watch page and scrape title, thumbnails,
        duration and the flv/HLS/MP4 format URLs embedded in the player
        setup JavaScript."""
        video_id = self._match_id(url)
        # Canonicalize to www.xvideos.com regardless of which mirror/embed
        # URL form matched; '/0' is accepted as a slug placeholder.
        webpage = self._download_webpage(
            'https://www.xvideos.com/video%s/0' % video_id, video_id)

        # The site reports removal/errors inside an <h1 class="inlineError">.
        mobj = re.search(r'<h1 class="inlineError">(.+?)</h1>', webpage)
        if mobj:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)

        title = self._html_search_regex(
            (r'<title>(?P<title>.+?)\s+-\s+XVID',
             r'setVideoTitle\s*\(\s*(["\'])(?P<title>(?:(?!\1).)+)\1'),
            webpage, 'title', default=None,
            group='title') or self._og_search_title(webpage)

        # Two player thumbnails: setThumbUrl() (default aspect) and
        # setThumbUrl169() (16:9); later enumerate index = higher preference.
        thumbnails = []
        for preference, thumbnail in enumerate(('', '169')):
            thumbnail_url = self._search_regex(
                r'setThumbUrl%s\(\s*(["\'])(?P<thumbnail>(?:(?!\1).)+)\1' % thumbnail,
                webpage, 'thumbnail', default=None, group='thumbnail')
            if thumbnail_url:
                thumbnails.append({
                    'url': thumbnail_url,
                    'preference': preference,
                })

        duration = int_or_none(self._og_search_property(
            'duration', webpage, default=None)) or parse_duration(
            self._search_regex(
                r'<span[^>]+class=["\']duration["\'][^>]*>.*?(\d[^<]+)',
                webpage, 'duration', fatal=False))

        formats = []

        # Legacy flash URL, percent-encoded in a query-string fragment.
        video_url = compat_urllib_parse_unquote(self._search_regex(
            r'flv_url=(.+?)&', webpage, 'video URL', default=''))
        if video_url:
            formats.append({
                'url': video_url,
                'format_id': 'flv',
            })

        # setVideoHLS(...) / setVideoUrlLow(...) / setVideoUrlHigh(...)
        # calls in the player JS carry the modern format URLs.
        for kind, _, format_url in re.findall(
                r'setVideo([^(]+)\((["\'])(http.+?)\2\)', webpage):
            format_id = kind.lower()
            if format_id == 'hls':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
            elif format_id in ('urllow', 'urlhigh'):
                formats.append({
                    'url': format_url,
                    # format_id[3:] strips the 'url' prefix -> 'low'/'high'
                    'format_id': '%s-%s' % (determine_ext(format_url, 'mp4'), format_id[3:]),
                    'quality': -2 if format_id.endswith('low') else None,
                })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'duration': duration,
            'thumbnails': thumbnails,
            'age_limit': 18,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cpac.py
youtube_dl/extractor/cpac.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    int_or_none,
    str_or_none,
    try_get,
    unified_timestamp,
    update_url_query,
    urljoin,
)

# compat_range
# On Python 2, rebind `range` to the lazy `xrange` so the pagination loop
# below does not materialize a list; on Python 3 the NameError is swallowed.
try:
    if callable(xrange):
        range = xrange
except (NameError, TypeError):
    pass


class CPACIE(InfoExtractor):
    """Extractor for single CPAC (Canadian parliamentary TV) episodes,
    English (/episode) and French (/l-episode) page variants."""

    IE_NAME = 'cpac'
    _VALID_URL = r'https?://(?:www\.)?cpac\.ca/(?P<fr>l-)?episode\?id=(?P<id>[\da-f]{8}(?:-[\da-f]{4}){3}-[\da-f]{12})'
    _TEST = {
        # 'url': 'http://www.cpac.ca/en/programs/primetime-politics/episodes/65490909',
        'url': 'https://www.cpac.ca/episode?id=fc7edcae-4660-47e1-ba61-5b7f29a9db0f',
        'md5': 'e46ad699caafd7aa6024279f2614e8fa',
        'info_dict': {
            'id': 'fc7edcae-4660-47e1-ba61-5b7f29a9db0f',
            'ext': 'mp4',
            'upload_date': '20220215',
            'title': 'News Conference to Celebrate National Kindness Week – February 15, 2022',
            'description': 'md5:466a206abd21f3a6f776cdef290c23fb',
            'timestamp': 1644901200,
        },
        'params': {
            'format': 'bestvideo',
            'hls_prefer_native': True,
        },
    }

    def _real_extract(self, url):
        """Query the crafter CMS content-model API for the episode and
        build formats from its HLS manifest, preferring the audio
        language that matches the page language."""
        video_id = self._match_id(url)
        # French pages use the /l-episode path; field names below are
        # suffixed with the language code (title_en_t / title_fr_t, ...).
        url_lang = 'fr' if '/l-episode?' in url else 'en'

        content = self._download_json(
            'https://www.cpac.ca/api/1/services/contentModel.json?url=/site/website/episode/index.xml&crafterSite=cpacca&id=' + video_id,
            video_id)

        video_url = try_get(content, lambda x: x['page']['details']['videoUrl'], compat_str)
        formats = []
        if video_url:
            content = content['page']
            # NOTE(review): `title` is only assigned inside this branch but
            # is referenced unconditionally in the return dict below —
            # a missing videoUrl would raise NameError; confirm intended.
            title = str_or_none(content['details']['title_%s_t' % (url_lang, )])
            formats = self._extract_m3u8_formats(video_url, video_id, m3u8_id='hls', ext='mp4')
            for fmt in formats:
                # prefer language to match URL
                fmt_lang = fmt.get('language')
                if fmt_lang == url_lang:
                    fmt['language_preference'] = 10
                elif not fmt_lang:
                    fmt['language_preference'] = -1
                else:
                    fmt['language_preference'] = -10

        self._sort_formats(formats)

        category = str_or_none(content['details']['category_%s_t' % (url_lang, )])

        def is_live(v_type):
            # Tri-state: True/False when the API reports a type, else None.
            return (v_type == 'live') if v_type is not None else None

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': str_or_none(content['details'].get('description_%s_t' % (url_lang, ))),
            'timestamp': unified_timestamp(content['details'].get('liveDateTime')),
            'category': [category] if category else None,
            'thumbnail': urljoin(url, str_or_none(content['details'].get('image_%s_s' % (url_lang, )))),
            'is_live': is_live(content['details'].get('type')),
        }


class CPACPlaylistIE(InfoExtractor):
    """Extractor for CPAC program listings and search results
    (English /program, /search; French /emission, /rechercher),
    paginating through the content-model API."""

    IE_NAME = 'cpac:playlist'
    _VALID_URL = r'(?i)https?://(?:www\.)?cpac\.ca/(?:program|search|(?P<fr>emission|rechercher))\?(?:[^&]+&)*?(?P<id>(?:id=\d+|programId=\d+|key=[^&]+))'

    _TESTS = [{
        'url': 'https://www.cpac.ca/program?id=6',
        'info_dict': {
            'id': 'id=6',
            'title': 'Headline Politics',
            'description': 'Watch CPAC’s signature long-form coverage of the day’s pressing political events as they unfold.',
        },
        'playlist_count': 10,
    }, {
        'url': 'https://www.cpac.ca/search?key=hudson&type=all&order=desc',
        'info_dict': {
            'id': 'key=hudson',
            'title': 'hudson',
        },
        'playlist_count': 22,
    }, {
        'url': 'https://www.cpac.ca/search?programId=50',
        'info_dict': {
            'id': 'programId=50',
            'title': '50',
        },
        'playlist_count': 9,
    }, {
        'url': 'https://www.cpac.ca/emission?id=6',
        'only_matching': True,
    }, {
        'url': 'https://www.cpac.ca/rechercher?key=hudson&type=all&order=desc',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Walk all result pages of a program or search query and emit
        a playlist of episode-page URLs."""
        # Note: the matched "id" here is the raw query fragment
        # (e.g. 'id=6' or 'key=hudson'), used both as playlist id and as
        # the query tail appended to the API URL below.
        video_id = self._match_id(url)
        url_lang = 'fr' if any(x in url for x in ('/emission?', '/rechercher?')) else 'en'
        # Program pages and search pages use different API models/keys.
        pl_type, list_type = ('program', 'itemList') if any(x in url for x in ('/program?', '/emission?')) else ('search', 'searchResult')
        api_url = (
            'https://www.cpac.ca/api/1/services/contentModel.json?url=/site/website/%s/index.xml&crafterSite=cpacca&%s'
            % (pl_type, video_id, ))
        content = self._download_json(api_url, video_id)
        entries = []
        total_pages = int_or_none(try_get(content, lambda x: x['page'][list_type]['totalPages']), default=1)
        for page in range(1, total_pages + 1):
            if page > 1:
                # Page 1 was already fetched above; continuation pages are
                # best-effort (fatal=False leaves `content` as None on error).
                api_url = update_url_query(api_url, {'page': '%d' % (page, ), })
                content = self._download_json(
                    api_url, video_id,
                    note='Downloading continuation - %d' % (page, ),
                    fatal=False)
            for item in try_get(content, lambda x: x['page'][list_type]['item'], list) or []:
                episode_url = urljoin(url, try_get(item, lambda x: x['url_%s_s' % (url_lang, )]))
                if episode_url:
                    entries.append(episode_url)

        return self.playlist_result(
            (self.url_result(entry) for entry in entries),
            playlist_id=video_id,
            # Search results have no program title; fall back to the value
            # part of the query fragment (e.g. 'hudson' from 'key=hudson').
            playlist_title=try_get(content, lambda x: x['page']['program']['title_%s_t' % (url_lang, )]) or video_id.split('=')[-1],
            playlist_description=try_get(content, lambda x: x['page']['program']['description_%s_t' % (url_lang, )]),
        )
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hungama.py
youtube_dl/extractor/hungama.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    urlencode_postdata,
)


class HungamaIE(InfoExtractor):
    """Extractor for hungama.com videos, movies and TV-show episodes."""

    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?hungama\.com/
                        (?:
                            (?:video|movie)/[^/]+/|
                            tv-show/(?:[^/]+/){2}\d+/episode/[^/]+/
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'http://www.hungama.com/video/krishna-chants/39349649/',
        'md5': 'a845a6d1ebd08d80c1035126d49bd6a0',
        'info_dict': {
            'id': '2931166',
            'ext': 'mp4',
            'title': 'Lucky Ali - Kitni Haseen Zindagi',
            'track': 'Kitni Haseen Zindagi',
            'artist': 'Lucky Ali',
            'album': 'Aks',
            'release_year': 2000,
        }
    }, {
        'url': 'https://www.hungama.com/movie/kahaani-2/44129919/',
        'only_matching': True,
    }, {
        'url': 'https://www.hungama.com/tv-show/padded-ki-pushup/season-1/44139461/episode/ep-02-training-sasu-pathlaag-karing/44139503/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Scrape JSON-LD metadata from the page, then request the HLS
        stream URL through the site's XHR endpoint."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Title/description/etc. come from the page's JSON-LD block;
        # only the formats are filled in below.
        info = self._search_json_ld(webpage, video_id)

        m3u8_url = self._download_json(
            'https://www.hungama.com/index.php', video_id,
            data=urlencode_postdata({'content_id': video_id}), headers={
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                # The endpoint only answers AJAX-style requests.
                'X-Requested-With': 'XMLHttpRequest',
            }, query={
                'c': 'common',
                'm': 'get_video_mdn_url',
            })['stream_url']

        formats = self._extract_m3u8_formats(
            m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')
        self._sort_formats(formats)

        info.update({
            'id': video_id,
            'formats': formats,
        })
        return info


class HungamaSongIE(InfoExtractor):
    """Extractor for individual hungama.com songs (audio-only pages)."""

    _VALID_URL = r'https?://(?:www\.)?hungama\.com/song/[^/]+/(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.hungama.com/song/kitni-haseen-zindagi/2931166/',
        'md5': 'a845a6d1ebd08d80c1035126d49bd6a0',
        'info_dict': {
            'id': '2931166',
            'ext': 'mp4',
            'title': 'Lucky Ali - Kitni Haseen Zindagi',
            'track': 'Kitni Haseen Zindagi',
            'artist': 'Lucky Ali',
            'album': 'Aks',
            'release_year': 2000,
        }
    }

    def _real_extract(self, url):
        """Resolve the track via the audio-player-data API, then fetch
        the media URL it points at and expand the HLS formats."""
        audio_id = self._match_id(url)

        data = self._download_json(
            'https://www.hungama.com/audio-player-data/track/%s' % audio_id,
            audio_id, query={'_country': 'IN'})[0]
        track = data['song_name']
        artist = data.get('singer_name')

        # 'file'/'preview_link' is itself a JSON endpoint wrapping the
        # actual media URL.
        m3u8_url = self._download_json(
            data.get('file') or data['preview_link'],
            audio_id)['response']['media_url']

        formats = self._extract_m3u8_formats(
            m3u8_url, audio_id, ext='mp4', entry_protocol='m3u8_native',
            m3u8_id='hls')
        self._sort_formats(formats)

        title = '%s - %s' % (artist, track) if artist else track
        thumbnail = data.get('img_src') or data.get('album_image')

        return {
            'id': audio_id,
            'title': title,
            'thumbnail': thumbnail,
            'track': track,
            'artist': artist,
            'album': data.get('album_name'),
            'release_year': int_or_none(data.get('date')),
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/firsttv.py
youtube_dl/extractor/firsttv.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urlparse,
)
from ..utils import (
    int_or_none,
    qualities,
    unified_strdate,
    url_or_none,
)


class FirstTVIE(InfoExtractor):
    """Extractor for 1tv.ru (Channel One Russia) shows and news issues;
    a single page may contain one or many playlist items."""

    IE_NAME = '1tv'
    IE_DESC = 'Первый канал'
    _VALID_URL = r'https?://(?:www\.)?1tv\.ru/(?:[^/]+/)+(?P<id>[^/?#]+)'

    _TESTS = [{
        # single format
        'url': 'http://www.1tv.ru/shows/naedine-so-vsemi/vypuski/gost-lyudmila-senchina-naedine-so-vsemi-vypusk-ot-12-02-2015',
        'md5': 'a1b6b60d530ebcf8daacf4565762bbaf',
        'info_dict': {
            'id': '40049',
            'ext': 'mp4',
            'title': 'Гость Людмила Сенчина. Наедине со всеми. Выпуск от 12.02.2015',
            'thumbnail': r're:^https?://.*\.(?:jpg|JPG)$',
            'upload_date': '20150212',
            'duration': 2694,
        },
    }, {
        # multiple formats
        'url': 'http://www.1tv.ru/shows/dobroe-utro/pro-zdorove/vesennyaya-allergiya-dobroe-utro-fragment-vypuska-ot-07042016',
        'info_dict': {
            'id': '364746',
            'ext': 'mp4',
            'title': 'Весенняя аллергия. Доброе утро. Фрагмент выпуска от 07.04.2016',
            'thumbnail': r're:^https?://.*\.(?:jpg|JPG)$',
            'upload_date': '20160407',
            'duration': 179,
            'formats': 'mincount:3',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.1tv.ru/news/issue/2016-12-01/14:00',
        'info_dict': {
            'id': '14:00',
            'title': 'Выпуск новостей в 14:00 1 декабря 2016 года. Новости. Первый канал',
            'description': 'md5:2e921b948f8c1ff93901da78ebdb1dfd',
        },
        'playlist_count': 13,
    }, {
        'url': 'http://www.1tv.ru/shows/tochvtoch-supersezon/vystupleniya/evgeniy-dyatlov-vladimir-vysockiy-koni-priveredlivye-toch-v-toch-supersezon-fragment-vypuska-ot-06-11-2016',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Fetch the page's playlist JSON, build HTTP formats from the
        'mbr' entries and reconstruct the matching HLS master URL, then
        return everything as a playlist."""
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)
        # Playlist JSON URL is embedded as a data attribute on the page.
        playlist_url = compat_urlparse.urljoin(url, self._search_regex(
            r'data-playlist-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'playlist url', group='url'))

        parsed_url = compat_urlparse.urlparse(playlist_url)
        qs = compat_urlparse.parse_qs(parsed_url.query)
        item_ids = qs.get('videos_ids[]') or qs.get('news_ids[]')

        items = self._download_json(playlist_url, display_id)

        if item_ids:
            # Keep only the items the page actually references.
            items = [
                item for item in items
                if item.get('uid') and compat_str(item['uid']) in item_ids]
        else:
            items = [items[0]]

        entries = []
        QUALITIES = ('ld', 'sd', 'hd', )

        for item in items:
            title = item['title']
            quality = qualities(QUALITIES)
            formats = []
            path = None
            for f in item.get('mbr', []):
                src = url_or_none(f.get('src'))
                if not src:
                    continue
                tbr = int_or_none(self._search_regex(
                    r'_(\d{3,})\.mp4', src, 'tbr', default=None))
                if not path:
                    # Remember the server-relative media path; reused below
                    # to synthesize the HLS master URL.
                    path = self._search_regex(
                        r'//[^/]+/(.+?)_\d+\.mp4', src,
                        'm3u8 path', default=None)
                formats.append({
                    'url': src,
                    'format_id': f.get('name'),
                    'tbr': tbr,
                    'source_preference': quality(f.get('name')),
                    # quality metadata of http formats may be incorrect
                    'preference': -1,
                })
            # m3u8 URL format is reverse engineered from [1] (search for
            # master.m3u8). dashEdges (that is currently balancer-vod.1tv.ru)
            # is taken from [2].
            # 1. http://static.1tv.ru/player/eump1tv-current/eump-1tv.all.min.js?rnd=9097422834:formatted
            # 2. http://static.1tv.ru/player/eump1tv-config/config-main.js?rnd=9097422834
            if not path and len(formats) == 1:
                path = self._search_regex(
                    r'//[^/]+/(.+?$)', formats[0]['url'],
                    'm3u8 path', default=None)
            if path:
                if len(formats) == 1:
                    m3u8_path = ','
                else:
                    tbrs = [compat_str(t) for t in sorted(f['tbr'] for f in formats)]
                    m3u8_path = '_,%s,%s' % (','.join(tbrs), '.mp4')
                formats.extend(self._extract_m3u8_formats(
                    'http://balancer-vod.1tv.ru/%s%s.urlset/master.m3u8'
                    % (path, m3u8_path),
                    display_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
            self._sort_formats(formats)

            thumbnail = item.get('poster') or self._og_search_thumbnail(webpage)
            duration = int_or_none(item.get('duration') or self._html_search_meta(
                'video:duration', webpage, 'video duration', fatal=False))
            upload_date = unified_strdate(self._html_search_meta(
                'ya:ovs:upload_date', webpage, 'upload date', default=None))

            entries.append({
                'id': compat_str(item.get('id') or item['uid']),
                'thumbnail': thumbnail,
                'title': title,
                'upload_date': upload_date,
                'duration': int_or_none(duration),
                'formats': formats
            })

        title = self._html_search_regex(
            (r'<div class="tv_translation">\s*<h1><a href="[^"]+">([^<]*)</a>',
             r"'title'\s*:\s*'([^']+)'"),
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None)
        description = self._html_search_regex(
            r'<div class="descr">\s*<div>&nbsp;</div>\s*<p>([^<]*)</p></div>',
            webpage, 'description', default=None) or self._html_search_meta(
            'description', webpage, 'description', default=None)

        return self.playlist_result(entries, display_id, title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/myvideoge.py
youtube_dl/extractor/myvideoge.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_id, get_element_by_class, int_or_none, js_to_json, MONTH_NAMES, qualities, unified_strdate, ) class MyVideoGeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?myvideo\.ge/v/(?P<id>[0-9]+)' _TEST = { 'url': 'https://www.myvideo.ge/v/3941048', 'md5': '8c192a7d2b15454ba4f29dc9c9a52ea9', 'info_dict': { 'id': '3941048', 'ext': 'mp4', 'title': 'The best prikol', 'upload_date': '20200611', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'chixa33', 'description': 'md5:5b067801318e33c2e6eea4ab90b1fdd3', }, # working from local dev system 'skip': 'site blocks CI servers', } _MONTH_NAMES_KA = ['იანვარი', 'თებერვალი', 'მარტი', 'აპრილი', 'მაისი', 'ივნისი', 'ივლისი', 'აგვისტო', 'სექტემბერი', 'ოქტომბერი', 'ნოემბერი', 'დეკემბერი'] _quality = staticmethod(qualities(('SD', 'HD'))) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = ( self._og_search_title(webpage, default=None) or clean_html(get_element_by_class('my_video_title', webpage)) or self._html_search_regex(r'<title\b[^>]*>([^<]+)</title\b', webpage, 'title')) jwplayer_sources = self._parse_json( self._search_regex( r'''(?s)jwplayer\s*\(\s*['"]mvplayer['"]\s*\)\s*\.\s*setup\s*\(.*?\bsources\s*:\s*(\[.*?])\s*[,});]''', webpage, 'jwplayer sources', fatal=False) or '', video_id, transform_source=js_to_json, fatal=False) formats = self._parse_jwplayer_formats(jwplayer_sources or [], video_id) for f in formats or []: f['preference'] = self._quality(f['format_id']) self._sort_formats(formats) description = ( self._og_search_description(webpage) or get_element_by_id('long_desc_holder', webpage) or self._html_search_meta('description', webpage)) uploader = self._search_regex(r'<a[^>]+class="mv_user_name"[^>]*>([^<]+)<', webpage, 'uploader', fatal=False) upload_date = get_element_by_class('mv_vid_upl_date', webpage) 
# as ka locale may not be present roll a local date conversion upload_date = (unified_strdate( # translate any ka month to an en one re.sub('|'.join(self._MONTH_NAMES_KA), lambda m: MONTH_NAMES['en'][self._MONTH_NAMES_KA.index(m.group(0))], upload_date, re.I)) if upload_date else None) return { 'id': video_id, 'title': title, 'description': description, 'uploader': uploader, 'formats': formats, 'thumbnail': self._og_search_thumbnail(webpage), 'upload_date': upload_date, 'view_count': int_or_none(get_element_by_class('mv_vid_views', webpage)), 'like_count': int_or_none(get_element_by_id('likes_count', webpage)), 'dislike_count': int_or_none(get_element_by_id('dislikes_count', webpage)), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/apa.py
youtube_dl/extractor/apa.py
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    url_or_none,
)


class APAIE(InfoExtractor):
    """Extractor for APA (Austria Presse Agentur) UVP video embeds,
    identified by a UUID on any *.apa.at embed host."""

    _VALID_URL = r'(?P<base_url>https?://[^/]+\.apa\.at)/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
    _TESTS = [{
        'url': 'http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029',
        'md5': '2b12292faeb0a7d930c778c7a5b4759b',
        'info_dict': {
            'id': '293f6d17-692a-44e3-9fd5-7b178f3a1029',
            'ext': 'mp4',
            'title': '293f6d17-692a-44e3-9fd5-7b178f3a1029',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://uvp-apapublisher.sf.apa.at/embed/2f94e9e6-d945-4db2-9548-f9a41ebf7b78',
        'only_matching': True,
    }, {
        'url': 'http://uvp-rma.sf.apa.at/embed/70404cca-2f47-4855-bbb8-20b1fae58f76',
        'only_matching': True,
    }, {
        'url': 'http://uvp-kleinezeitung.sf.apa.at/embed/f1c44979-dba2-4ebf-b021-e4cf2cac3c81',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find all APA embed iframes in an arbitrary third-party page.
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}.*?)\1',
                webpage)]

    def _real_extract(self, url):
        """Load the player page; delegate to JWPlatform when a media id
        is present, otherwise scrape hls/progressive sources directly."""
        mobj = re.match(self._VALID_URL, url)
        video_id, base_url = mobj.group('id', 'base_url')

        webpage = self._download_webpage(
            '%s/player/%s' % (base_url, video_id), video_id)

        jwplatform_id = self._search_regex(
            r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage,
            'jwplatform id', default=None)

        if jwplatform_id:
            return self.url_result(
                'jwplatform:' + jwplatform_id, ie='JWPlatform',
                video_id=video_id)

        def extract(field, name=None):
            # Pull a quoted JS config value ("field": "...") from the page.
            return self._search_regex(
                r'\b%s["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1' % field,
                webpage, name or field, default=None, group='value')

        title = extract('title') or video_id
        description = extract('description')
        thumbnail = extract('poster', 'thumbnail')

        formats = []
        for format_id in ('hls', 'progressive'):
            source_url = url_or_none(extract(format_id))
            if not source_url:
                continue
            ext = determine_ext(source_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                # Progressive MP4s encode the height in the filename.
                height = int_or_none(self._search_regex(
                    r'(\d+)\.mp4', source_url, 'height', default=None))
                formats.append({
                    'url': source_url,
                    'format_id': format_id,
                    'height': height,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tvn24.py
youtube_dl/extractor/tvn24.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    NO_DEFAULT,
    unescapeHTML,
)


class TVN24IE(InfoExtractor):
    """Extractor for tvn24.pl / tvn24bis.pl articles and magazine pages
    carrying an embedded player configured via data-* attributes."""

    _VALID_URL = r'https?://(?:(?:[^/]+)\.)?tvn24(?:bis)?\.pl/(?:[^/]+/)*(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.tvn24.pl/wiadomosci-z-kraju,3/oredzie-artura-andrusa,702428.html',
        'md5': 'fbdec753d7bc29d96036808275f2130c',
        'info_dict': {
            'id': '1584444',
            'ext': 'mp4',
            'title': '"Święta mają być wesołe, dlatego, ludziska, wszyscy pod jemiołę"',
            'description': 'Wyjątkowe orędzie Artura Andrusa, jednego z gości Szkła kontaktowego.',
            'thumbnail': 're:https?://.*[.]jpeg',
        }
    }, {
        # different layout
        'url': 'https://tvnmeteo.tvn24.pl/magazyny/maja-w-ogrodzie,13/odcinki-online,1,4,1,0/pnacza-ptaki-i-iglaki-odc-691-hgtv-odc-29,1771763.html',
        'info_dict': {
            'id': '1771763',
            'ext': 'mp4',
            'title': 'Pnącza, ptaki i iglaki (odc. 691 /HGTV odc. 29)',
            'thumbnail': 're:https?://.*',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://fakty.tvn24.pl/ogladaj-online,60/53-konferencja-bezpieczenstwa-w-monachium,716431.html',
        'only_matching': True,
    }, {
        'url': 'http://sport.tvn24.pl/pilka-nozna,105/ligue-1-kamil-glik-rozcial-glowe-monaco-tylko-remisuje-z-bastia,716522.html',
        'only_matching': True,
    }, {
        'url': 'http://tvn24bis.pl/poranek,146,m/gen-koziej-w-tvn24-bis-wracamy-do-czasow-zimnej-wojny,715660.html',
        'only_matching': True,
    }, {
        'url': 'https://www.tvn24.pl/magazyn-tvn24/angie-w-jednej-czwartej-polka-od-szarej-myszki-do-cesarzowej-europy,119,2158',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Parse the player's data-quality / data-share-params attributes
        into formats and a video id."""
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        title = self._og_search_title(
            webpage, default=None) or self._search_regex(
            r'<h\d+[^>]+class=["\']magazineItemHeader[^>]+>(.+?)</h',
            webpage, 'title')

        def extract_json(attr, name, default=NO_DEFAULT, fatal=True):
            # data-* attributes hold HTML-escaped JSON payloads.
            return self._parse_json(
                self._search_regex(
                    r'\b%s=(["\'])(?P<json>(?!\1).+?)\1' % attr, webpage,
                    name, group='json', default=default,
                    fatal=fatal) or '{}',
                display_id, transform_source=unescapeHTML, fatal=fatal)

        quality_data = extract_json('data-quality', 'formats')

        formats = []
        # Fix: use a dedicated loop variable; the original iterated with
        # `url`, shadowing the page URL parameter that the ,<id>.html
        # video-id fallback below needs to inspect.
        for format_id, format_url in quality_data.items():
            formats.append({
                'url': format_url,
                'format_id': format_id,
                # format_id looks like '720p'; strip the suffix for height.
                'height': int_or_none(format_id.rstrip('p')),
            })
        self._sort_formats(formats)

        description = self._og_search_description(webpage, default=None)
        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_regex(
            r'\bdata-poster=(["\'])(?P<url>(?!\1).+?)\1', webpage,
            'thumbnail', group='url')

        video_id = None

        share_params = extract_json(
            'data-share-params', 'share params', default=None)
        if isinstance(share_params, dict):
            video_id = share_params.get('id')

        if not video_id:
            video_id = self._search_regex(
                r'data-vid-id=["\'](\d+)', webpage, 'video id',
                default=None) or self._search_regex(
                r',(\d+)\.html', url, 'video id',
                default=display_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cammodels.py
youtube_dl/extractor/cammodels.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, url_or_none, ) class CamModelsIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cammodels\.com/cam/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.cammodels.com/cam/AutumnKnight/', 'only_matching': True, 'age_limit': 18 }] def _real_extract(self, url): user_id = self._match_id(url) manifest = self._download_json( 'https://manifest-server.naiadsystems.com/live/s:%s.json' % user_id, user_id) formats = [] thumbnails = [] for format_id, format_dict in manifest['formats'].items(): if not isinstance(format_dict, dict): continue encodings = format_dict.get('encodings') if not isinstance(encodings, list): continue vcodec = format_dict.get('videoCodec') acodec = format_dict.get('audioCodec') for media in encodings: if not isinstance(media, dict): continue media_url = url_or_none(media.get('location')) if not media_url: continue format_id_list = [format_id] height = int_or_none(media.get('videoHeight')) if height is not None: format_id_list.append('%dp' % height) f = { 'url': media_url, 'format_id': '-'.join(format_id_list), 'width': int_or_none(media.get('videoWidth')), 'height': height, 'vbr': int_or_none(media.get('videoKbps')), 'abr': int_or_none(media.get('audioKbps')), 'fps': int_or_none(media.get('fps')), 'vcodec': vcodec, 'acodec': acodec, } if 'rtmp' in format_id: f['ext'] = 'flv' elif 'hls' in format_id: f.update({ 'ext': 'mp4', # hls skips fragments, preferring rtmp 'preference': -1, }) else: if format_id == 'jpeg': thumbnails.append({ 'url': f['url'], 'width': f['width'], 'height': f['height'], 'format_id': f['format_id'], }) continue formats.append(f) self._sort_formats(formats) return { 'id': user_id, 'title': self._live_title(user_id), 'thumbnails': thumbnails, 'is_live': True, 'formats': formats, 'age_limit': 18 }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pornotube.py
youtube_dl/extractor/pornotube.py
from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import int_or_none class PornotubeIE(InfoExtractor): _VALID_URL = r'https?://(?:\w+\.)?pornotube\.com/(?:[^?#]*?)/video/(?P<id>[0-9]+)' _TEST = { 'url': 'http://www.pornotube.com/orientation/straight/video/4964/title/weird-hot-and-wet-science', 'md5': '60fc5a4f0d93a97968fc7999d98260c9', 'info_dict': { 'id': '4964', 'ext': 'mp4', 'upload_date': '20141203', 'title': 'Weird Hot and Wet Science', 'description': 'md5:a8304bef7ef06cb4ab476ca6029b01b0', 'categories': ['Adult Humor', 'Blondes'], 'uploader': 'Alpha Blue Archives', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1417582800, 'age_limit': 18, } } def _real_extract(self, url): video_id = self._match_id(url) token = self._download_json( 'https://api.aebn.net/auth/v2/origins/authenticate', video_id, note='Downloading token', data=json.dumps({'credentials': 'Clip Application'}).encode('utf-8'), headers={ 'Content-Type': 'application/json', 'Origin': 'http://www.pornotube.com', })['tokenKey'] video_url = self._download_json( 'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id, video_id, note='Downloading delivery information', headers={'Authorization': token})['mediaUrl'] FIELDS = ( 'title', 'description', 'startSecond', 'endSecond', 'publishDate', 'studios{name}', 'categories{name}', 'movieId', 'primaryImageNumber' ) info = self._download_json( 'https://api.aebn.net/content/v2/clips/%s?fields=%s' % (video_id, ','.join(FIELDS)), video_id, note='Downloading metadata', headers={'Authorization': token}) if isinstance(info, list): info = info[0] title = info['title'] timestamp = int_or_none(info.get('publishDate'), scale=1000) uploader = info.get('studios', [{}])[0].get('name') movie_id = info.get('movieId') primary_image_number = info.get('primaryImageNumber') thumbnail = None if movie_id and primary_image_number: thumbnail = 'http://pic.aebn.net/dis/t/%s/%s_%08d.jpg' % ( movie_id, movie_id, 
primary_image_number) start = int_or_none(info.get('startSecond')) end = int_or_none(info.get('endSecond')) duration = end - start if start and end else None categories = [c['name'] for c in info.get('categories', []) if c.get('name')] return { 'id': video_id, 'url': video_url, 'title': title, 'description': info.get('description'), 'duration': duration, 'timestamp': timestamp, 'uploader': uploader, 'thumbnail': thumbnail, 'categories': categories, 'age_limit': 18, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/afreecatv.py
youtube_dl/extractor/afreecatv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_xpath from ..utils import ( determine_ext, ExtractorError, int_or_none, url_or_none, urlencode_postdata, xpath_text, ) class AfreecaTVIE(InfoExtractor): IE_NAME = 'afreecatv' IE_DESC = 'afreecatv.com' _VALID_URL = r'''(?x) https?:// (?: (?:(?:live|afbbs|www)\.)?afreeca(?:tv)?\.com(?::\d+)? (?: /app/(?:index|read_ucc_bbs)\.cgi| /player/[Pp]layer\.(?:swf|html) )\?.*?\bnTitleNo=| vod\.afreecatv\.com/PLAYER/STATION/ ) (?P<id>\d+) ''' _NETRC_MACHINE = 'afreecatv' _TESTS = [{ 'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=', 'md5': 'f72c89fe7ecc14c1b5ce506c4996046e', 'info_dict': { 'id': '36164052', 'ext': 'mp4', 'title': '데일리 에이프릴 요정들의 시상식!', 'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$', 'uploader': 'dailyapril', 'uploader_id': 'dailyapril', 'upload_date': '20160503', }, 'skip': 'Video is gone', }, { 'url': 'http://afbbs.afreecatv.com:8080/app/read_ucc_bbs.cgi?nStationNo=16711924&nTitleNo=36153164&szBjId=dailyapril&nBbsNo=18605867', 'info_dict': { 'id': '36153164', 'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'", 'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$', 'uploader': 'dailyapril', 'uploader_id': 'dailyapril', }, 'playlist_count': 2, 'playlist': [{ 'md5': 'd8b7c174568da61d774ef0203159bf97', 'info_dict': { 'id': '36153164_1', 'ext': 'mp4', 'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'", 'upload_date': '20160502', }, }, { 'md5': '58f2ce7f6044e34439ab2d50612ab02b', 'info_dict': { 'id': '36153164_2', 'ext': 'mp4', 'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'", 'upload_date': '20160502', }, }], 'skip': 'Video is gone', }, { 'url': 'http://vod.afreecatv.com/PLAYER/STATION/18650793', 'info_dict': { 'id': '18650793', 'ext': 'mp4', 'title': '오늘은 다르다! 
쏘님의 우월한 위아래~ 댄스리액션!', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': '윈아디', 'uploader_id': 'badkids', 'duration': 107, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://vod.afreecatv.com/PLAYER/STATION/10481652', 'info_dict': { 'id': '10481652', 'title': "BJ유트루와 함께하는 '팅커벨 메이크업!'", 'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$', 'uploader': 'dailyapril', 'uploader_id': 'dailyapril', 'duration': 6492, }, 'playlist_count': 2, 'playlist': [{ 'md5': 'd8b7c174568da61d774ef0203159bf97', 'info_dict': { 'id': '20160502_c4c62b9d_174361386_1', 'ext': 'mp4', 'title': "BJ유트루와 함께하는 '팅커벨 메이크업!' (part 1)", 'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$', 'uploader': 'dailyapril', 'uploader_id': 'dailyapril', 'upload_date': '20160502', 'duration': 3601, }, }, { 'md5': '58f2ce7f6044e34439ab2d50612ab02b', 'info_dict': { 'id': '20160502_39e739bb_174361386_2', 'ext': 'mp4', 'title': "BJ유트루와 함께하는 '팅커벨 메이크업!' (part 2)", 'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$', 'uploader': 'dailyapril', 'uploader_id': 'dailyapril', 'upload_date': '20160502', 'duration': 2891, }, }], 'params': { 'skip_download': True, }, }, { # non standard key 'url': 'http://vod.afreecatv.com/PLAYER/STATION/20515605', 'info_dict': { 'id': '20170411_BE689A0E_190960999_1_2_h', 'ext': 'mp4', 'title': '혼자사는여자집', 'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$', 'uploader': '♥이슬이', 'uploader_id': 'dasl8121', 'upload_date': '20170411', 'duration': 213, }, 'params': { 'skip_download': True, }, }, { # PARTIAL_ADULT 'url': 'http://vod.afreecatv.com/PLAYER/STATION/32028439', 'info_dict': { 'id': '20180327_27901457_202289533_1', 'ext': 'mp4', 'title': '[생]빨개요♥ (part 1)', 'thumbnail': 're:^https?://(?:video|st)img.afreecatv.com/.*$', 'uploader': '[SA]서아', 'uploader_id': 'bjdyrksu', 'upload_date': '20180327', 'duration': 3601, }, 'params': { 'skip_download': True, }, 'expected_warnings': ['adult content'], }, { 'url': 
'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652', 'only_matching': True, }, { 'url': 'http://vod.afreecatv.com/PLAYER/STATION/15055030', 'only_matching': True, }] @staticmethod def parse_video_key(key): video_key = {} m = re.match(r'^(?P<upload_date>\d{8})_\w+_(?P<part>\d+)$', key) if m: video_key['upload_date'] = m.group('upload_date') video_key['part'] = int(m.group('part')) return video_key def _real_initialize(self): self._login() def _login(self): username, password = self._get_login_info() if username is None: return login_form = { 'szWork': 'login', 'szType': 'json', 'szUid': username, 'szPassword': password, 'isSaveId': 'false', 'szScriptVar': 'oLoginRet', 'szAction': '', } response = self._download_json( 'https://login.afreecatv.com/app/LoginAction.php', None, 'Logging in', data=urlencode_postdata(login_form)) _ERRORS = { -4: 'Your account has been suspended due to a violation of our terms and policies.', -5: 'https://member.afreecatv.com/app/user_delete_progress.php', -6: 'https://login.afreecatv.com/membership/changeMember.php', -8: "Hello! AfreecaTV here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.", -9: 'https://member.afreecatv.com/app/pop_login_block.php', -11: 'https://login.afreecatv.com/afreeca/second_login.php', -12: 'https://member.afreecatv.com/app/user_security.php', 0: 'The username does not exist or you have entered the wrong password.', -1: 'The username does not exist or you have entered the wrong password.', -3: 'You have entered your username/password incorrectly.', -7: 'You cannot use your Global AfreecaTV account to access Korean AfreecaTV.', -10: 'Sorry for the inconvenience. \nYour account has been blocked due to an unauthorized access. 
\nPlease contact our Help Center for assistance.', -32008: 'You have failed to log in. Please contact our Help Center.', } result = int_or_none(response.get('RESULT')) if result != 1: error = _ERRORS.get(result, 'You have failed to log in.') raise ExtractorError( 'Unable to login: %s said: %s' % (self.IE_NAME, error), expected=True) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) if re.search(r'alert\(["\']This video has been deleted', webpage): raise ExtractorError( 'Video %s has been deleted' % video_id, expected=True) station_id = self._search_regex( r'nStationNo\s*=\s*(\d+)', webpage, 'station') bbs_id = self._search_regex( r'nBbsNo\s*=\s*(\d+)', webpage, 'bbs') video_id = self._search_regex( r'nTitleNo\s*=\s*(\d+)', webpage, 'title', default=video_id) partial_view = False for _ in range(2): query = { 'nTitleNo': video_id, 'nStationNo': station_id, 'nBbsNo': bbs_id, } if partial_view: query['partialView'] = 'SKIP_ADULT' video_xml = self._download_xml( 'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php', video_id, 'Downloading video info XML%s' % (' (skipping adult)' if partial_view else ''), video_id, headers={ 'Referer': url, }, query=query) flag = xpath_text(video_xml, './track/flag', 'flag', default=None) if flag and flag == 'SUCCEED': break if flag == 'PARTIAL_ADULT': self._downloader.report_warning( 'In accordance with local laws and regulations, underage users are restricted from watching adult content. ' 'Only content suitable for all ages will be downloaded. ' 'Provide account credentials if you wish to download restricted content.') partial_view = True continue elif flag == 'ADULT': error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.' 
else: error = flag raise ExtractorError( '%s said: %s' % (self.IE_NAME, error), expected=True) else: raise ExtractorError('Unable to download video info') video_element = video_xml.findall(compat_xpath('./track/video'))[-1] if video_element is None or video_element.text is None: raise ExtractorError( 'Video %s does not exist' % video_id, expected=True) video_url = video_element.text.strip() title = xpath_text(video_xml, './track/title', 'title', fatal=True) uploader = xpath_text(video_xml, './track/nickname', 'uploader') uploader_id = xpath_text(video_xml, './track/bj_id', 'uploader id') duration = int_or_none(xpath_text( video_xml, './track/duration', 'duration')) thumbnail = xpath_text(video_xml, './track/titleImage', 'thumbnail') common_entry = { 'uploader': uploader, 'uploader_id': uploader_id, 'thumbnail': thumbnail, } info = common_entry.copy() info.update({ 'id': video_id, 'title': title, 'duration': duration, }) if not video_url: entries = [] file_elements = video_element.findall(compat_xpath('./file')) one = len(file_elements) == 1 for file_num, file_element in enumerate(file_elements, start=1): file_url = url_or_none(file_element.text) if not file_url: continue key = file_element.get('key', '') upload_date = self._search_regex( r'^(\d{8})_', key, 'upload date', default=None) file_duration = int_or_none(file_element.get('duration')) format_id = key if key else '%s_%s' % (video_id, file_num) if determine_ext(file_url) == 'm3u8': formats = self._extract_m3u8_formats( file_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', note='Downloading part %d m3u8 information' % file_num) else: formats = [{ 'url': file_url, 'format_id': 'http', }] if not formats: continue self._sort_formats(formats) file_info = common_entry.copy() file_info.update({ 'id': format_id, 'title': title if one else '%s (part %d)' % (title, file_num), 'upload_date': upload_date, 'duration': file_duration, 'formats': formats, }) entries.append(file_info) entries_info = 
info.copy() entries_info.update({ '_type': 'multi_video', 'entries': entries, }) return entries_info info = { 'id': video_id, 'title': title, 'uploader': uploader, 'uploader_id': uploader_id, 'duration': duration, 'thumbnail': thumbnail, } if determine_ext(video_url) == 'm3u8': info['formats'] = self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') else: app, playpath = video_url.split('mp4:') info.update({ 'url': app, 'ext': 'flv', 'play_path': 'mp4:' + playpath, 'rtmp_live': True, # downloading won't end without this }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gamespot.py
youtube_dl/extractor/gamespot.py
from __future__ import unicode_literals from .once import OnceIE from ..compat import compat_urllib_parse_unquote class GameSpotIE(OnceIE): _VALID_URL = r'https?://(?:www\.)?gamespot\.com/(?:video|article|review)s/(?:[^/]+/\d+-|embed/)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/', 'md5': 'b2a30deaa8654fcccd43713a6b6a4825', 'info_dict': { 'id': 'gs-2300-6410818', 'ext': 'mp4', 'title': 'Arma 3 - Community Guide: SITREP I', 'description': 'Check out this video where some of the basics of Arma 3 is explained.', }, 'skip': 'manifest URL give HTTP Error 404: Not Found', }, { 'url': 'http://www.gamespot.com/videos/the-witcher-3-wild-hunt-xbox-one-now-playing/2300-6424837/', 'md5': '173ea87ad762cf5d3bf6163dceb255a6', 'info_dict': { 'id': 'gs-2300-6424837', 'ext': 'mp4', 'title': 'Now Playing - The Witcher 3: Wild Hunt', 'description': 'Join us as we take a look at the early hours of The Witcher 3: Wild Hunt and more.', }, }, { 'url': 'https://www.gamespot.com/videos/embed/6439218/', 'only_matching': True, }, { 'url': 'https://www.gamespot.com/articles/the-last-of-us-2-receives-new-ps4-trailer/1100-6454469/', 'only_matching': True, }, { 'url': 'https://www.gamespot.com/reviews/gears-of-war-review/1900-6161188/', 'only_matching': True, }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) data_video = self._parse_json(self._html_search_regex( r'data-video=(["\'])({.*?})\1', webpage, 'video data', group=2), page_id) title = compat_urllib_parse_unquote(data_video['title']) streams = data_video['videoStreams'] formats = [] m3u8_url = streams.get('adaptive_stream') if m3u8_url: m3u8_formats = self._extract_m3u8_formats( m3u8_url, page_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) for f in m3u8_formats: formats.append(f) http_f = f.copy() del http_f['manifest_url'] http_f.update({ 'format_id': f['format_id'].replace('hls-', 'http-'), 'protocol': 
'http', 'url': f['url'].replace('.m3u8', '.mp4'), }) formats.append(http_f) mpd_url = streams.get('adaptive_dash') if mpd_url: formats.extend(self._extract_mpd_formats( mpd_url, page_id, mpd_id='dash', fatal=False)) self._sort_formats(formats) return { 'id': data_video.get('guid') or page_id, 'display_id': page_id, 'title': title, 'formats': formats, 'description': self._html_search_meta('description', webpage), 'thumbnail': self._og_search_thumbnail(webpage), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/embedly.py
youtube_dl/extractor/embedly.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urllib_parse_unquote class EmbedlyIE(InfoExtractor): _VALID_URL = r'https?://(?:www|cdn\.)?embedly\.com/widgets/media\.html\?(?:[^#]*?&)?url=(?P<id>[^#&]+)' _TESTS = [{ 'url': 'https://cdn.embedly.com/widgets/media.html?src=http%3A%2F%2Fwww.youtube.com%2Fembed%2Fvideoseries%3Flist%3DUUGLim4T2loE5rwCMdpCIPVg&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DSU4fj_aEMVw%26list%3DUUGLim4T2loE5rwCMdpCIPVg&image=http%3A%2F%2Fi.ytimg.com%2Fvi%2FSU4fj_aEMVw%2Fhqdefault.jpg&key=8ee8a2e6a8cc47aab1a5ee67f9a178e0&type=text%2Fhtml&schema=youtube&autoplay=1', 'only_matching': True, }] def _real_extract(self, url): return self.url_result(compat_urllib_parse_unquote(self._match_id(url)))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/seeker.py
youtube_dl/extractor/seeker.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( get_element_by_class, strip_or_none, ) class SeekerIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?seeker\.com/(?P<display_id>.*)-(?P<article_id>\d+)\.html' _TESTS = [{ 'url': 'http://www.seeker.com/should-trump-be-required-to-release-his-tax-returns-1833805621.html', 'md5': '897d44bbe0d8986a2ead96de565a92db', 'info_dict': { 'id': 'Elrn3gnY', 'ext': 'mp4', 'title': 'Should Trump Be Required To Release His Tax Returns?', 'description': 'md5:41efa8cfa8d627841045eec7b018eb45', 'timestamp': 1490090165, 'upload_date': '20170321', } }, { 'url': 'http://www.seeker.com/changes-expected-at-zoos-following-recent-gorilla-lion-shootings-1834116536.html', 'playlist': [ { 'md5': '0497b9f20495174be73ae136949707d2', 'info_dict': { 'id': 'FihYQ8AE', 'ext': 'mp4', 'title': 'The Pros & Cons Of Zoos', 'description': 'md5:d88f99a8ea8e7d25e6ff77f271b1271c', 'timestamp': 1490039133, 'upload_date': '20170320', }, } ], 'info_dict': { 'id': '1834116536', 'title': 'After Gorilla Killing, Changes Ahead for Zoos', 'description': 'The largest association of zoos and others are hoping to learn from recent incidents that led to the shooting deaths of a gorilla and two lions.', }, }] def _real_extract(self, url): display_id, article_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, display_id) entries = [] for jwp_id in re.findall(r'data-video-id="([a-zA-Z0-9]{8})"', webpage): entries.append(self.url_result( 'jwplatform:' + jwp_id, 'JWPlatform', jwp_id)) return self.playlist_result( entries, article_id, self._og_search_title(webpage), strip_or_none(get_element_by_class('subtitle__text', webpage)) or self._og_search_description(webpage))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/viu.py
youtube_dl/extractor/viu.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_kwargs, compat_str, ) from ..utils import ( ExtractorError, int_or_none, ) class ViuBaseIE(InfoExtractor): def _real_initialize(self): viu_auth_res = self._request_webpage( 'https://www.viu.com/api/apps/v2/authenticate', None, 'Requesting Viu auth', query={ 'acct': 'test', 'appid': 'viu_desktop', 'fmt': 'json', 'iid': 'guest', 'languageid': 'default', 'platform': 'desktop', 'userid': 'guest', 'useridtype': 'guest', 'ver': '1.0' }, headers=self.geo_verification_headers()) self._auth_token = viu_auth_res.info()['X-VIU-AUTH'] def _call_api(self, path, *args, **kwargs): headers = self.geo_verification_headers() headers.update({ 'X-VIU-AUTH': self._auth_token }) headers.update(kwargs.get('headers', {})) kwargs['headers'] = headers response = self._download_json( 'https://www.viu.com/api/' + path, *args, **compat_kwargs(kwargs))['response'] if response.get('status') != 'success': raise ExtractorError('%s said: %s' % ( self.IE_NAME, response['message']), expected=True) return response class ViuIE(ViuBaseIE): _VALID_URL = r'(?:viu:|https?://[^/]+\.viu\.com/[a-z]{2}/media/)(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.viu.com/en/media/1116705532?containerId=playlist-22168059', 'info_dict': { 'id': '1116705532', 'ext': 'mp4', 'title': 'Citizen Khan - Ep 1', 'description': 'md5:d7ea1604f49e5ba79c212c551ce2110e', }, 'params': { 'skip_download': 'm3u8 download', }, 'skip': 'Geo-restricted to India', }, { 'url': 'https://www.viu.com/en/media/1130599965', 'info_dict': { 'id': '1130599965', 'ext': 'mp4', 'title': 'Jealousy Incarnate - Episode 1', 'description': 'md5:d3d82375cab969415d2720b6894361e9', }, 'params': { 'skip_download': 'm3u8 download', }, 'skip': 'Geo-restricted to Indonesia', }, { 'url': 'https://india.viu.com/en/media/1126286865', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video_data = 
self._call_api( 'clip/load', video_id, 'Downloading video data', query={ 'appid': 'viu_desktop', 'fmt': 'json', 'id': video_id })['item'][0] title = video_data['title'] m3u8_url = None url_path = video_data.get('urlpathd') or video_data.get('urlpath') tdirforwhole = video_data.get('tdirforwhole') # #EXT-X-BYTERANGE is not supported by native hls downloader # and ffmpeg (#10955) # hls_file = video_data.get('hlsfile') hls_file = video_data.get('jwhlsfile') if url_path and tdirforwhole and hls_file: m3u8_url = '%s/%s/%s' % (url_path, tdirforwhole, hls_file) else: # m3u8_url = re.sub( # r'(/hlsc_)[a-z]+(\d+\.m3u8)', # r'\1whe\2', video_data['href']) m3u8_url = video_data['href'] formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4') self._sort_formats(formats) subtitles = {} for key, value in video_data.items(): mobj = re.match(r'^subtitle_(?P<lang>[^_]+)_(?P<ext>(vtt|srt))', key) if not mobj: continue subtitles.setdefault(mobj.group('lang'), []).append({ 'url': value, 'ext': mobj.group('ext') }) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'series': video_data.get('moviealbumshowname'), 'episode': title, 'episode_number': int_or_none(video_data.get('episodeno')), 'duration': int_or_none(video_data.get('duration')), 'formats': formats, 'subtitles': subtitles, } class ViuPlaylistIE(ViuBaseIE): IE_NAME = 'viu:playlist' _VALID_URL = r'https?://www\.viu\.com/[^/]+/listing/playlist-(?P<id>\d+)' _TEST = { 'url': 'https://www.viu.com/en/listing/playlist-22461380', 'info_dict': { 'id': '22461380', 'title': 'The Good Wife', }, 'playlist_count': 16, 'skip': 'Geo-restricted to Indonesia', } def _real_extract(self, url): playlist_id = self._match_id(url) playlist_data = self._call_api( 'container/load', playlist_id, 'Downloading playlist info', query={ 'appid': 'viu_desktop', 'fmt': 'json', 'id': 'playlist-' + playlist_id })['container'] entries = [] for item in playlist_data.get('item', []): item_id = item.get('id') if not 
item_id: continue item_id = compat_str(item_id) entries.append(self.url_result( 'viu:' + item_id, 'Viu', item_id)) return self.playlist_result( entries, playlist_id, playlist_data.get('title')) class ViuOTTIE(InfoExtractor): IE_NAME = 'viu:ott' _VALID_URL = r'https?://(?:www\.)?viu\.com/ott/(?P<country_code>[a-z]{2})/[a-z]{2}-[a-z]{2}/vod/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.viu.com/ott/sg/en-us/vod/3421/The%20Prime%20Minister%20and%20I', 'info_dict': { 'id': '3421', 'ext': 'mp4', 'title': 'A New Beginning', 'description': 'md5:1e7486a619b6399b25ba6a41c0fe5b2c', }, 'params': { 'skip_download': 'm3u8 download', }, 'skip': 'Geo-restricted to Singapore', }, { 'url': 'http://www.viu.com/ott/hk/zh-hk/vod/7123/%E5%A4%A7%E4%BA%BA%E5%A5%B3%E5%AD%90', 'info_dict': { 'id': '7123', 'ext': 'mp4', 'title': '這就是我的生活之道', 'description': 'md5:4eb0d8b08cf04fcdc6bbbeb16043434f', }, 'params': { 'skip_download': 'm3u8 download', }, 'skip': 'Geo-restricted to Hong Kong', }] _AREA_ID = { 'HK': 1, 'SG': 2, 'TH': 4, 'PH': 5, } def _real_extract(self, url): country_code, video_id = re.match(self._VALID_URL, url).groups() query = { 'r': 'vod/ajax-detail', 'platform_flag_label': 'web', 'product_id': video_id, } area_id = self._AREA_ID.get(country_code.upper()) if area_id: query['area_id'] = area_id product_data = self._download_json( 'http://www.viu.com/ott/%s/index.php' % country_code, video_id, 'Downloading video info', query=query)['data'] video_data = product_data.get('current_product') if not video_data: raise ExtractorError('This video is not available in your region.', expected=True) stream_data = self._download_json( 'https://d1k2us671qcoau.cloudfront.net/distribute_web_%s.php' % country_code, video_id, 'Downloading stream info', query={ 'ccs_product_id': video_data['ccs_product_id'], }, headers={ 'Referer': url, 'Origin': re.search(r'https?://[^/]+', url).group(0), })['data']['stream'] stream_sizes = stream_data.get('size', {}) formats = [] for vid_format, stream_url in 
stream_data.get('url', {}).items(): height = int_or_none(self._search_regex( r's(\d+)p', vid_format, 'height', default=None)) formats.append({ 'format_id': vid_format, 'url': stream_url, 'height': height, 'ext': 'mp4', 'filesize': int_or_none(stream_sizes.get(vid_format)) }) self._sort_formats(formats) subtitles = {} for sub in video_data.get('subtitle', []): sub_url = sub.get('url') if not sub_url: continue subtitles.setdefault(sub.get('name'), []).append({ 'url': sub_url, 'ext': 'srt', }) title = video_data['synopsis'].strip() return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'series': product_data.get('series', {}).get('name'), 'episode': title, 'episode_number': int_or_none(video_data.get('number')), 'duration': int_or_none(stream_data.get('duration')), 'thumbnail': video_data.get('cover_image_url'), 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/photobucket.py
youtube_dl/extractor/photobucket.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import compat_urllib_parse_unquote class PhotobucketIE(InfoExtractor): _VALID_URL = r'https?://(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))' _TEST = { 'url': 'http://media.photobucket.com/user/rachaneronas/media/TiredofLinkBuildingTryBacklinkMyDomaincom_zpsc0c3b9fa.mp4.html?filters[term]=search&filters[primary]=videos&filters[secondary]=images&sort=1&o=0', 'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99', 'info_dict': { 'id': 'zpsc0c3b9fa', 'ext': 'mp4', 'timestamp': 1367669341, 'upload_date': '20130504', 'uploader': 'rachaneronas', 'title': 'Tired of Link Building? Try BacklinkMyDomain.com!', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') video_extension = mobj.group('ext') webpage = self._download_webpage(url, video_id) # Extract URL, uploader, and title from webpage self.report_extraction(video_id) info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);', webpage, 'info json') info = json.loads(info_json) url = compat_urllib_parse_unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url')) return { 'id': video_id, 'url': url, 'uploader': info['username'], 'timestamp': info['creationDate'], 'title': info['title'], 'ext': video_extension, 'thumbnail': info['thumbUrl'], }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nzz.py
youtube_dl/extractor/nzz.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( extract_attributes, ) class NZZIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153', 'info_dict': { 'id': '9153', }, 'playlist_mincount': 6, }, { 'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112', 'info_dict': { 'id': '1368112', }, 'playlist_count': 1, }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) entries = [] for player_element in re.findall( r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', webpage): player_params = extract_attributes(player_element) if player_params.get('data-type') not in ('kaltura_singleArticle',): self.report_warning('Unsupported player type') continue entry_id = player_params['data-id'] entries.append(self.url_result( 'kaltura:1750922:' + entry_id, 'Kaltura', entry_id)) return self.playlist_result(entries, page_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/libsyn.py
youtube_dl/extractor/libsyn.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_class, parse_duration, strip_or_none, unified_strdate, ) class LibsynIE(InfoExtractor): _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))' _TESTS = [{ 'url': 'http://html5-player.libsyn.com/embed/episode/id/6385796/', 'md5': '2a55e75496c790cdeb058e7e6c087746', 'info_dict': { 'id': '6385796', 'ext': 'mp3', 'title': "Champion Minded - Developing a Growth Mindset", # description fetched using another request: # http://html5-player.libsyn.com/embed/getitemdetails?item_id=6385796 # 'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.', 'upload_date': '20180320', 'thumbnail': 're:^https?://.*', }, }, { 'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/', 'md5': '6c5cb21acd622d754d3b1a92b582ce42', 'info_dict': { 'id': '3727166', 'ext': 'mp3', 'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career', 'upload_date': '20150818', 'thumbnail': 're:^https?://.*', } }] def _real_extract(self, url): url, video_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, video_id) data = self._parse_json(self._search_regex( r'var\s+playlistItem\s*=\s*({.+?});', webpage, 'JSON data block'), video_id) episode_title = data.get('item_title') or get_element_by_class('episode-title', webpage) if not episode_title: self._search_regex( [r'data-title="([^"]+)"', r'<title>(.+?)</title>'], webpage, 'episode title') episode_title = episode_title.strip() podcast_title = strip_or_none(clean_html(self._search_regex( r'<h3>([^<]+)</h3>', webpage, 'podcast title', default=None) or get_element_by_class('podcast-title', webpage))) title = '%s - 
%s' % (podcast_title, episode_title) if podcast_title else episode_title formats = [] for k, format_id in (('media_url_libsyn', 'libsyn'), ('media_url', 'main'), ('download_link', 'download')): f_url = data.get(k) if not f_url: continue formats.append({ 'url': f_url, 'format_id': format_id, }) description = self._html_search_regex( r'<p\s+id="info_text_body">(.+?)</p>', webpage, 'description', default=None) if description: # Strip non-breaking and normal spaces description = description.replace('\u00A0', ' ').strip() release_date = unified_strdate(self._search_regex( r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', default=None) or data.get('release_date')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': data.get('thumbnail_url'), 'upload_date': release_date, 'duration': parse_duration(data.get('duration')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/cmt.py
youtube_dl/extractor/cmt.py
from __future__ import unicode_literals from .mtv import MTVIE class CMTIE(MTVIE): IE_NAME = 'cmt.com' _VALID_URL = r'https?://(?:www\.)?cmt\.com/(?:videos|shows|(?:full-)?episodes|video-clips)/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061', 'md5': 'e6b7ef3c4c45bbfae88061799bbba6c2', 'info_dict': { 'id': '989124', 'ext': 'mp4', 'title': 'Garth Brooks - "The Call (featuring Trisha Yearwood)"', 'description': 'Blame It All On My Roots', }, 'skip': 'Video not available', }, { 'url': 'http://www.cmt.com/videos/misc/1504699/still-the-king-ep-109-in-3-minutes.jhtml#id=1739908', 'md5': 'e61a801ca4a183a466c08bd98dccbb1c', 'info_dict': { 'id': '1504699', 'ext': 'mp4', 'title': 'Still The King Ep. 109 in 3 Minutes', 'description': 'Relive or catch up with Still The King by watching this recap of season 1, episode 9.', 'timestamp': 1469421000.0, 'upload_date': '20160725', }, }, { 'url': 'http://www.cmt.com/shows/party-down-south/party-down-south-ep-407-gone-girl/1738172/playlist/#id=1738172', 'only_matching': True, }, { 'url': 'http://www.cmt.com/full-episodes/537qb3/nashville-the-wayfaring-stranger-season-5-ep-501', 'only_matching': True, }, { 'url': 'http://www.cmt.com/video-clips/t9e4ci/nashville-juliette-in-2-minutes', 'only_matching': True, }] def _extract_mgid(self, webpage): mgid = self._search_regex( r'MTVN\.VIDEO\.contentUri\s*=\s*([\'"])(?P<mgid>.+?)\1', webpage, 'mgid', group='mgid', default=None) if not mgid: mgid = self._extract_triforce_mgid(webpage) return mgid def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) mgid = self._extract_mgid(webpage) return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/trilulilu.py
youtube_dl/extractor/trilulilu.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_iso8601, ) class TriluliluIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m)\.)?trilulilu\.ro/(?:[^/]+/)?(?P<id>[^/#\?]+)' _TESTS = [{ 'url': 'http://www.trilulilu.ro/big-buck-bunny-1', 'md5': '68da087b676a6196a413549212f60cc6', 'info_dict': { 'id': 'ae2899e124140b', 'ext': 'mp4', 'title': 'Big Buck Bunny', 'description': ':) pentru copilul din noi', 'uploader_id': 'chipy', 'upload_date': '20120304', 'timestamp': 1330830647, 'uploader': 'chipy', 'view_count': int, 'like_count': int, 'comment_count': int, }, }, { 'url': 'http://www.trilulilu.ro/adena-ft-morreti-inocenta', 'md5': '929dfb8729dc71750463af88bbbbf4a4', 'info_dict': { 'id': 'f299710e3c91c5', 'ext': 'mp4', 'title': 'Adena ft. Morreti - Inocenta', 'description': 'pop music', 'uploader_id': 'VEVOmixt', 'upload_date': '20151204', 'uploader': 'VEVOmixt', 'timestamp': 1449187937, 'view_count': int, 'like_count': int, 'comment_count': int, }, }] def _real_extract(self, url): display_id = self._match_id(url) media_info = self._download_json('http://m.trilulilu.ro/%s?format=json' % display_id, display_id) age_limit = 0 errors = media_info.get('errors', {}) if errors.get('friends'): raise ExtractorError('This video is private.', expected=True) elif errors.get('geoblock'): raise ExtractorError('This video is not available in your country.', expected=True) elif errors.get('xxx_unlogged'): age_limit = 18 media_class = media_info.get('class') if media_class not in ('video', 'audio'): raise ExtractorError('not a video or an audio') user = media_info.get('user', {}) thumbnail = media_info.get('cover_url') if thumbnail: thumbnail.format(width='1600', height='1200') # TODO: get correct ext for audio files stream_type = media_info.get('stream_type') formats = [{ 'url': media_info['href'], 'ext': stream_type, }] if media_info.get('is_hd'): formats.append({ 'format_id': 'hd', 
'url': media_info['hrefhd'], 'ext': stream_type, }) if media_class == 'audio': formats[0]['vcodec'] = 'none' else: formats[0]['format_id'] = 'sd' return { 'id': media_info['identifier'].split('|')[1], 'display_id': display_id, 'formats': formats, 'title': media_info['title'], 'description': media_info.get('description'), 'thumbnail': thumbnail, 'uploader_id': user.get('username'), 'uploader': user.get('fullname'), 'timestamp': parse_iso8601(media_info.get('published'), ' '), 'duration': int_or_none(media_info.get('duration')), 'view_count': int_or_none(media_info.get('count_views')), 'like_count': int_or_none(media_info.get('count_likes')), 'comment_count': int_or_none(media_info.get('count_comments')), 'age_limit': age_limit, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hrfernsehen.py
youtube_dl/extractor/hrfernsehen.py
# coding: utf-8 from __future__ import unicode_literals import json import re from ..utils import ( int_or_none, unified_timestamp, unescapeHTML ) from .common import InfoExtractor class HRFernsehenIE(InfoExtractor): IE_NAME = 'hrfernsehen' _VALID_URL = r'^https?://www\.(?:hr-fernsehen|hessenschau)\.de/.*,video-(?P<id>[0-9]{6})\.html' _TESTS = [{ 'url': 'https://www.hessenschau.de/tv-sendung/hessenschau-vom-26082020,video-130546.html', 'md5': '5c4e0ba94677c516a2f65a84110fc536', 'info_dict': { 'id': '130546', 'ext': 'mp4', 'description': 'Sturmtief Kirsten fegt über Hessen / Die Corona-Pandemie – eine Chronologie / ' 'Sterbehilfe: Die Lage in Hessen / Miss Hessen leitet zwei eigene Unternehmen / ' 'Pop-Up Museum zeigt Schwarze Unterhaltung und Black Music', 'subtitles': {'de': [{ 'url': 'https://hr-a.akamaihd.net/video/as/hessenschau/2020_08/hrLogo_200826200407_L385592_512x288-25p-500kbit.vtt' }]}, 'timestamp': 1598470200, 'upload_date': '20200826', 'thumbnail': 'https://www.hessenschau.de/tv-sendung/hs_ganz-1554~_t-1598465545029_v-16to9__medium.jpg', 'title': 'hessenschau vom 26.08.2020' } }, { 'url': 'https://www.hr-fernsehen.de/sendungen-a-z/mex/sendungen/fair-und-gut---was-hinter-aldis-eigenem-guetesiegel-steckt,video-130544.html', 'only_matching': True }] _GEO_COUNTRIES = ['DE'] def extract_airdate(self, loader_data): airdate_str = loader_data.get('mediaMetadata', {}).get('agf', {}).get('airdate') if airdate_str is None: return None return unified_timestamp(airdate_str) def extract_formats(self, loader_data): stream_formats = [] for stream_obj in loader_data["videoResolutionLevels"]: stream_format = { 'format_id': str(stream_obj['verticalResolution']) + "p", 'height': stream_obj['verticalResolution'], 'url': stream_obj['url'], } quality_information = re.search(r'([0-9]{3,4})x([0-9]{3,4})-([0-9]{2})p-([0-9]{3,4})kbit', stream_obj['url']) if quality_information: stream_format['width'] = int_or_none(quality_information.group(1)) stream_format['height'] = 
int_or_none(quality_information.group(2)) stream_format['fps'] = int_or_none(quality_information.group(3)) stream_format['tbr'] = int_or_none(quality_information.group(4)) stream_formats.append(stream_format) self._sort_formats(stream_formats) return stream_formats def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_meta( ['og:title', 'twitter:title', 'name'], webpage) description = self._html_search_meta( ['description'], webpage) loader_str = unescapeHTML(self._search_regex(r"data-new-hr-mediaplayer-loader='([^']*)'", webpage, "ardloader")) loader_data = json.loads(loader_str) info = { 'id': video_id, 'title': title, 'description': description, 'formats': self.extract_formats(loader_data), 'timestamp': self.extract_airdate(loader_data) } if "subtitle" in loader_data: info["subtitles"] = {"de": [{"url": loader_data["subtitle"]}]} thumbnails = list(set([t for t in loader_data.get("previewImageUrl", {}).values()])) if len(thumbnails) > 0: info["thumbnails"] = [{"url": t} for t in thumbnails] return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rottentomatoes.py
youtube_dl/extractor/rottentomatoes.py
from __future__ import unicode_literals from .common import InfoExtractor from .internetvideoarchive import InternetVideoArchiveIE class RottenTomatoesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rottentomatoes\.com/m/[^/]+/trailers/(?P<id>\d+)' _TEST = { 'url': 'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/', 'info_dict': { 'id': '11028566', 'ext': 'mp4', 'title': 'Toy Story 3', 'description': 'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.', 'thumbnail': r're:^https?://.*\.jpg$', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) iva_id = self._search_regex(r'publishedid=(\d+)', webpage, 'internet video archive id') return { '_type': 'url_transparent', 'url': 'http://video.internetvideoarchive.net/player/6/configuration.ashx?domain=www.videodetective.com&customerid=69249&playerid=641&publishedid=' + iva_id, 'ie_key': InternetVideoArchiveIE.ie_key(), 'id': video_id, 'title': self._og_search_title(webpage), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/eighttracks.py
youtube_dl/extractor/eighttracks.py
# coding: utf-8 from __future__ import unicode_literals import json import random from .common import InfoExtractor from ..compat import ( compat_str, ) from ..utils import ( ExtractorError, ) class EightTracksIE(InfoExtractor): IE_NAME = '8tracks' _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$' _TEST = { 'name': 'EightTracks', 'url': 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a', 'info_dict': { 'id': '1336550', 'display_id': 'youtube-dl-test-tracks-a', 'description': "test chars: \"'/\\ä↭", 'title': "youtube-dl test tracks \"'/\\ä↭<>", }, 'playlist': [ { 'md5': '96ce57f24389fc8734ce47f4c1abcc55', 'info_dict': { 'id': '11885610', 'ext': 'm4a', 'title': "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } }, { 'md5': '4ab26f05c1f7291ea460a3920be8021f', 'info_dict': { 'id': '11885608', 'ext': 'm4a', 'title': "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } }, { 'md5': 'd30b5b5f74217410f4689605c35d1fd7', 'info_dict': { 'id': '11885679', 'ext': 'm4a', 'title': "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } }, { 'md5': '4eb0a669317cd725f6bbd336a29f923a', 'info_dict': { 'id': '11885680', 'ext': 'm4a', 'title': "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } }, { 'md5': '1893e872e263a2705558d1d319ad19e8', 'info_dict': { 'id': '11885682', 'ext': 'm4a', 'title': "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } }, { 'md5': 'b673c46f47a216ab1741ae8836af5899', 'info_dict': { 'id': '11885683', 'ext': 'm4a', 'title': "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } }, { 'md5': '1d74534e95df54986da7f5abf7d842b7', 'info_dict': { 'id': '11885684', 'ext': 'm4a', 'title': "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } }, { 'md5': 'f081f47af8f6ae782ed131d38b9cd1c0', 'info_dict': { 
'id': '11885685', 'ext': 'm4a', 'title': "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad", 'uploader_id': 'ytdl' } } ] } def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) data = self._parse_json( self._search_regex( r"(?s)PAGE\.mix\s*=\s*({.+?});\n", webpage, 'trax information'), playlist_id) session = str(random.randint(0, 1000000000)) mix_id = data['id'] track_count = data['tracks_count'] duration = data['duration'] avg_song_duration = float(duration) / track_count # duration is sometimes negative, use predefined avg duration if avg_song_duration <= 0: avg_song_duration = 300 first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id) next_url = first_url entries = [] for i in range(track_count): api_json = None download_tries = 0 while api_json is None: try: api_json = self._download_webpage( next_url, playlist_id, note='Downloading song information %d/%d' % (i + 1, track_count), errnote='Failed to download song information') except ExtractorError: if download_tries > 3: raise else: download_tries += 1 self._sleep(avg_song_duration, playlist_id) api_data = json.loads(api_json) track_data = api_data['set']['track'] info = { 'id': compat_str(track_data['id']), 'url': track_data['track_file_stream_url'], 'title': track_data['performer'] + ' - ' + track_data['name'], 'raw_title': track_data['name'], 'uploader_id': data['user']['login'], 'ext': 'm4a', } entries.append(info) next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % ( session, mix_id, track_data['id']) return { '_type': 'playlist', 'entries': entries, 'id': compat_str(mix_id), 'display_id': playlist_id, 'title': data.get('name'), 'description': data.get('description'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pinkbike.py
youtube_dl/extractor/pinkbike.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, remove_end, remove_start, str_to_int, unified_strdate, ) class PinkbikeIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?pinkbike\.com/video/|es\.pinkbike\.org/i/kvid/kvid-y5\.swf\?id=)(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://www.pinkbike.com/video/402811/', 'md5': '4814b8ca7651034cd87e3361d5c2155a', 'info_dict': { 'id': '402811', 'ext': 'mp4', 'title': 'Brandon Semenuk - RAW 100', 'description': 'Official release: www.redbull.ca/rupertwalker', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 100, 'upload_date': '20150406', 'uploader': 'revelco', 'location': 'Victoria, British Columbia, Canada', 'view_count': int, 'comment_count': int, } }, { 'url': 'http://es.pinkbike.org/i/kvid/kvid-y5.swf?id=406629', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.pinkbike.com/video/%s' % video_id, video_id) formats = [] for _, format_id, src in re.findall( r'data-quality=((?:\\)?["\'])(.+?)\1[^>]+src=\1(.+?)\1', webpage): height = int_or_none(self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None)) formats.append({ 'url': src, 'format_id': format_id, 'height': height, }) self._sort_formats(formats) title = remove_end(self._og_search_title(webpage), ' Video - Pinkbike') description = self._html_search_regex( r'(?s)id="media-description"[^>]*>(.+?)<', webpage, 'description', default=None) or remove_start( self._og_search_description(webpage), title + '. 
') thumbnail = self._og_search_thumbnail(webpage) duration = int_or_none(self._html_search_meta( 'video:duration', webpage, 'duration')) uploader = self._search_regex( r'<a[^>]+\brel=["\']author[^>]+>([^<]+)', webpage, 'uploader', fatal=False) upload_date = unified_strdate(self._search_regex( r'class="fullTime"[^>]+title="([^"]+)"', webpage, 'upload date', fatal=False)) location = self._html_search_regex( r'(?s)<dt>Location</dt>\s*<dd>(.+?)<', webpage, 'location', fatal=False) def extract_count(webpage, label): return str_to_int(self._search_regex( r'<span[^>]+class="stat-num"[^>]*>([\d,.]+)</span>\s*<span[^>]+class="stat-label"[^>]*>%s' % label, webpage, label, fatal=False)) view_count = extract_count(webpage, 'Views') comment_count = extract_count(webpage, 'Comments') return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'upload_date': upload_date, 'uploader': uploader, 'location': location, 'view_count': view_count, 'comment_count': comment_count, 'formats': formats }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/facebook.py
youtube_dl/extractor/facebook.py
# coding: utf-8 from __future__ import unicode_literals import json import re import socket from .common import InfoExtractor from ..compat import ( compat_etree_fromstring, compat_http_client, compat_str, compat_urllib_error, compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, ) from ..utils import ( clean_html, error_to_compat_str, ExtractorError, float_or_none, get_element_by_id, int_or_none, js_to_json, limit_length, parse_count, qualities, sanitized_Request, try_get, urlencode_postdata, urljoin, ) class FacebookIE(InfoExtractor): _VALID_URL = r'''(?x) (?: https?:// (?:[\w-]+\.)?(?:facebook\.com|facebookcorewwwi\.onion)/ (?:[^#]*?\#!/)? (?: (?: video/video\.php| photo\.php| video\.php| video/embed| story\.php| watch(?:/live)?/? )\?(?:.*?)(?:v|video_id|story_fbid)=| [^/]+/videos/(?:[^/]+/)?| [^/]+/posts/| groups/[^/]+/permalink/| watchparty/ )| facebook: ) (?P<id>[0-9]+) ''' _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s' _VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary' _TESTS = [{ 'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf', 'md5': '6a40d33c0eccbb1af76cf0485a052659', 'info_dict': { 'id': '637842556329505', 'ext': 'mp4', 'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam', 'uploader': 'Tennis on Facebook', 'upload_date': '20140908', 'timestamp': 1410199200, }, 'skip': 'Requires logging in', }, { # data.video 'url': 'https://www.facebook.com/video.php?v=274175099429670', 'info_dict': { 'id': '274175099429670', 'ext': 'mp4', 'title': 're:^Asif Nawab Butt posted a video', 'uploader': 'Asif Nawab Butt', 'upload_date': '20140506', 
'timestamp': 1399398998, 'thumbnail': r're:^https?://.*', }, 'expected_warnings': [ 'title' ] }, { 'note': 'Video with DASH manifest', 'url': 'https://www.facebook.com/video.php?v=957955867617029', 'md5': 'b2c28d528273b323abe5c6ab59f0f030', 'info_dict': { 'id': '957955867617029', 'ext': 'mp4', 'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...', 'uploader': 'Demy de Zeeuw', 'upload_date': '20160110', 'timestamp': 1452431627, }, 'skip': 'Requires logging in', }, { 'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570', 'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6', 'info_dict': { 'id': '544765982287235', 'ext': 'mp4', 'title': '"What are you doing running in the snow?"', 'uploader': 'FailArmy', }, 'skip': 'Video gone', }, { 'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903', 'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3', 'info_dict': { 'id': '1035862816472149', 'ext': 'mp4', 'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog', 'uploader': 'S. 
Saint', }, 'skip': 'Video gone', }, { 'note': 'swf params escaped', 'url': 'https://www.facebook.com/barackobama/posts/10153664894881749', 'md5': '97ba073838964d12c70566e0085c2b91', 'info_dict': { 'id': '10153664894881749', 'ext': 'mp4', 'title': 'Average time to confirm recent Supreme Court nominees: 67 days Longest it\'s t...', 'thumbnail': r're:^https?://.*', 'timestamp': 1456259628, 'upload_date': '20160223', 'uploader': 'Barack Obama', }, }, { # have 1080P, but only up to 720p in swf params # data.video.story.attachments[].media 'url': 'https://www.facebook.com/cnn/videos/10155529876156509/', 'md5': '9571fae53d4165bbbadb17a94651dcdc', 'info_dict': { 'id': '10155529876156509', 'ext': 'mp4', 'title': 'She survived the holocaust — and years later, she’s getting her citizenship s...', 'timestamp': 1477818095, 'upload_date': '20161030', 'uploader': 'CNN', 'thumbnail': r're:^https?://.*', 'view_count': int, }, }, { # bigPipe.onPageletArrive ... onPageletArrive pagelet_group_mall # data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media 'url': 'https://www.facebook.com/yaroslav.korpan/videos/1417995061575415/', 'info_dict': { 'id': '1417995061575415', 'ext': 'mp4', 'title': 'md5:1db063d6a8c13faa8da727817339c857', 'timestamp': 1486648217, 'upload_date': '20170209', 'uploader': 'Yaroslav Korpan', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.facebook.com/LaGuiaDelVaron/posts/1072691702860471', 'info_dict': { 'id': '1072691702860471', 'ext': 'mp4', 'title': 'md5:ae2d22a93fbb12dad20dc393a869739d', 'timestamp': 1477305000, 'upload_date': '20161024', 'uploader': 'La Guía Del Varón', 'thumbnail': r're:^https?://.*', }, 'params': { 'skip_download': True, }, }, { # data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media 'url': 'https://www.facebook.com/groups/1024490957622648/permalink/1396382447100162/', 'info_dict': { 'id': '1396382447100162', 'ext': 'mp4', 'title': 
'md5:19a428bbde91364e3de815383b54a235', 'timestamp': 1486035494, 'upload_date': '20170202', 'uploader': 'Elisabeth Ahtn', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.facebook.com/video.php?v=10204634152394104', 'only_matching': True, }, { 'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf', 'only_matching': True, }, { # data.mediaset.currMedia.edges 'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater', 'only_matching': True, }, { # data.video.story.attachments[].media 'url': 'facebook:544765982287235', 'only_matching': True, }, { # data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media 'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/', 'only_matching': True, }, { # data.video.creation_story.attachments[].media 'url': 'https://zh-hk.facebook.com/peoplespower/videos/1135894589806027/', 'only_matching': True, }, { # data.video 'url': 'https://www.facebookcorewwwi.onion/video.php?v=274175099429670', 'only_matching': True, }, { # no title 'url': 'https://www.facebook.com/onlycleverentertainment/videos/1947995502095005/', 'only_matching': True, }, { # data.video 'url': 'https://www.facebook.com/WatchESLOne/videos/359649331226507/', 'info_dict': { 'id': '359649331226507', 'ext': 'mp4', 'title': '#ESLOne VoD - Birmingham Finals Day#1 Fnatic vs. 
@Evil Geniuses', 'uploader': 'ESL One Dota 2', }, 'params': { 'skip_download': True, }, }, { # data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media 'url': 'https://www.facebook.com/100033620354545/videos/106560053808006/', 'info_dict': { 'id': '106560053808006', }, 'playlist_count': 2, }, { # data.video.story.attachments[].media 'url': 'https://www.facebook.com/watch/?v=647537299265662', 'only_matching': True, }, { # data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media 'url': 'https://www.facebook.com/PankajShahLondon/posts/10157667649866271', 'info_dict': { 'id': '10157667649866271', }, 'playlist_count': 3, }, { # data.nodes[].comet_sections.content.story.attachments[].style_type_renderer.attachment.media 'url': 'https://m.facebook.com/Alliance.Police.Department/posts/4048563708499330', 'info_dict': { 'id': '117576630041613', 'ext': 'mp4', # TODO: title can be extracted from video page 'title': 'Facebook video #117576630041613', 'uploader_id': '189393014416438', 'upload_date': '20201123', 'timestamp': 1606162592, }, 'skip': 'Requires logging in', }, { # node.comet_sections.content.story.attached_story.attachments.style_type_renderer.attachment.media 'url': 'https://www.facebook.com/groups/ateistiskselskab/permalink/10154930137678856/', 'info_dict': { 'id': '211567722618337', 'ext': 'mp4', 'title': 'Facebook video #211567722618337', 'uploader_id': '127875227654254', 'upload_date': '20161122', 'timestamp': 1479793574, }, }, { # data.video.creation_story.attachments[].media 'url': 'https://www.facebook.com/watch/live/?v=1823658634322275', 'only_matching': True, }, { 'url': 'https://www.facebook.com/watchparty/211641140192478', 'info_dict': { 'id': '211641140192478', }, 'playlist_count': 1, 'skip': 'Requires logging in', }] _SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)' _api_config = 
{ 'graphURI': '/api/graphql/' } @staticmethod def _extract_urls(webpage): urls = [] for mobj in re.finditer( r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1', webpage): urls.append(mobj.group('url')) # Facebook API embed # see https://developers.facebook.com/docs/plugins/embedded-video-player for mobj in re.finditer(r'''(?x)<div[^>]+ class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+ data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage): urls.append(mobj.group('url')) return urls def _login(self): useremail, password = self._get_login_info() if useremail is None: return login_page_req = sanitized_Request(self._LOGIN_URL) self._set_cookie('facebook.com', 'locale', 'en_US') login_page = self._download_webpage(login_page_req, None, note='Downloading login page', errnote='Unable to download login page') lsd = self._search_regex( r'<input type="hidden" name="lsd" value="([^"]*)"', login_page, 'lsd') lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd') login_form = { 'email': useremail, 'pass': password, 'lsd': lsd, 'lgnrnd': lgnrnd, 'next': 'http://facebook.com/home.php', 'default_persistent': '0', 'legacy_return': '1', 'timezone': '-60', 'trynum': '1', } request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') try: login_results = self._download_webpage(request, None, note='Logging in', errnote='unable to fetch login page') if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: error = self._html_search_regex( r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>', login_results, 'login error', default=None, group='error') if error: raise ExtractorError('Unable to login: %s' % error, expected=True) self._downloader.report_warning('unable to log in: bad 
username/password, or exceeded login rate limit (~3/min). Check credentials or wait.') return fb_dtsg = self._search_regex( r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None) h = self._search_regex( r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None) if not fb_dtsg or not h: return check_form = { 'fb_dtsg': fb_dtsg, 'h': h, 'name_action_selected': 'dont_save', } check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') check_response = self._download_webpage(check_req, None, note='Confirming login') if re.search(r'id="checkpointSubmitButton"', check_response) is not None: self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err)) return def _real_initialize(self): self._login() def _extract_from_url(self, url, video_id): webpage = self._download_webpage( url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id) video_data = None def extract_video_data(instances): video_data = [] for item in instances: if try_get(item, lambda x: x[1][0]) == 'VideoConfig': video_item = item[2][0] if video_item.get('video_id'): video_data.append(video_item['videoData']) return video_data server_js_data = self._parse_json(self._search_regex( [r'handleServerJS\(({.+})(?:\);|,")', r'\bs\.handle\(({.+?})\);'], webpage, 'server js data', default='{}'), video_id, fatal=False) if server_js_data: video_data = extract_video_data(server_js_data.get('instances', [])) def extract_from_jsmods_instances(js_data): if js_data: return extract_video_data(try_get( js_data, lambda x: x['jsmods']['instances'], list) or []) def extract_dash_manifest(video, formats): dash_manifest = video.get('dash_manifest') if 
dash_manifest: formats.extend(self._parse_mpd_formats( compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest)))) def process_formats(formats): # Downloads with browser's User-Agent are rate limited. Working around # with non-browser User-Agent. for f in formats: f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1' self._sort_formats(formats) def extract_relay_data(_filter): return self._parse_json(self._search_regex( r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter, webpage, 'replay data', default='{}'), video_id, fatal=False) or {} def extract_relay_prefetched_data(_filter): replay_data = extract_relay_data(_filter) for require in (replay_data.get('require') or []): if require[0] == 'RelayPrefetchedStreamCache': return try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {} if not video_data: server_js_data = self._parse_json(self._search_regex([ r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX, r'bigPipe\.onPageletArrive\(({.*?id\s*:\s*"%s".*?})\);' % self._SUPPORTED_PAGLETS_REGEX ], webpage, 'js data', default='{}'), video_id, js_to_json, False) video_data = extract_from_jsmods_instances(server_js_data) if not video_data: data = extract_relay_prefetched_data( r'"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+"') if data: entries = [] def parse_graphql_video(video): formats = [] q = qualities(['sd', 'hd']) for (suffix, format_id) in [('', 'sd'), ('_quality_hd', 'hd')]: playable_url = video.get('playable_url' + suffix) if not playable_url: continue formats.append({ 'format_id': format_id, 'quality': q(format_id), 'url': playable_url, }) extract_dash_manifest(video, formats) process_formats(formats) v_id = video.get('videoId') or video.get('id') or video_id info = { 'id': v_id, 'formats': formats, 'thumbnail': try_get(video, lambda x: x['thumbnailImage']['uri']), 'uploader_id': try_get(video, lambda x: 
x['owner']['id']), 'timestamp': int_or_none(video.get('publish_time')), 'duration': float_or_none(video.get('playable_duration_in_ms'), 1000), } description = try_get(video, lambda x: x['savable_description']['text']) title = video.get('name') if title: info.update({ 'title': title, 'description': description, }) else: info['title'] = description or 'Facebook video #%s' % v_id entries.append(info) def parse_attachment(attachment, key='media'): media = attachment.get(key) or {} if media.get('__typename') == 'Video': return parse_graphql_video(media) nodes = data.get('nodes') or [] node = data.get('node') or {} if not nodes and node: nodes.append(node) for node in nodes: story = try_get(node, lambda x: x['comet_sections']['content']['story'], dict) or {} attachments = try_get(story, [ lambda x: x['attached_story']['attachments'], lambda x: x['attachments'] ], list) or [] for attachment in attachments: attachment = try_get(attachment, lambda x: x['style_type_renderer']['attachment'], dict) ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or [] for n in ns: parse_attachment(n) parse_attachment(attachment) edges = try_get(data, lambda x: x['mediaset']['currMedia']['edges'], list) or [] for edge in edges: parse_attachment(edge, key='node') video = data.get('video') or {} if video: attachments = try_get(video, [ lambda x: x['story']['attachments'], lambda x: x['creation_story']['attachments'] ], list) or [] for attachment in attachments: parse_attachment(attachment) if not entries: parse_graphql_video(video) return self.playlist_result(entries, video_id) if not video_data: m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) if m_msg is not None: raise ExtractorError( 'The video is not available, Facebook said: "%s"' % m_msg.group(1), expected=True) elif any(p in webpage for p in ( '>You must log in to continue', 'id="login_form"', 'id="loginbutton"')): self.raise_login_required() if not video_data and 
'/watchparty/' in url: post_data = { 'doc_id': 3731964053542869, 'variables': json.dumps({ 'livingRoomID': video_id, }), } prefetched_data = extract_relay_prefetched_data(r'"login_data"\s*:\s*{') if prefetched_data: lsd = try_get(prefetched_data, lambda x: x['login_data']['lsd'], dict) if lsd: post_data[lsd['name']] = lsd['value'] relay_data = extract_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,') for define in (relay_data.get('define') or []): if define[0] == 'RelayAPIConfigDefaults': self._api_config = define[2] living_room = self._download_json( urljoin(url, self._api_config['graphURI']), video_id, data=urlencode_postdata(post_data))['data']['living_room'] entries = [] for edge in (try_get(living_room, lambda x: x['recap']['watched_content']['edges']) or []): video = try_get(edge, lambda x: x['node']['video']) or {} v_id = video.get('id') if not v_id: continue v_id = compat_str(v_id) entries.append(self.url_result( self._VIDEO_PAGE_TEMPLATE % v_id, self.ie_key(), v_id, video.get('name'))) return self.playlist_result(entries, video_id) if not video_data: # Video info not in first request, do a secondary request using # tahoe player specific URL tahoe_data = self._download_webpage( self._VIDEO_PAGE_TAHOE_TEMPLATE % video_id, video_id, data=urlencode_postdata({ '__a': 1, '__pc': self._search_regex( r'pkg_cohort["\']\s*:\s*["\'](.+?)["\']', webpage, 'pkg cohort', default='PHASED:DEFAULT'), '__rev': self._search_regex( r'client_revision["\']\s*:\s*(\d+),', webpage, 'client revision', default='3944515'), 'fb_dtsg': self._search_regex( r'"DTSGInitialData"\s*,\s*\[\]\s*,\s*{\s*"token"\s*:\s*"([^"]+)"', webpage, 'dtsg token', default=''), }), headers={ 'Content-Type': 'application/x-www-form-urlencoded', }) tahoe_js_data = self._parse_json( self._search_regex( r'for\s+\(\s*;\s*;\s*\)\s*;(.+)', tahoe_data, 'tahoe js data', default='{}'), video_id, fatal=False) video_data = extract_from_jsmods_instances(tahoe_js_data) if not video_data: raise ExtractorError('Cannot parse 
data') if len(video_data) > 1: entries = [] for v in video_data: video_url = v[0].get('video_url') if not video_url: continue entries.append(self.url_result(urljoin( url, video_url), self.ie_key(), v[0].get('video_id'))) return self.playlist_result(entries, video_id) video_data = video_data[0] formats = [] subtitles = {} for f in video_data: format_id = f['stream_type'] if f and isinstance(f, dict): f = [f] if not f or not isinstance(f, list): continue for quality in ('sd', 'hd'): for src_type in ('src', 'src_no_ratelimit'): src = f[0].get('%s_%s' % (quality, src_type)) if src: preference = -10 if format_id == 'progressive' else 0 if quality == 'hd': preference += 5 formats.append({ 'format_id': '%s_%s_%s' % (format_id, quality, src_type), 'url': src, 'preference': preference, }) extract_dash_manifest(f[0], formats) subtitles_src = f[0].get('subtitles_src') if subtitles_src: subtitles.setdefault('en', []).append({'url': subtitles_src}) if not formats: raise ExtractorError('Cannot find video formats') process_formats(formats) video_title = self._html_search_regex( r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title', default=None) if not video_title: video_title = self._html_search_regex( r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>', webpage, 'alternative title', default=None) if not video_title: video_title = self._html_search_meta( 'description', webpage, 'title', default=None) if video_title: video_title = limit_length(video_title, 80) else: video_title = 'Facebook video #%s' % video_id uploader = clean_html(get_element_by_id( 'fbPhotoPageAuthorName', webpage)) or self._search_regex( r'ownerName\s*:\s*"([^"]+)"', webpage, 'uploader', default=None) or self._og_search_title(webpage, fatal=False) timestamp = int_or_none(self._search_regex( r'<abbr[^>]+data-utime=["\'](\d+)', webpage, 'timestamp', default=None)) thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage) 
view_count = parse_count(self._search_regex( r'\bviewCount\s*:\s*["\']([\d,.]+)', webpage, 'view count', default=None)) info_dict = { 'id': video_id, 'title': video_title, 'formats': formats, 'uploader': uploader, 'timestamp': timestamp, 'thumbnail': thumbnail, 'view_count': view_count, 'subtitles': subtitles, } return info_dict def _real_extract(self, url): video_id = self._match_id(url) real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url return self._extract_from_url(real_url, video_id) class FacebookPluginsVideoIE(InfoExtractor): _VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/plugins/video\.php\?.*?\bhref=(?P<id>https.+)' _TESTS = [{ 'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fgov.sg%2Fvideos%2F10154383743583686%2F&show_text=0&width=560', 'md5': '5954e92cdfe51fe5782ae9bda7058a07', 'info_dict': { 'id': '10154383743583686', 'ext': 'mp4', 'title': 'What to do during the haze?', 'uploader': 'Gov.sg', 'upload_date': '20160826', 'timestamp': 1472184808, }, 'add_ie': [FacebookIE.ie_key()], }, { 'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fvideo.php%3Fv%3D10204634152394104', 'only_matching': True, }, { 'url': 'https://www.facebook.com/plugins/video.php?href=https://www.facebook.com/gov.sg/videos/10154383743583686/&show_text=0&width=560', 'only_matching': True, }] def _real_extract(self, url): return self.url_result( compat_urllib_parse_unquote(self._match_id(url)), FacebookIE.ie_key())
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/porncom.py
youtube_dl/extractor/porncom.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( int_or_none, js_to_json, parse_filesize, str_to_int, ) class PornComIE(InfoExtractor): _VALID_URL = r'https?://(?:[a-zA-Z]+\.)?porn\.com/videos/(?:(?P<display_id>[^/]+)-)?(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.porn.com/videos/teen-grabs-a-dildo-and-fucks-her-pussy-live-on-1hottie-i-rec-2603339', 'md5': '3f30ce76267533cd12ba999263156de7', 'info_dict': { 'id': '2603339', 'display_id': 'teen-grabs-a-dildo-and-fucks-her-pussy-live-on-1hottie-i-rec', 'ext': 'mp4', 'title': 'Teen grabs a dildo and fucks her pussy live on 1hottie, I rec', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 551, 'view_count': int, 'age_limit': 18, 'categories': list, 'tags': list, }, }, { 'url': 'http://se.porn.com/videos/marsha-may-rides-seth-on-top-of-his-thick-cock-2658067', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id webpage = self._download_webpage(url, display_id) config = self._parse_json( self._search_regex( (r'=\s*({.+?})\s*;\s*v1ar\b', r'=\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*='), webpage, 'config', default='{}'), display_id, transform_source=js_to_json, fatal=False) if config: title = config['title'] formats = [{ 'url': stream['url'], 'format_id': stream.get('id'), 'height': int_or_none(self._search_regex( r'^(\d+)[pP]', stream.get('id') or '', 'height', default=None)) } for stream in config['streams'] if stream.get('url')] thumbnail = (compat_urlparse.urljoin( config['thumbCDN'], config['poster']) if config.get('thumbCDN') and config.get('poster') else None) duration = int_or_none(config.get('length')) else: title = self._search_regex( (r'<title>([^<]+)</title>', r'<h1[^>]*>([^<]+)</h1>'), webpage, 'title') formats = [{ 'url': compat_urlparse.urljoin(url, format_url), 'format_id': '%sp' % height, 'height': 
int(height), 'filesize_approx': parse_filesize(filesize), } for format_url, height, filesize in re.findall( r'<a[^>]+href="(/download/[^"]+)">[^<]*?(\d+)p<span[^>]*>(\d+\s*[a-zA-Z]+)<', webpage)] thumbnail = None duration = None self._sort_formats(formats) view_count = str_to_int(self._search_regex( (r'Views:\s*</span>\s*<span>\s*([\d,.]+)', r'class=["\']views["\'][^>]*><p>([\d,.]+)'), webpage, 'view count', fatal=False)) def extract_list(kind): s = self._search_regex( (r'(?s)%s:\s*</span>\s*<span>(.+?)</span>' % kind.capitalize(), r'(?s)<p[^>]*>%s:(.+?)</p>' % kind.capitalize()), webpage, kind, fatal=False) return re.findall(r'<a[^>]+>([^<]+)</a>', s or '') return { 'id': video_id, 'display_id': display_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, 'formats': formats, 'age_limit': 18, 'categories': extract_list('categories'), 'tags': extract_list('tags'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mailru.py
youtube_dl/extractor/mailru.py
# coding: utf-8 from __future__ import unicode_literals import itertools import json import re from .common import InfoExtractor from ..compat import compat_urllib_parse_unquote from ..utils import ( int_or_none, parse_duration, remove_end, try_get, ) class MailRuIE(InfoExtractor): IE_NAME = 'mailru' IE_DESC = 'Видео@Mail.Ru' _VALID_URL = r'''(?x) https?:// (?:(?:www|m)\.)?my\.mail\.ru/+ (?: video/.*\#video=/?(?P<idv1>(?:[^/]+/){3}\d+)| (?:(?P<idv2prefix>(?:[^/]+/+){2})video/(?P<idv2suffix>[^/]+/\d+))\.html| (?:video/embed|\+/video/meta)/(?P<metaid>\d+) ) ''' _TESTS = [ { 'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76', 'md5': 'dea205f03120046894db4ebb6159879a', 'info_dict': { 'id': '46301138_76', 'ext': 'mp4', 'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро', 'timestamp': 1393235077, 'upload_date': '20140224', 'uploader': 'sonypicturesrus', 'uploader_id': 'sonypicturesrus@mail.ru', 'duration': 184, }, 'skip': 'Not accessible from Travis CI server', }, { 'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html', 'md5': '00a91a58c3402204dcced523777b475f', 'info_dict': { 'id': '46843144_1263', 'ext': 'mp4', 'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion', 'timestamp': 1397039888, 'upload_date': '20140409', 'uploader': 'hitech', 'uploader_id': 'hitech@corp.mail.ru', 'duration': 245, }, 'skip': 'Not accessible from Travis CI server', }, { # only available via metaUrl API 'url': 'http://my.mail.ru/mail/720pizle/video/_myvideo/502.html', 'md5': '3b26d2491c6949d031a32b96bd97c096', 'info_dict': { 'id': '56664382_502', 'ext': 'mp4', 'title': ':8336', 'timestamp': 1449094163, 'upload_date': '20151202', 'uploader': '720pizle@mail.ru', 'uploader_id': '720pizle@mail.ru', 'duration': 6001, }, 'skip': 'Not accessible from Travis CI server', }, { 'url': 'http://m.my.mail.ru/mail/3sktvtr/video/_myvideo/138.html', 'only_matching': True, }, { 'url': 'https://my.mail.ru/video/embed/7949340477499637815', 
'only_matching': True, }, { 'url': 'http://my.mail.ru/+/video/meta/7949340477499637815', 'only_matching': True, }, { 'url': 'https://my.mail.ru//list/sinyutin10/video/_myvideo/4.html', 'only_matching': True, }, { 'url': 'https://my.mail.ru//list//sinyutin10/video/_myvideo/4.html', 'only_matching': True, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) meta_id = mobj.group('metaid') video_id = None if meta_id: meta_url = 'https://my.mail.ru/+/video/meta/%s' % meta_id else: video_id = mobj.group('idv1') if not video_id: video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix') webpage = self._download_webpage(url, video_id) page_config = self._parse_json(self._search_regex( r'(?s)<script[^>]+class="sp-video__page-config"[^>]*>(.+?)</script>', webpage, 'page config', default='{}'), video_id, fatal=False) if page_config: meta_url = page_config.get('metaUrl') or page_config.get('video', {}).get('metaUrl') else: meta_url = None video_data = None if meta_url: video_data = self._download_json( meta_url, video_id or meta_id, 'Downloading video meta JSON', fatal=not video_id) # Fallback old approach if not video_data: video_data = self._download_json( 'http://api.video.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON') headers = {} video_key = self._get_cookies('https://my.mail.ru').get('video_key') if video_key: headers['Cookie'] = 'video_key=%s' % video_key.value formats = [] for f in video_data['videos']: video_url = f.get('url') if not video_url: continue format_id = f.get('key') height = int_or_none(self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None)) if format_id else None formats.append({ 'url': video_url, 'format_id': format_id, 'height': height, 'http_headers': headers, }) self._sort_formats(formats) meta_data = video_data['meta'] title = remove_end(meta_data['title'], '.mp4') author = video_data.get('author') uploader = author.get('name') uploader_id = author.get('id') or author.get('email') 
view_count = int_or_none(video_data.get('viewsCount') or video_data.get('views_count')) acc_id = meta_data.get('accId') item_id = meta_data.get('itemId') content_id = '%s_%s' % (acc_id, item_id) if acc_id and item_id else video_id thumbnail = meta_data.get('poster') duration = int_or_none(meta_data.get('duration')) timestamp = int_or_none(meta_data.get('timestamp')) return { 'id': content_id, 'title': title, 'thumbnail': thumbnail, 'timestamp': timestamp, 'uploader': uploader, 'uploader_id': uploader_id, 'duration': duration, 'view_count': view_count, 'formats': formats, } class MailRuMusicSearchBaseIE(InfoExtractor): def _search(self, query, url, audio_id, limit=100, offset=0): search = self._download_json( 'https://my.mail.ru/cgi-bin/my/ajax', audio_id, 'Downloading songs JSON page %d' % (offset // limit + 1), headers={ 'Referer': url, 'X-Requested-With': 'XMLHttpRequest', }, query={ 'xemail': '', 'ajax_call': '1', 'func_name': 'music.search', 'mna': '', 'mnb': '', 'arg_query': query, 'arg_extended': '1', 'arg_search_params': json.dumps({ 'music': { 'limit': limit, 'offset': offset, }, }), 'arg_limit': limit, 'arg_offset': offset, }) return next(e for e in search if isinstance(e, dict)) @staticmethod def _extract_track(t, fatal=True): audio_url = t['URL'] if fatal else t.get('URL') if not audio_url: return audio_id = t['File'] if fatal else t.get('File') if not audio_id: return thumbnail = t.get('AlbumCoverURL') or t.get('FiledAlbumCover') uploader = t.get('OwnerName') or t.get('OwnerName_Text_HTML') uploader_id = t.get('UploaderID') duration = int_or_none(t.get('DurationInSeconds')) or parse_duration( t.get('Duration') or t.get('DurationStr')) view_count = int_or_none(t.get('PlayCount') or t.get('PlayCount_hr')) track = t.get('Name') or t.get('Name_Text_HTML') artist = t.get('Author') or t.get('Author_Text_HTML') if track: title = '%s - %s' % (artist, track) if artist else track else: title = audio_id return { 'extractor_key': MailRuMusicIE.ie_key(), 'id': 
audio_id, 'title': title, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_id': uploader_id, 'duration': duration, 'view_count': view_count, 'vcodec': 'none', 'abr': int_or_none(t.get('BitRate')), 'track': track, 'artist': artist, 'album': t.get('Album'), 'url': audio_url, } class MailRuMusicIE(MailRuMusicSearchBaseIE): IE_NAME = 'mailru:music' IE_DESC = 'Музыка@Mail.Ru' _VALID_URL = r'https?://my\.mail\.ru/+music/+songs/+[^/?#&]+-(?P<id>[\da-f]+)' _TESTS = [{ 'url': 'https://my.mail.ru/music/songs/%D0%BC8%D0%BB8%D1%82%D1%85-l-a-h-luciferian-aesthetics-of-herrschaft-single-2017-4e31f7125d0dfaef505d947642366893', 'md5': '0f8c22ef8c5d665b13ac709e63025610', 'info_dict': { 'id': '4e31f7125d0dfaef505d947642366893', 'ext': 'mp3', 'title': 'L.A.H. (Luciferian Aesthetics of Herrschaft) single, 2017 - М8Л8ТХ', 'uploader': 'Игорь Мудрый', 'uploader_id': '1459196328', 'duration': 280, 'view_count': int, 'vcodec': 'none', 'abr': 320, 'track': 'L.A.H. (Luciferian Aesthetics of Herrschaft) single, 2017', 'artist': 'М8Л8ТХ', }, }] def _real_extract(self, url): audio_id = self._match_id(url) webpage = self._download_webpage(url, audio_id) title = self._og_search_title(webpage) music_data = self._search(title, url, audio_id)['MusicData'] t = next(t for t in music_data if t.get('File') == audio_id) info = self._extract_track(t) info['title'] = title return info class MailRuMusicSearchIE(MailRuMusicSearchBaseIE): IE_NAME = 'mailru:music:search' IE_DESC = 'Музыка@Mail.Ru' _VALID_URL = r'https?://my\.mail\.ru/+music/+search/+(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://my.mail.ru/music/search/black%20shadow', 'info_dict': { 'id': 'black shadow', }, 'playlist_mincount': 532, }] def _real_extract(self, url): query = compat_urllib_parse_unquote(self._match_id(url)) entries = [] LIMIT = 100 offset = 0 for _ in itertools.count(1): search = self._search(query, url, query, LIMIT, offset) music_data = search.get('MusicData') if not music_data or not isinstance(music_data, list): break 
for t in music_data: track = self._extract_track(t, fatal=False) if track: entries.append(track) total = try_get( search, lambda x: x['Results']['music']['Total'], int) if total is not None: if offset > total: break offset += LIMIT return self.playlist_result(entries, query)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nba.py
youtube_dl/extractor/nba.py
from __future__ import unicode_literals import functools import re from .turner import TurnerBaseIE from ..compat import ( compat_parse_qs, compat_str, compat_urllib_parse_unquote, compat_urllib_parse_urlparse, ) from ..utils import ( int_or_none, merge_dicts, OnDemandPagedList, parse_duration, parse_iso8601, try_get, update_url_query, urljoin, ) class NBACVPBaseIE(TurnerBaseIE): def _extract_nba_cvp_info(self, path, video_id, fatal=False): return self._extract_cvp_info( 'http://secure.nba.com/%s' % path, video_id, { 'default': { 'media_src': 'http://nba.cdn.turner.com/nba/big', }, 'm3u8': { 'media_src': 'http://nbavod-f.akamaihd.net', }, }, fatal=fatal) class NBAWatchBaseIE(NBACVPBaseIE): _VALID_URL_BASE = r'https?://(?:(?:www\.)?nba\.com(?:/watch)?|watch\.nba\.com)/' def _extract_video(self, filter_key, filter_value): video = self._download_json( 'https://neulionscnbav2-a.akamaihd.net/solr/nbad_program/usersearch', filter_value, query={ 'fl': 'description,image,name,pid,releaseDate,runtime,tags,seoName', 'q': filter_key + ':' + filter_value, 'wt': 'json', })['response']['docs'][0] video_id = str(video['pid']) title = video['name'] formats = [] m3u8_url = (self._download_json( 'https://watch.nba.com/service/publishpoint', video_id, query={ 'type': 'video', 'format': 'json', 'id': video_id, }, headers={ 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_1 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A402 Safari/604.1', }, fatal=False) or {}).get('path') if m3u8_url: m3u8_formats = self._extract_m3u8_formats( re.sub(r'_(?:pc|iphone)\.', '.', m3u8_url), video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) formats.extend(m3u8_formats) for f in m3u8_formats: http_f = f.copy() http_f.update({ 'format_id': http_f['format_id'].replace('hls-', 'http-'), 'protocol': 'http', 'url': http_f['url'].replace('.m3u8', ''), }) formats.append(http_f) info = { 'id': video_id, 'title': title, 'thumbnail': 
urljoin('https://nbadsdmt.akamaized.net/media/nba/nba/thumbs/', video.get('image')), 'description': video.get('description'), 'duration': int_or_none(video.get('runtime')), 'timestamp': parse_iso8601(video.get('releaseDate')), 'tags': video.get('tags'), } seo_name = video.get('seoName') if seo_name and re.search(r'\d{4}/\d{2}/\d{2}/', seo_name): base_path = '' if seo_name.startswith('teams/'): base_path += seo_name.split('/')[1] + '/' base_path += 'video/' cvp_info = self._extract_nba_cvp_info( base_path + seo_name + '.xml', video_id, False) if cvp_info: formats.extend(cvp_info['formats']) info = merge_dicts(info, cvp_info) self._sort_formats(formats) info['formats'] = formats return info class NBAWatchEmbedIE(NBAWatchBaseIE): IENAME = 'nba:watch:embed' _VALID_URL = NBAWatchBaseIE._VALID_URL_BASE + r'embed\?.*?\bid=(?P<id>\d+)' _TESTS = [{ 'url': 'http://watch.nba.com/embed?id=659395', 'md5': 'b7e3f9946595f4ca0a13903ce5edd120', 'info_dict': { 'id': '659395', 'ext': 'mp4', 'title': 'Mix clip: More than 7 points of Joe Ingles, Luc Mbah a Moute, Blake Griffin and 6 more in Utah Jazz vs. the Clippers, 4/15/2017', 'description': 'Mix clip: More than 7 points of Joe Ingles, Luc Mbah a Moute, Blake Griffin and 6 more in Utah Jazz vs. the Clippers, 4/15/2017', 'timestamp': 1492228800, 'upload_date': '20170415', }, }] def _real_extract(self, url): video_id = self._match_id(url) return self._extract_video('pid', video_id) class NBAWatchIE(NBAWatchBaseIE): IE_NAME = 'nba:watch' _VALID_URL = NBAWatchBaseIE._VALID_URL_BASE + r'(?:nba/)?video/(?P<id>.+?(?=/index\.html)|(?:[^/]+/)*[^/?#&]+)' _TESTS = [{ 'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html', 'md5': '9d902940d2a127af3f7f9d2f3dc79c96', 'info_dict': { 'id': '70946', 'ext': 'mp4', 'title': 'Thunder vs. 
Nets', 'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.', 'duration': 181, 'timestamp': 1354597200, 'upload_date': '20121204', }, }, { 'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/', 'only_matching': True, }, { 'url': 'http://watch.nba.com/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba', 'md5': 'b2b39b81cf28615ae0c3360a3f9668c4', 'info_dict': { 'id': '330865', 'ext': 'mp4', 'title': 'Hawks vs. Cavaliers Game 1', 'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d', 'duration': 228, 'timestamp': 1432094400, 'upload_date': '20150521', }, }, { 'url': 'http://watch.nba.com/nba/video/channels/nba_tv/2015/06/11/YT_go_big_go_home_Game4_061115', 'only_matching': True, }, { # only CVP mp4 format available 'url': 'https://watch.nba.com/video/teams/cavaliers/2012/10/15/sloan121015mov-2249106', 'only_matching': True, }, { 'url': 'https://watch.nba.com/video/top-100-dunks-from-the-2019-20-season?plsrc=nba&collection=2019-20-season-highlights', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) collection_id = compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('collection', [None])[0] if collection_id: if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just video %s because of --no-playlist' % display_id) else: self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % collection_id) return self.url_result( 'https://www.nba.com/watch/list/collection/' + collection_id, NBAWatchCollectionIE.ie_key(), collection_id) return self._extract_video('seoName', display_id) class NBAWatchCollectionIE(NBAWatchBaseIE): IE_NAME = 'nba:watch:collection' _VALID_URL = NBAWatchBaseIE._VALID_URL_BASE + r'list/collection/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://watch.nba.com/list/collection/season-preview-2020', 'info_dict': { 'id': 'season-preview-2020', }, 
'playlist_mincount': 43, }] _PAGE_SIZE = 100 def _fetch_page(self, collection_id, page): page += 1 videos = self._download_json( 'https://content-api-prod.nba.com/public/1/endeavor/video-list/collection/' + collection_id, collection_id, 'Downloading page %d JSON metadata' % page, query={ 'count': self._PAGE_SIZE, 'page': page, })['results']['videos'] for video in videos: program = video.get('program') or {} seo_name = program.get('seoName') or program.get('slug') if not seo_name: continue yield { '_type': 'url', 'id': program.get('id'), 'title': program.get('title') or video.get('title'), 'url': 'https://www.nba.com/watch/video/' + seo_name, 'thumbnail': video.get('image'), 'description': program.get('description') or video.get('description'), 'duration': parse_duration(program.get('runtimeHours')), 'timestamp': parse_iso8601(video.get('releaseDate')), } def _real_extract(self, url): collection_id = self._match_id(url) entries = OnDemandPagedList( functools.partial(self._fetch_page, collection_id), self._PAGE_SIZE) return self.playlist_result(entries, collection_id) class NBABaseIE(NBACVPBaseIE): _VALID_URL_BASE = r'''(?x) https?://(?:www\.)?nba\.com/ (?P<team> blazers| bucks| bulls| cavaliers| celtics| clippers| grizzlies| hawks| heat| hornets| jazz| kings| knicks| lakers| magic| mavericks| nets| nuggets| pacers| pelicans| pistons| raptors| rockets| sixers| spurs| suns| thunder| timberwolves| warriors| wizards ) (?:/play\#)?/''' _CHANNEL_PATH_REGEX = r'video/channel|series' def _embed_url_result(self, team, content_id): return self.url_result(update_url_query( 'https://secure.nba.com/assets/amp/include/video/iframe.html', { 'contentId': content_id, 'team': team, }), NBAEmbedIE.ie_key()) def _call_api(self, team, content_id, query, resource): return self._download_json( 'https://api.nba.net/2/%s/video,imported_video,wsc/' % team, content_id, 'Download %s JSON metadata' % resource, query=query, headers={ 'accessToken': 'internal|bb88df6b4c2244e78822812cecf1ee1b', 
})['response']['result'] def _extract_video(self, video, team, extract_all=True): video_id = compat_str(video['nid']) team = video['brand'] info = { 'id': video_id, 'title': video.get('title') or video.get('headline') or video['shortHeadline'], 'description': video.get('description'), 'timestamp': parse_iso8601(video.get('published')), } subtitles = {} captions = try_get(video, lambda x: x['videoCaptions']['sidecars'], dict) or {} for caption_url in captions.values(): subtitles.setdefault('en', []).append({'url': caption_url}) formats = [] mp4_url = video.get('mp4') if mp4_url: formats.append({ 'url': mp4_url, }) if extract_all: source_url = video.get('videoSource') if source_url and not source_url.startswith('s3://') and self._is_valid_url(source_url, video_id, 'source'): formats.append({ 'format_id': 'source', 'url': source_url, 'preference': 1, }) m3u8_url = video.get('m3u8') if m3u8_url: if '.akamaihd.net/i/' in m3u8_url: formats.extend(self._extract_akamai_formats( m3u8_url, video_id, {'http': 'pmd.cdn.turner.com'})) else: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) content_xml = video.get('contentXml') if team and content_xml: cvp_info = self._extract_nba_cvp_info( team + content_xml, video_id, fatal=False) if cvp_info: formats.extend(cvp_info['formats']) subtitles = self._merge_subtitles(subtitles, cvp_info['subtitles']) info = merge_dicts(info, cvp_info) self._sort_formats(formats) else: info.update(self._embed_url_result(team, video['videoId'])) info.update({ 'formats': formats, 'subtitles': subtitles, }) return info def _real_extract(self, url): team, display_id = re.match(self._VALID_URL, url).groups() if '/play#/' in url: display_id = compat_urllib_parse_unquote(display_id) else: webpage = self._download_webpage(url, display_id) display_id = self._search_regex( self._CONTENT_ID_REGEX + r'\s*:\s*"([^"]+)"', webpage, 'video id') return self._extract_url_results(team, display_id) class 
NBAEmbedIE(NBABaseIE): IENAME = 'nba:embed' _VALID_URL = r'https?://secure\.nba\.com/assets/amp/include/video/(?:topI|i)frame\.html\?.*?\bcontentId=(?P<id>[^?#&]+)' _TESTS = [{ 'url': 'https://secure.nba.com/assets/amp/include/video/topIframe.html?contentId=teams/bulls/2020/12/04/3478774/1607105587854-20201204_SCHEDULE_RELEASE_FINAL_DRUPAL-3478774&team=bulls&adFree=false&profile=71&videoPlayerName=TAMPCVP&baseUrl=&videoAdsection=nba.com_mobile_web_teamsites_chicagobulls&ampEnv=', 'only_matching': True, }, { 'url': 'https://secure.nba.com/assets/amp/include/video/iframe.html?contentId=2016/10/29/0021600027boschaplay7&adFree=false&profile=71&team=&videoPlayerName=LAMPCVP', 'only_matching': True, }] def _real_extract(self, url): qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query) content_id = qs['contentId'][0] team = qs.get('team', [None])[0] if not team: return self.url_result( 'https://watch.nba.com/video/' + content_id, NBAWatchIE.ie_key()) video = self._call_api(team, content_id, {'videoid': content_id}, 'video')[0] return self._extract_video(video, team) class NBAIE(NBABaseIE): IENAME = 'nba' _VALID_URL = NBABaseIE._VALID_URL_BASE + '(?!%s)video/(?P<id>(?:[^/]+/)*[^/?#&]+)' % NBABaseIE._CHANNEL_PATH_REGEX _TESTS = [{ 'url': 'https://www.nba.com/bulls/video/teams/bulls/2020/12/04/3478774/1607105587854-20201204schedulereleasefinaldrupal-3478774', 'info_dict': { 'id': '45039', 'ext': 'mp4', 'title': 'AND WE BACK.', 'description': 'Part 1 of our 2020-21 schedule is here! 
Watch our games on NBC Sports Chicago.', 'duration': 94, 'timestamp': 1607112000, 'upload_date': '20201218', }, }, { 'url': 'https://www.nba.com/bucks/play#/video/teams%2Fbucks%2F2020%2F12%2F17%2F64860%2F1608252863446-Op_Dream_16x9-64860', 'only_matching': True, }, { 'url': 'https://www.nba.com/bucks/play#/video/wsc%2Fteams%2F2787C911AA1ACD154B5377F7577CCC7134B2A4B0', 'only_matching': True, }] _CONTENT_ID_REGEX = r'videoID' def _extract_url_results(self, team, content_id): return self._embed_url_result(team, content_id) class NBAChannelIE(NBABaseIE): IENAME = 'nba:channel' _VALID_URL = NBABaseIE._VALID_URL_BASE + '(?:%s)/(?P<id>[^/?#&]+)' % NBABaseIE._CHANNEL_PATH_REGEX _TESTS = [{ 'url': 'https://www.nba.com/blazers/video/channel/summer_league', 'info_dict': { 'title': 'Summer League', }, 'playlist_mincount': 138, }, { 'url': 'https://www.nba.com/bucks/play#/series/On%20This%20Date', 'only_matching': True, }] _CONTENT_ID_REGEX = r'videoSubCategory' _PAGE_SIZE = 100 def _fetch_page(self, team, channel, page): results = self._call_api(team, channel, { 'channels': channel, 'count': self._PAGE_SIZE, 'offset': page * self._PAGE_SIZE, }, 'page %d' % (page + 1)) for video in results: yield self._extract_video(video, team, False) def _extract_url_results(self, team, content_id): entries = OnDemandPagedList( functools.partial(self._fetch_page, team, content_id), self._PAGE_SIZE) return self.playlist_result(entries, playlist_title=content_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ccc.py
youtube_dl/extractor/ccc.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, try_get, url_or_none, ) class CCCIE(InfoExtractor): IE_NAME = 'media.ccc.de' _VALID_URL = r'https?://(?:www\.)?media\.ccc\.de/v/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://media.ccc.de/v/30C3_-_5443_-_en_-_saal_g_-_201312281830_-_introduction_to_processor_design_-_byterazor#video', 'md5': '3a1eda8f3a29515d27f5adb967d7e740', 'info_dict': { 'id': '1839', 'ext': 'mp4', 'title': 'Introduction to Processor Design', 'creator': 'byterazor', 'description': 'md5:df55f6d073d4ceae55aae6f2fd98a0ac', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20131228', 'timestamp': 1388188800, 'duration': 3710, 'tags': list, } }, { 'url': 'https://media.ccc.de/v/32c3-7368-shopshifting#download', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) event_id = self._search_regex(r"data-id='(\d+)'", webpage, 'event id') event_data = self._download_json('https://media.ccc.de/public/events/%s' % event_id, event_id) formats = [] for recording in event_data.get('recordings', []): recording_url = recording.get('recording_url') if not recording_url: continue language = recording.get('language') folder = recording.get('folder') format_id = None if language: format_id = language if folder: if language: format_id += '-' + folder else: format_id = folder vcodec = 'h264' if 'h264' in folder else ( 'none' if folder in ('mp3', 'opus') else None ) formats.append({ 'format_id': format_id, 'url': recording_url, 'width': int_or_none(recording.get('width')), 'height': int_or_none(recording.get('height')), 'filesize': int_or_none(recording.get('size'), invscale=1024 * 1024), 'language': language, 'vcodec': vcodec, }) self._sort_formats(formats) return { 'id': event_id, 'display_id': display_id, 'title': event_data['title'], 'creator': try_get(event_data, lambda x: ', 
'.join(x['persons'])), 'description': event_data.get('description'), 'thumbnail': event_data.get('thumb_url'), 'timestamp': parse_iso8601(event_data.get('date')), 'duration': int_or_none(event_data.get('length')), 'tags': event_data.get('tags'), 'formats': formats, } class CCCPlaylistIE(InfoExtractor): IE_NAME = 'media.ccc.de:lists' _VALID_URL = r'https?://(?:www\.)?media\.ccc\.de/c/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://media.ccc.de/c/30c3', 'info_dict': { 'title': '30C3', 'id': '30c3', }, 'playlist_count': 135, }] def _real_extract(self, url): playlist_id = self._match_id(url).lower() conf = self._download_json( 'https://media.ccc.de/public/conferences/' + playlist_id, playlist_id) entries = [] for e in conf['events']: event_url = url_or_none(e.get('frontend_link')) if event_url: entries.append(self.url_result(event_url, ie=CCCIE.ie_key())) return self.playlist_result(entries, playlist_id, conf.get('title'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/chaturbate.py
youtube_dl/extractor/chaturbate.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( ExtractorError, lowercase_escape, url_or_none, ) class ChaturbateIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?:fullvideo/?\?.*?\bb=)?(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.chaturbate.com/siswet19/', 'info_dict': { 'id': 'siswet19', 'ext': 'mp4', 'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'age_limit': 18, 'is_live': True, }, 'params': { 'skip_download': True, }, 'skip': 'Room is offline', }, { 'url': 'https://chaturbate.com/fullvideo/?b=caylin', 'only_matching': True, }, { 'url': 'https://en.chaturbate.com/siswet19/', 'only_matching': True, }] _ROOM_OFFLINE = 'Room is currently offline' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://chaturbate.com/%s/' % video_id, video_id, headers=self.geo_verification_headers()) found_m3u8_urls = [] data = self._parse_json( self._search_regex( r'initialRoomDossier\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'data', default='{}', group='value'), video_id, transform_source=lowercase_escape, fatal=False) if data: m3u8_url = url_or_none(data.get('hls_source')) if m3u8_url: found_m3u8_urls.append(m3u8_url) if not found_m3u8_urls: for m in re.finditer( r'(\\u002[27])(?P<url>http.+?\.m3u8.*?)\1', webpage): found_m3u8_urls.append(lowercase_escape(m.group('url'))) if not found_m3u8_urls: for m in re.finditer( r'(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage): found_m3u8_urls.append(m.group('url')) m3u8_urls = [] for found_m3u8_url in found_m3u8_urls: m3u8_fast_url, m3u8_no_fast_url = found_m3u8_url, found_m3u8_url.replace('_fast', '') for m3u8_url in (m3u8_fast_url, m3u8_no_fast_url): if m3u8_url not in m3u8_urls: m3u8_urls.append(m3u8_url) if not m3u8_urls: error = self._search_regex( [r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>', 
r'<div[^>]+id=(["\'])defchat\1[^>]*>\s*<p><strong>(?P<error>[^<]+)<'], webpage, 'error', group='error', default=None) if not error: if any(p in webpage for p in ( self._ROOM_OFFLINE, 'offline_tipping', 'tip_offline')): error = self._ROOM_OFFLINE if error: raise ExtractorError(error, expected=True) raise ExtractorError('Unable to find stream URL') formats = [] for m3u8_url in m3u8_urls: for known_id in ('fast', 'slow'): if '_%s' % known_id in m3u8_url: m3u8_id = known_id break else: m3u8_id = None formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', # ffmpeg skips segments for fast m3u8 preference=-10 if m3u8_id == 'fast' else None, m3u8_id=m3u8_id, fatal=False, live=True)) self._sort_formats(formats) return { 'id': video_id, 'title': self._live_title(video_id), 'thumbnail': 'https://roomimg.stream.highwebmedia.com/ri/%s.jpg' % video_id, 'age_limit': self._rta_search(webpage), 'is_live': True, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tvc.py
youtube_dl/extractor/tvc.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, ) class TVCIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvc\.ru/video/iframe/id/(?P<id>\d+)' _TEST = { 'url': 'http://www.tvc.ru/video/iframe/id/74622/isPlay/false/id_stat/channel/?acc_video_id=/channel/brand/id/17/show/episodes/episode_id/39702', 'md5': 'bbc5ff531d1e90e856f60fc4b3afd708', 'info_dict': { 'id': '74622', 'ext': 'mp4', 'title': 'События. "События". Эфир от 22.05.2015 14:30', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1122, }, } @classmethod def _extract_url(cls, webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:http:)?//(?:www\.)?tvc\.ru/video/iframe/id/[^"]+)\1', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'http://www.tvc.ru/video/json/id/%s' % video_id, video_id) formats = [] for info in video.get('path', {}).get('quality', []): video_url = info.get('url') if not video_url: continue format_id = self._search_regex( r'cdnvideo/([^/]+?)(?:-[^/]+?)?/', video_url, 'format id', default=None) formats.append({ 'url': video_url, 'format_id': format_id, 'width': int_or_none(info.get('width')), 'height': int_or_none(info.get('height')), 'tbr': int_or_none(info.get('bitrate')), }) self._sort_formats(formats) return { 'id': video_id, 'title': video['title'], 'thumbnail': video.get('picture'), 'duration': int_or_none(video.get('duration')), 'formats': formats, } class TVCArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tvc\.ru/(?!video/iframe/id/)(?P<id>[^?#]+)' _TESTS = [{ 'url': 'http://www.tvc.ru/channel/brand/id/29/show/episodes/episode_id/39702/', 'info_dict': { 'id': '74622', 'ext': 'mp4', 'title': 'События. "События". 
Эфир от 22.05.2015 14:30', 'description': 'md5:ad7aa7db22903f983e687b8a3e98c6dd', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1122, }, }, { 'url': 'http://www.tvc.ru/news/show/id/69944', 'info_dict': { 'id': '75399', 'ext': 'mp4', 'title': 'Эксперты: в столице встал вопрос о максимально безопасных остановках', 'description': 'md5:f2098f71e21f309e89f69b525fd9846e', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 278, }, }, { 'url': 'http://www.tvc.ru/channel/brand/id/47/show/episodes#', 'info_dict': { 'id': '2185', 'ext': 'mp4', 'title': 'Ещё не поздно. Эфир от 03.08.2013', 'description': 'md5:51fae9f3f8cfe67abce014e428e5b027', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 3316, }, }] def _real_extract(self, url): webpage = self._download_webpage(url, self._match_id(url)) return { '_type': 'url_transparent', 'ie_key': 'TVC', 'url': self._og_search_video_url(webpage), 'title': clean_html(self._og_search_title(webpage)), 'description': clean_html(self._og_search_description(webpage)), 'thumbnail': self._og_search_thumbnail(webpage), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/closertotruth.py
youtube_dl/extractor/closertotruth.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class CloserToTruthIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?closertotruth\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://closertotruth.com/series/solutions-the-mind-body-problem#video-3688', 'info_dict': { 'id': '0_zof1ktre', 'display_id': 'solutions-the-mind-body-problem', 'ext': 'mov', 'title': 'Solutions to the Mind-Body Problem?', 'upload_date': '20140221', 'timestamp': 1392956007, 'uploader_id': 'CTTXML' }, 'params': { 'skip_download': True, }, }, { 'url': 'http://closertotruth.com/episodes/how-do-brains-work', 'info_dict': { 'id': '0_iuxai6g6', 'display_id': 'how-do-brains-work', 'ext': 'mov', 'title': 'How do Brains Work?', 'upload_date': '20140221', 'timestamp': 1392956024, 'uploader_id': 'CTTXML' }, 'params': { 'skip_download': True, }, }, { 'url': 'http://closertotruth.com/interviews/1725', 'info_dict': { 'id': '1725', 'title': 'AyaFr-002', }, 'playlist_mincount': 2, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) partner_id = self._search_regex( r'<script[^>]+src=["\'].*?\b(?:partner_id|p)/(\d+)', webpage, 'kaltura partner_id') title = self._search_regex( r'<title>(.+?)\s*\|\s*.+?</title>', webpage, 'video title') select = self._search_regex( r'(?s)<select[^>]+id="select-version"[^>]*>(.+?)</select>', webpage, 'select version', default=None) if select: entry_ids = set() entries = [] for mobj in re.finditer( r'<option[^>]+value=(["\'])(?P<id>[0-9a-z_]+)(?:#.+?)?\1[^>]*>(?P<title>[^<]+)', webpage): entry_id = mobj.group('id') if entry_id in entry_ids: continue entry_ids.add(entry_id) entries.append({ '_type': 'url_transparent', 'url': 'kaltura:%s:%s' % (partner_id, entry_id), 'ie_key': 'Kaltura', 'title': mobj.group('title'), }) if entries: return self.playlist_result(entries, display_id, title) entry_id = self._search_regex( 
r'<a[^>]+id=(["\'])embed-kaltura\1[^>]+data-kaltura=(["\'])(?P<id>[0-9a-z_]+)\2', webpage, 'kaltura entry_id', group='id') return { '_type': 'url_transparent', 'display_id': display_id, 'url': 'kaltura:%s:%s' % (partner_id, entry_id), 'ie_key': 'Kaltura', 'title': title }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/golem.py
youtube_dl/extractor/golem.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( determine_ext, ) class GolemIE(InfoExtractor): _VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/' _TEST = { 'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html', 'md5': 'c1a2c0a3c863319651c7c992c5ee29bf', 'info_dict': { 'id': '14095', 'format_id': 'high', 'ext': 'mp4', 'title': 'iPhone 6 und 6 Plus - Test', 'duration': 300.44, 'filesize': 65309548, } } _PREFIX = 'http://video.golem.de' def _real_extract(self, url): video_id = self._match_id(url) config = self._download_xml( 'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id) info = { 'id': video_id, 'title': config.findtext('./title', 'golem'), 'duration': self._float(config.findtext('./playtime'), 'duration'), } formats = [] for e in config: url = e.findtext('./url') if not url: continue formats.append({ 'format_id': compat_str(e.tag), 'url': compat_urlparse.urljoin(self._PREFIX, url), 'height': self._int(e.get('height'), 'height'), 'width': self._int(e.get('width'), 'width'), 'filesize': self._int(e.findtext('filesize'), 'filesize'), 'ext': determine_ext(e.findtext('./filename')), }) self._sort_formats(formats) info['formats'] = formats thumbnails = [] for e in config.findall('.//teaser'): url = e.findtext('./url') if not url: continue thumbnails.append({ 'url': compat_urlparse.urljoin(self._PREFIX, url), 'width': self._int(e.get('width'), 'thumbnail width'), 'height': self._int(e.get('height'), 'thumbnail height'), }) info['thumbnails'] = thumbnails return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tfo.py
youtube_dl/extractor/tfo.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( HEADRequest, ExtractorError, int_or_none, clean_html, ) class TFOIE(InfoExtractor): _GEO_COUNTRIES = ['CA'] _VALID_URL = r'https?://(?:www\.)?tfo\.org/(?:en|fr)/(?:[^/]+/){2}(?P<id>\d+)' _TEST = { 'url': 'http://www.tfo.org/en/universe/tfo-247/100463871/video-game-hackathon', 'md5': 'cafbe4f47a8dae0ca0159937878100d6', 'info_dict': { 'id': '7da3d50e495c406b8fc0b997659cc075', 'ext': 'mp4', 'title': 'Video Game Hackathon', 'description': 'md5:558afeba217c6c8d96c60e5421795c07', } } def _real_extract(self, url): video_id = self._match_id(url) self._request_webpage(HEADRequest('http://www.tfo.org/'), video_id) infos = self._download_json( 'http://www.tfo.org/api/web/video/get_infos', video_id, data=json.dumps({ 'product_id': video_id, }).encode(), headers={ 'X-tfo-session': self._get_cookies('http://www.tfo.org/')['tfo-session'].value, }) if infos.get('success') == 0: if infos.get('code') == 'ErrGeoBlocked': self.raise_geo_restricted(countries=self._GEO_COUNTRIES) raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(infos['msg'])), expected=True) video_data = infos['data'] return { '_type': 'url_transparent', 'id': video_id, 'url': 'limelight:media:' + video_data['llid'], 'title': video_data['title'], 'description': video_data.get('description'), 'series': video_data.get('collection'), 'season_number': int_or_none(video_data.get('season')), 'episode_number': int_or_none(video_data.get('episode')), 'duration': int_or_none(video_data.get('duration')), 'ie_key': 'LimelightMedia', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/telewebion.py
youtube_dl/extractor/telewebion.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, url_or_none, ) class TelewebionIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?telewebion\.com/(episode|clip)/(?P<id>[a-zA-Z0-9]+)' _TEST = { 'url': 'http://www.telewebion.com/episode/0x1b3139c/', 'info_dict': { 'id': '0x1b3139c', 'ext': 'mp4', 'title': 'قرعه\u200cکشی لیگ قهرمانان اروپا', 'thumbnail': r're:^https?://static\.telewebion\.com/episodeImages/.*/default', 'view_count': int, }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): video_id = self._match_id(url) episode_details = self._download_json('https://gateway.telewebion.ir/kandoo/episode/getEpisodeDetail/?EpisodeId={0}'.format(video_id), video_id) episode_details = episode_details['body']['queryEpisode'][0] channel_id = episode_details['channel']['descriptor'] episode_image_id = episode_details.get('image') episode_image = 'https://static.telewebion.com/episodeImages/{0}/default'.format(episode_image_id) if episode_image_id else None m3u8_url = 'https://cdna.telewebion.com/{0}/episode/{1}/playlist.m3u8'.format(channel_id, video_id) formats = self._extract_m3u8_formats( m3u8_url, video_id, ext='mp4', m3u8_id='hls', entry_protocol='m3u8_native') self._sort_formats(formats) return { 'id': video_id, 'title': episode_details['title'], 'formats': formats, 'thumbnail': url_or_none(episode_image), 'view_count': int_or_none(episode_details.get('view_count')), 'duration': float_or_none(episode_details.get('duration')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/kinopoisk.py
youtube_dl/extractor/kinopoisk.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( dict_get, int_or_none, ) class KinoPoiskIE(InfoExtractor): _GEO_COUNTRIES = ['RU'] _VALID_URL = r'https?://(?:www\.)?kinopoisk\.ru/film/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.kinopoisk.ru/film/81041/watch/', 'md5': '4f71c80baea10dfa54a837a46111d326', 'info_dict': { 'id': '81041', 'ext': 'mp4', 'title': 'Алеша попович и тугарин змей', 'description': 'md5:43787e673d68b805d0aa1df5a5aea701', 'thumbnail': r're:^https?://.*', 'duration': 4533, 'age_limit': 12, }, 'params': { 'format': 'bestvideo', }, }, { 'url': 'https://www.kinopoisk.ru/film/81041', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'https://ott-widget.kinopoisk.ru/v1/kp/', video_id, query={'kpId': video_id}) data = self._parse_json( self._search_regex( r'(?s)<script[^>]+\btype=["\']application/json[^>]+>(.+?)<', webpage, 'data'), video_id)['models'] film = data['filmStatus'] title = film.get('title') or film['originalTitle'] formats = self._extract_m3u8_formats( data['playlistEntity']['uri'], video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') self._sort_formats(formats) description = dict_get( film, ('descriptscription', 'description', 'shortDescriptscription', 'shortDescription')) thumbnail = film.get('coverUrl') or film.get('posterUrl') duration = int_or_none(film.get('duration')) age_limit = int_or_none(film.get('restrictionAge')) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'age_limit': age_limit, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/wat.py
youtube_dl/extractor/wat.py
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    ExtractorError,
    int_or_none,
    try_get,
    unified_strdate,
)


class WatIE(InfoExtractor):
    _VALID_URL = r'(?:wat:|https?://(?:www\.)?wat\.tv/video/.*-)(?P<id>[0-9a-z]+)'
    IE_NAME = 'wat.tv'
    _TESTS = [
        {
            'url': 'http://www.wat.tv/video/soupe-figues-l-orange-aux-epices-6z1uz_2hvf7_.html',
            'info_dict': {
                'id': '11713067',
                'ext': 'mp4',
                'title': 'Soupe de figues à l\'orange et aux épices',
                'description': 'Retrouvez l\'émission "Petits plats en équilibre", diffusée le 18 août 2014.',
                'upload_date': '20140819',
                'duration': 120,
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
            'expected_warnings': ['HTTP Error 404'],
            'skip': 'This content is no longer available',
        },
        {
            'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
            'md5': 'b16574df2c3cd1a36ca0098f2a791925',
            'info_dict': {
                'id': '11713075',
                'ext': 'mp4',
                'title': 'Grégory Lemarchal, une voix d\'ange depuis 10 ans (1/3)',
                'upload_date': '20140816',
            },
            'expected_warnings': ["Ce contenu n'est pas disponible pour l'instant."],
            'skip': 'This content is no longer available',
        },
    ]
    _GEO_BYPASS = False

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Short slugs from wat.tv URLs are base36; the API wants the
        # decimal id (long all-digit ids are already decimal).
        if not (video_id.isdigit() and len(video_id) > 6):
            video_id = compat_str(int(video_id, 36))

        # 'contentv4' is used in the website, but it also returns the related
        # videos, we don't need them
        # video_data = self._download_json(
        #     'http://www.wat.tv/interface/contentv4s/' + video_id, video_id)
        video_data = self._download_json(
            'https://mediainfo.tf1.fr/mediainfocombo/' + video_id,
            video_id, query={'context': 'MYTF1', 'pver': '4001000'})
        media = video_data['media']

        error_desc = media.get('error_desc')
        if error_desc:
            if media.get('error_code') == 'GEOBLOCKED':
                self.raise_geo_restricted(error_desc, media.get('geoList'))
            raise ExtractorError(error_desc, expected=True)

        title = media['title']

        formats = []

        def extract_formats(manifest_urls):
            # Fills `formats` from a {format_name: manifest_url} mapping.
            for fmt, fmt_url in manifest_urls.items():
                if not fmt_url:
                    continue
                if fmt in ('dash', 'mpd'):
                    formats.extend(self._extract_mpd_formats(
                        fmt_url.replace('://das-q1.tf1.fr/', '://das-q1-ssl.tf1.fr/'),
                        video_id, mpd_id='dash', fatal=False))
                elif fmt == 'hls':
                    formats.extend(self._extract_m3u8_formats(
                        fmt_url, video_id, 'mp4',
                        'm3u8_native', m3u8_id='hls', fatal=False))

        delivery = video_data.get('delivery') or {}
        extract_formats({delivery.get('format'): delivery.get('url')})
        if not formats:
            if delivery.get('drm'):
                raise ExtractorError('This video is DRM protected.', expected=True)
            manifest_urls = self._download_json(
                'http://www.wat.tv/get/webhtml/' + video_id, video_id,
                fatal=False)
            if manifest_urls:
                extract_formats(manifest_urls)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': media.get('preview'),
            'upload_date': unified_strdate(try_get(
                video_data,
                lambda x: x['mediametrie']['chapters'][0]['estatS4'])),
            'duration': int_or_none(media.get('duration')),
            'formats': formats,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/medaltv.py
youtube_dl/extractor/medaltv.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, float_or_none, int_or_none, str_or_none, try_get, ) class MedalTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?medal\.tv/clips/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://medal.tv/clips/2mA60jWAGQCBH', 'md5': '7b07b064331b1cf9e8e5c52a06ae68fa', 'info_dict': { 'id': '2mA60jWAGQCBH', 'ext': 'mp4', 'title': 'Quad Cold', 'description': 'Medal,https://medal.tv/desktop/', 'uploader': 'MowgliSB', 'timestamp': 1603165266, 'upload_date': '20201020', 'uploader_id': '10619174', } }, { 'url': 'https://medal.tv/clips/2um24TWdty0NA', 'md5': 'b6dc76b78195fff0b4f8bf4a33ec2148', 'info_dict': { 'id': '2um24TWdty0NA', 'ext': 'mp4', 'title': 'u tk me i tk u bigger', 'description': 'Medal,https://medal.tv/desktop/', 'uploader': 'Mimicc', 'timestamp': 1605580939, 'upload_date': '20201117', 'uploader_id': '5156321', } }, { 'url': 'https://medal.tv/clips/37rMeFpryCC-9', 'only_matching': True, }, { 'url': 'https://medal.tv/clips/2WRj40tpY_EU9', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) hydration_data = self._parse_json(self._search_regex( r'<script[^>]*>\s*(?:var\s*)?hydrationData\s*=\s*({.+?})\s*</script>', webpage, 'hydration data', default='{}'), video_id) clip = try_get( hydration_data, lambda x: x['clips'][video_id], dict) or {} if not clip: raise ExtractorError( 'Could not find video information.', video_id=video_id) title = clip['contentTitle'] source_width = int_or_none(clip.get('sourceWidth')) source_height = int_or_none(clip.get('sourceHeight')) aspect_ratio = source_width / source_height if source_width and source_height else 16 / 9 def add_item(container, item_url, height, id_key='format_id', item_id=None): item_id = item_id or '%dp' % height if item_id not in item_url: return width = int(round(aspect_ratio * 
height)) container.append({ 'url': item_url, id_key: item_id, 'width': width, 'height': height }) formats = [] thumbnails = [] for k, v in clip.items(): if not (v and isinstance(v, compat_str)): continue mobj = re.match(r'(contentUrl|thumbnail)(?:(\d+)p)?$', k) if not mobj: continue prefix = mobj.group(1) height = int_or_none(mobj.group(2)) if prefix == 'contentUrl': add_item( formats, v, height or source_height, item_id=None if height else 'source') elif prefix == 'thumbnail': add_item(thumbnails, v, height, 'id') error = clip.get('error') if not formats and error: if error == 404: raise ExtractorError( 'That clip does not exist.', expected=True, video_id=video_id) else: raise ExtractorError( 'An unknown error occurred ({0}).'.format(error), video_id=video_id) self._sort_formats(formats) # Necessary because the id of the author is not known in advance. # Won't raise an issue if no profile can be found as this is optional. author = try_get( hydration_data, lambda x: list(x['profiles'].values())[0], dict) or {} author_id = str_or_none(author.get('id')) author_url = 'https://medal.tv/users/{0}'.format(author_id) if author_id else None return { 'id': video_id, 'title': title, 'formats': formats, 'thumbnails': thumbnails, 'description': clip.get('contentDescription'), 'uploader': author.get('displayName'), 'timestamp': float_or_none(clip.get('created'), 1000), 'uploader_id': author_id, 'uploader_url': author_url, 'duration': int_or_none(clip.get('videoLengthSeconds')), 'view_count': int_or_none(clip.get('views')), 'like_count': int_or_none(clip.get('likes')), 'comment_count': int_or_none(clip.get('comments')), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/watchbox.py
youtube_dl/extractor/watchbox.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, js_to_json, strip_or_none, try_get, unescapeHTML, unified_timestamp, ) class WatchBoxIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?watchbox\.de/(?P<kind>serien|filme)/(?:[^/]+/)*[^/]+-(?P<id>\d+)' _TESTS = [{ # film 'url': 'https://www.watchbox.de/filme/free-jimmy-12325.html', 'info_dict': { 'id': '341368', 'ext': 'mp4', 'title': 'Free Jimmy', 'description': 'md5:bcd8bafbbf9dc0ef98063d344d7cc5f6', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 4890, 'age_limit': 16, 'release_year': 2009, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, 'expected_warnings': ['Failed to download m3u8 information'], }, { # episode 'url': 'https://www.watchbox.de/serien/ugly-americans-12231/staffel-1/date-in-der-hoelle-328286.html', 'info_dict': { 'id': '328286', 'ext': 'mp4', 'title': 'S01 E01 - Date in der Hölle', 'description': 'md5:2f31c74a8186899f33cb5114491dae2b', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1291, 'age_limit': 12, 'release_year': 2010, 'series': 'Ugly Americans', 'season_number': 1, 'episode': 'Date in der Hölle', 'episode_number': 1, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, 'expected_warnings': ['Failed to download m3u8 information'], }, { 'url': 'https://www.watchbox.de/serien/ugly-americans-12231/staffel-2/der-ring-des-powers-328270', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) kind, video_id = mobj.group('kind', 'id') webpage = self._download_webpage(url, video_id) player_config = self._parse_json( self._search_regex( r'data-player-conf=(["\'])(?P<data>{.+?})\1', webpage, 'player config', default='{}', group='data'), video_id, transform_source=unescapeHTML, fatal=False) if not player_config: player_config = self._parse_json( self._search_regex( r'playerConf\s*=\s*({.+?})\s*;', webpage, 
'player config', default='{}'), video_id, transform_source=js_to_json, fatal=False) or {} source = player_config.get('source') or {} video_id = compat_str(source.get('videoId') or video_id) devapi = self._download_json( 'http://api.watchbox.de/devapi/id/%s' % video_id, video_id, query={ 'format': 'json', 'apikey': 'hbbtv', }, fatal=False) item = try_get(devapi, lambda x: x['items'][0], dict) or {} title = item.get('title') or try_get( item, lambda x: x['movie']['headline_movie'], compat_str) or source['title'] formats = [] hls_url = item.get('media_videourl_hls') or source.get('hls') if hls_url: formats.extend(self._extract_m3u8_formats( hls_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) dash_url = item.get('media_videourl_wv') or source.get('dash') if dash_url: formats.extend(self._extract_mpd_formats( dash_url, video_id, mpd_id='dash', fatal=False)) mp4_url = item.get('media_videourl') if mp4_url: formats.append({ 'url': mp4_url, 'format_id': 'mp4', 'width': int_or_none(item.get('width')), 'height': int_or_none(item.get('height')), 'tbr': int_or_none(item.get('bitrate')), }) self._sort_formats(formats) description = strip_or_none(item.get('descr')) thumbnail = item.get('media_content_thumbnail_large') or source.get('poster') or item.get('media_thumbnail') duration = int_or_none(item.get('media_length') or source.get('length')) timestamp = unified_timestamp(item.get('pubDate')) view_count = int_or_none(item.get('media_views')) age_limit = int_or_none(try_get(item, lambda x: x['movie']['fsk'])) release_year = int_or_none(try_get(item, lambda x: x['movie']['rel_year'])) info = { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'timestamp': timestamp, 'view_count': view_count, 'age_limit': age_limit, 'release_year': release_year, 'formats': formats, } if kind.lower() == 'serien': series = try_get( item, lambda x: x['special']['title'], compat_str) or source.get('format') 
season_number = int_or_none(self._search_regex( r'^S(\d{1,2})\s*E\d{1,2}', title, 'season number', default=None) or self._search_regex( r'/staffel-(\d+)/', url, 'season number', default=None)) episode = source.get('title') episode_number = int_or_none(self._search_regex( r'^S\d{1,2}\s*E(\d{1,2})', title, 'episode number', default=None)) info.update({ 'series': series, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/defense.py
youtube_dl/extractor/defense.py
from __future__ import unicode_literals

from .common import InfoExtractor


class DefenseGouvFrIE(InfoExtractor):
    IE_NAME = 'defense.gouv.fr'
    _VALID_URL = r'https?://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)'

    _TEST = {
        'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
        'md5': '75bba6124da7e63d2d60b5244ec9430c',
        'info_dict': {
            'id': '11213',
            'ext': 'mp4',
            'title': 'attaque-chimique-syrienne-du-21-aout-2013-1'
        }
    }

    def _real_extract(self, url):
        # The URL slug doubles as the display title.
        display_title = self._match_id(url)
        webpage = self._download_webpage(url, display_title)

        video_id = self._search_regex(
            r"flashvars.pvg_id=\"(\d+)\";", webpage, 'ID')

        config = self._download_json(
            'http://static.videos.gouv.fr/brightcovehub/export/json/%s' % video_id,
            display_title, 'Downloading JSON config')

        return {
            'id': video_id,
            'ext': 'mp4',
            'url': config['renditions'][0]['url'],
            'title': display_title,
        }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/slideshare.py
youtube_dl/extractor/slideshare.py
from __future__ import unicode_literals import re import json from .common import InfoExtractor from ..compat import ( compat_urlparse, ) from ..utils import ( ExtractorError, get_element_by_id, ) class SlideshareIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)' _TEST = { 'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity', 'info_dict': { 'id': '25665706', 'ext': 'mp4', 'title': 'Managing Scale and Complexity', 'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.', }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) page_title = mobj.group('title') webpage = self._download_webpage(url, page_title) slideshare_obj = self._search_regex( r'\$\.extend\(.*?slideshare_object,\s*(\{.*?\})\);', webpage, 'slideshare object') info = json.loads(slideshare_obj) if info['slideshow']['type'] != 'video': raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True) doc = info['doc'] bucket = info['jsplayer']['video_bucket'] ext = info['jsplayer']['video_extension'] video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext) description = get_element_by_id('slideshow-description-paragraph', webpage) or self._html_search_regex( r'(?s)<p[^>]+itemprop="description"[^>]*>(.+?)</p>', webpage, 'description', fatal=False) return { '_type': 'video', 'id': info['slideshow']['id'], 'title': info['slideshow']['title'], 'ext': ext, 'url': video_url, 'thumbnail': info['slideshow']['pin_image_url'], 'description': description.strip() if description else None, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/anvato.py
youtube_dl/extractor/anvato.py
# coding: utf-8 from __future__ import unicode_literals import base64 import hashlib import json import random import re import time from .common import InfoExtractor from ..aes import aes_encrypt from ..compat import compat_str from ..utils import ( bytes_to_intlist, determine_ext, intlist_to_bytes, int_or_none, strip_jsonp, unescapeHTML, unsmuggle_url, ) def md5_text(s): if not isinstance(s, compat_str): s = compat_str(s) return hashlib.md5(s.encode('utf-8')).hexdigest() class AnvatoIE(InfoExtractor): _VALID_URL = r'anvato:(?P<access_key_or_mcp>[^:]+):(?P<id>\d+)' # Copied from anvplayer.min.js _ANVACK_TABLE = { 'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ', 'nbcu_nbcd_desktop_web_qa_1a6f01bdd0dc45a439043b694c8a031d': 'eSxJUbA2UUKBTXryyQ2d6NuM8oEqaPySvaPzfKNA', 'nbcu_nbcd_desktop_web_acc_eb2ff240a5d4ae9a63d4c297c32716b6c523a129': '89JR3RtUGbvKuuJIiKOMK0SoarLb5MUx8v89RcbP', 'nbcu_nbcd_watchvod_web_prod_e61107507180976724ec8e8319fe24ba5b4b60e1': 'Uc7dFt7MJ9GsBWB5T7iPvLaMSOt8BBxv4hAXk5vv', 'nbcu_nbcd_watchvod_web_qa_42afedba88a36203db5a4c09a5ba29d045302232': 'T12oDYVFP2IaFvxkmYMy5dKxswpLHtGZa4ZAXEi7', 'nbcu_nbcd_watchvod_web_acc_9193214448e2e636b0ffb78abacfd9c4f937c6ca': 'MmobcxUxMedUpohNWwXaOnMjlbiyTOBLL6d46ZpR', 'nbcu_local_monitor_web_acc_f998ad54eaf26acd8ee033eb36f39a7b791c6335': 'QvfIoPYrwsjUCcASiw3AIkVtQob2LtJHfidp9iWg', 'nbcu_cable_monitor_web_acc_a413759603e8bedfcd3c61b14767796e17834077': 'uwVPJLShvJWSs6sWEIuVem7MTF8A4IknMMzIlFto', 'nbcu_nbcd_mcpstage_web_qa_4c43a8f6e95a88dbb40276c0630ba9f693a63a4e': 'PxVYZVwjhgd5TeoPRxL3whssb5OUPnM3zyAzq8GY', 'nbcu_comcast_comcast_web_prod_074080762ad4ce956b26b43fb22abf153443a8c4': 'afnaRZfDyg1Z3WZHdupKfy6xrbAG2MHqe3VfuSwh', 'nbcu_comcast_comcast_web_qa_706103bb93ead3ef70b1de12a0e95e3c4481ade0': 'DcjsVbX9b3uoPlhdriIiovgFQZVxpISZwz0cx1ZK', 'nbcu_comcast_comcastcable_web_prod_669f04817536743563d7331c9293e59fbdbe3d07': 
'0RwMN2cWy10qhAhOscq3eK7aEe0wqnKt3vJ0WS4D', 'nbcu_comcast_comcastcable_web_qa_3d9d2d66219094127f0f6b09cc3c7bb076e3e1ca': '2r8G9DEya7PCqBceKZgrn2XkXgASjwLMuaFE1Aad', 'hearst_hearst_demo_web_stage_960726dfef3337059a01a78816e43b29ec04dfc7': 'cuZBPXTR6kSdoTCVXwk5KGA8rk3NrgGn4H6e9Dsp', 'anvato_mcpqa_demo_web_stage_18b55e00db5a13faa8d03ae6e41f6f5bcb15b922': 'IOaaLQ8ymqVyem14QuAvE5SndQynTcH5CrLkU2Ih', 'anvato_nextmedia_demo_web_stage_9787d56a02ff6b9f43e9a2b0920d8ca88beb5818': 'Pqu9zVzI1ApiIzbVA3VkGBEQHvdKSUuKpD6s2uaR', 'anvato_scripps_app_web_prod_0837996dbe373629133857ae9eb72e740424d80a': 'du1ccmn7RxzgizwbWU7hyUaGodNlJn7HtXI0WgXW', 'anvato_scripps_app_web_stage_360797e00fe2826be142155c4618cc52fce6c26c': '2PMrQ0BRoqCWl7nzphj0GouIMEh2mZYivAT0S1Su', 'fs2go_fs2go_go_all_prod_21934911ccfafc03a075894ead2260d11e2ddd24': 'RcuHlKikW2IJw6HvVoEkqq2UsuEJlbEl11pWXs4Q', 'fs2go_fs2go_go_web_prod_ead4b0eec7460c1a07783808db21b49cf1f2f9a7': '4K0HTT2u1zkQA2MaGaZmkLa1BthGSBdr7jllrhk5', 'fs2go_fs2go_go_web_stage_407585454a4400355d4391691c67f361': 'ftnc37VKRJBmHfoGGi3kT05bHyeJzilEzhKJCyl3', 'fs2go_fs2go_go_android_stage_44b714db6f8477f29afcba15a41e1d30': 'CtxpPvVpo6AbZGomYUhkKs7juHZwNml9b9J0J2gI', 'anvato_cbslocal_app_web_prod_547f3e49241ef0e5d30c79b2efbca5d92c698f67': 'Pw0XX5KBDsyRnPS0R2JrSrXftsy8Jnz5pAjaYC8s', 'anvato_cbslocal_app_web_stage_547a5f096594cd3e00620c6f825cad1096d28c80': '37OBUhX2uwNyKhhrNzSSNHSRPZpApC3trdqDBpuz', 'fs2go_att_att_web_prod_1042dddd089a05438b6a08f972941176f699ffd8': 'JLcF20JwYvpv6uAGcLWIaV12jKwaL1R8us4b6Zkg', 'fs2go_att_att_web_stage_807c5001955fc114a3331fe027ddc76e': 'gbu1oO1y0JiOFh4SUipt86P288JHpyjSqolrrT1x', 'fs2go_fs2go_tudor_web_prod_a7dd8e5a7cdc830cae55eae6f3e9fee5ee49eb9b': 'ipcp87VCEZXPPe868j3orLqzc03oTy7DXsGkAXXH', 'anvato_mhz_app_web_prod_b808218b30de7fdf60340cbd9831512bc1bf6d37': 'Stlm5Gs6BEhJLRTZHcNquyzxGqr23EuFmE5DCgjX', 'fs2go_charter_charter_web_stage_c2c6e5a68375a1bf00fff213d3ff8f61a835a54c': 'Lz4hbJp1fwL6jlcz4M2PMzghM4jp4aAmybtT5dPc', 
'fs2go_charter_charter_web_prod_ebfe3b10f1af215a7321cd3d629e0b81dfa6fa8c': 'vUJsK345A1bVmyYDRhZX0lqFIgVXuqhmuyp1EtPK', 'anvato_epfox_app_web_prod_b3373168e12f423f41504f207000188daf88251b': 'GDKq1ixvX3MoBNdU5IOYmYa2DTUXYOozPjrCJnW7', 'anvato_epfox_app_web_stage_a3c2ce60f8f83ef374a88b68ee73a950f8ab87ce': '2jz2NH4BsXMaDsoJ5qkHMbcczAfIReo2eFYuVC1C', 'fs2go_verizon_verizon_web_stage_08e6df0354a4803f1b1f2428b5a9a382e8dbcd62': 'rKTVapNaAcmnUbGL4ZcuOoY4SE7VmZSQsblPFr7e', 'fs2go_verizon_verizon_web_prod_f909564cb606eff1f731b5e22e0928676732c445': 'qLSUuHerM3u9eNPzaHyUK52obai5MvE4XDJfqYe1', 'fs2go_foxcom_synd_web_stage_f7b9091f00ea25a4fdaaae77fca5b54cdc7e7043': '96VKF2vLd24fFiDfwPFpzM5llFN4TiIGAlodE0Re', 'fs2go_foxcom_synd_web_prod_0f2cdd64d87e4ab6a1d54aada0ff7a7c8387a064': 'agiPjbXEyEZUkbuhcnmVPhe9NNVbDjCFq2xkcx51', 'anvato_own_app_web_stage_1214ade5d28422c4dae9d03c1243aba0563c4dba': 'mzhamNac3swG4WsJAiUTacnGIODi6SWeVWk5D7ho', 'anvato_own_app_web_prod_944e162ed927ec3e9ed13eb68ed2f1008ee7565e': '9TSxh6G2TXOLBoYm9ro3LdNjjvnXpKb8UR8KoIP9', 'anvato_scripps_app_ftv_prod_a10a10468edd5afb16fb48171c03b956176afad1': 'COJ2i2UIPK7xZqIWswxe7FaVBOVgRkP1F6O6qGoH', 'anvato_scripps_app_ftv_stage_77d3ad2bdb021ec37ca2e35eb09acd396a974c9a': 'Q7nnopNLe2PPfGLOTYBqxSaRpl209IhqaEuDZi1F', 'anvato_univision_app_web_stage_551236ef07a0e17718c3995c35586b5ed8cb5031': 'D92PoLS6UitwxDRA191HUGT9OYcOjV6mPMa5wNyo', 'anvato_univision_app_web_prod_039a5c0a6009e637ae8ac906718a79911e0e65e1': '5mVS5u4SQjtw6NGw2uhMbKEIONIiLqRKck5RwQLR', 'nbcu_cnbc_springfield_ios_prod_670207fae43d6e9a94c351688851a2ce': 'M7fqCCIP9lW53oJbHs19OlJlpDrVyc2OL8gNeuTa', 'nbcu_cnbc_springfieldvod_ios_prod_7a5f04b1ceceb0e9c9e2264a44aa236e08e034c2': 'Yia6QbJahW0S7K1I0drksimhZb4UFq92xLBmmMvk', 'anvato_cox_app_web_prod_ce45cda237969f93e7130f50ee8bb6280c1484ab': 'cc0miZexpFtdoqZGvdhfXsLy7FXjRAOgb9V0f5fZ', 'anvato_cox_app_web_stage_c23dbe016a8e9d8c7101d10172b92434f6088bf9': 'yivU3MYHd2eDZcOfmLbINVtqxyecKTOp8OjOuoGJ', 
'anvato_chnzero_app_web_stage_b1164d1352b579e792e542fddf13ee34c0eeb46b': 'A76QkXMmVH8lTCfU15xva1mZnSVcqeY4Xb22Kp7m', 'anvato_chnzero_app_web_prod_253d358928dc08ec161eda2389d53707288a730c': 'OA5QI3ZWZZkdtUEDqh28AH8GedsF6FqzJI32596b', 'anvato_discovery_vodpoc_web_stage_9fa7077b5e8af1f8355f65d4fb8d2e0e9d54e2b7': 'q3oT191tTQ5g3JCP67PkjLASI9s16DuWZ6fYmry3', 'anvato_discovery_vodpoc_web_prod_688614983167a1af6cdf6d76343fda10a65223c1': 'qRvRQCTVHd0VVOHsMvvfidyWmlYVrTbjby7WqIuK', 'nbcu_cnbc_springfieldvod_ftv_stage_826040aad1925a46ac5dfb4b3c5143e648c6a30d': 'JQaSb5a8Tz0PT4ti329DNmzDO30TnngTHmvX8Vua', 'nbcu_cnbc_springfield_ftv_stage_826040aad1925a46ac5dfb4b3c5143e648c6a30d': 'JQaSb5a8Tz0PT4ti329DNmzDO30TnngTHmvX8Vua', 'nbcu_nbcd_capture_web_stage_4dd9d585bfb984ebf856dee35db027b2465cc4ae': '0j1Ov4Vopyi2HpBZJYdL2m8ERJVGYh3nNpzPiO8F', 'nbcu_nbcd_watch3_android_prod_7712ca5fcf1c22f19ec1870a9650f9c37db22dcf': '3LN2UB3rPUAMu7ZriWkHky9vpLMXYha8JbSnxBlx', 'nbcu_nbcd_watchvod3_android_prod_0910a3a4692d57c0b5ff4316075bc5d096be45b9': 'mJagcQ2II30vUOAauOXne7ERwbf5S9nlB3IP17lQ', 'anvato_scripps_app_atv_prod_790deda22e16e71e83df58f880cd389908a45d52': 'CB6trI1mpoDIM5o54DNTsji90NDBQPZ4z4RqBNSH', 'nbcu_nbcd_watchv4_android_prod_ff67cef9cb409158c6f8c3533edddadd0b750507': 'j8CHQCUWjlYERj4NFRmUYOND85QNbHViH09UwuKm', 'nbcu_nbcd_watchvodv4_android_prod_a814d781609989dea6a629d50ae4c7ad8cc8e907': 'rkVnUXxdA9rawVLUlDQtMue9Y4Q7lFEaIotcUhjt', 'rvVKpA50qlOPLFxMjrCGf5pdkdQDm7qn': '1J7ZkY5Qz5lMLi93QOH9IveE7EYB3rLl', 'nbcu_dtv_local_web_prod_b266cf49defe255fd4426a97e27c09e513e9f82f': 'HuLnJDqzLa4saCzYMJ79zDRSQpEduw1TzjMNQu2b', 'nbcu_att_local_web_prod_4cef038b2d969a6b7d700a56a599040b6a619f67': 'Q0Em5VDc2KpydUrVwzWRXAwoNBulWUxCq2faK0AV', 'nbcu_dish_local_web_prod_c56dcaf2da2e9157a4266c82a78195f1dd570f6b': 'bC1LWmRz9ayj2AlzizeJ1HuhTfIaJGsDBnZNgoRg', 'nbcu_verizon_local_web_prod_88bebd2ce006d4ed980de8133496f9a74cb9b3e1': 'wzhDKJZpgvUSS1EQvpCQP8Q59qVzcPixqDGJefSk', 
'nbcu_charter_local_web_prod_9ad90f7fc4023643bb718f0fe0fd5beea2382a50': 'PyNbxNhEWLzy1ZvWEQelRuIQY88Eub7xbSVRMdfT', 'nbcu_suddenlink_local_web_prod_20fb711725cac224baa1c1cb0b1c324d25e97178': '0Rph41lPXZbb3fqeXtHjjbxfSrNbtZp1Ygq7Jypa', 'nbcu_wow_local_web_prod_652d9ce4f552d9c2e7b5b1ed37b8cb48155174ad': 'qayIBZ70w1dItm2zS42AptXnxW15mkjRrwnBjMPv', 'nbcu_centurylink_local_web_prod_2034402b029bf3e837ad46814d9e4b1d1345ccd5': 'StePcPMkjsX51PcizLdLRMzxMEl5k2FlsMLUNV4k', 'nbcu_atlanticbrd_local_web_prod_8d5f5ecbf7f7b2f5e6d908dd75d90ae3565f682e': 'NtYLb4TFUS0pRs3XTkyO5sbVGYjVf17bVbjaGscI', 'nbcu_nbcd_watchvod_web_dev_08bc05699be47c4f31d5080263a8cfadc16d0f7c': 'hwxi2dgDoSWgfmVVXOYZm14uuvku4QfopstXckhr', 'anvato_nextmedia_app_web_prod_a4fa8c7204aa65e71044b57aaf63711980cfe5a0': 'tQN1oGPYY1nM85rJYePWGcIb92TG0gSqoVpQTWOw', 'anvato_mcp_lin_web_prod_4c36fbfd4d8d8ecae6488656e21ac6d1ac972749': 'GUXNf5ZDX2jFUpu4WT2Go4DJ5nhUCzpnwDRRUx1K', 'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa': 'bLDYF8JqfG42b7bwKEgQiU9E2LTIAtnKzSgYpFUH', 'anvato_mcp_fs2go_web_prod_c7b90a93e171469cdca00a931211a2f556370d0a': 'icgGoYGipQMMSEvhplZX1pwbN69srwKYWksz3xWK', 'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336': 'fA2iQdI7RDpynqzQYIpXALVS83NTPr8LLFK4LFsu', 'anvato_mcp_anv_web_prod_791407490f4c1ef2a4bcb21103e0cb1bcb3352b3': 'rMOUZqe9lwcGq2mNgG3EDusm6lKgsUnczoOX3mbg', 'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900': 'rMOUZqe9lwcGq2mNgG3EDusm6lKgsUnczoOX3mbg', 'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99': 'P3uXJ0fXXditBPCGkfvlnVScpPEfKmc64Zv7ZgbK', 'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe': 'mGPvo5ZA5SgjOFAPEPXv7AnOpFUICX8hvFQVz69n', 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582': 'qyT6PXXLjVNCrHaRVj0ugAhalNRS7Ee9BP7LUokD', 'nbcu_nbcd_watchvodv4_web_stage_4108362fba2d4ede21f262fea3c4162cbafd66c7': 'DhaU5lj0W2gEdcSSsnxURq8t7KIWtJfD966crVDk', 
'anvato_scripps_app_ios_prod_409c41960c60b308db43c3cc1da79cab9f1c3d93': 'WPxj5GraLTkYCyj3M7RozLqIycjrXOEcDGFMIJPn', 'EZqvRyKBJLrgpClDPDF8I7Xpdp40Vx73': '4OxGd2dEakylntVKjKF0UK9PDPYB6A9W', 'M2v78QkpleXm9hPp9jUXI63x5vA6BogR': 'ka6K32k7ZALmpINkjJUGUo0OE42Md1BQ', 'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ', 'X8POa4zPPaKVZHqmWjuEzfP31b1QM9VN': 'Dn5vOY9ooDw7VSl9qztjZI5o0g08mA0z', 'M2v78QkBMpNJlSPp9diX5F2PBmBy6Bog': 'ka6K32kyo7nDZfNkjQCGWf1lpApXMd1B', 'bvJ0dQpav07l0hG5JgfVLF2dv1vARwpP': 'BzoQW24GrJZoJfmNodiJKSPeB9B8NOxj', 'lxQMLg2XZKuEZaWgsqubBxV9INZ6bryY': 'Vm2Mx6noKds9jB71h6urazwlTG3m9x8l', '04EnjvXeoSmkbJ9ckPs7oY0mcxv7PlyN': 'aXERQP9LMfQVlEDsgGs6eEA1SWznAQ8P', 'mQbO2ge6BFRWVPYCYpU06YvNt80XLvAX': 'E2BV1NGmasN5v7eujECVPJgwflnLPm2A', 'g43oeBzJrCml7o6fa5fRL1ErCdeD8z4K': 'RX34mZ6zVH4Nr6whbxIGLv9WSbxEKo8V', 'VQrDJoP7mtdBzkxhXbSPwGB1coeElk4x': 'j2VejQx0VFKQepAF7dI0mJLKtOVJE18z', 'WxA5NzLRjCrmq0NUgaU5pdMDuZO7RJ4w': 'lyY5ADLKaIOLEgAsGQCveEMAcqnx3rY9', 'M4lpMXB71ie0PjMCjdFzVXq0SeRVqz49': 'n2zVkOqaLIv3GbLfBjcwW51LcveWOZ2e', 'dyDZGEqN8u8nkJZcJns0oxYmtP7KbGAn': 'VXOEqQW9BtEVLajfZQSLEqxgS5B7qn2D', 'E7QNjrVY5u5mGvgu67IoDgV1CjEND8QR': 'rz8AaDmdKIkLmPNhB5ILPJnjS5PnlL8d', 'a4zrqjoKlfzg0dwHEWtP31VqcLBpjm4g': 'LY9J16gwETdGWa3hjBu5o0RzuoQDjqXQ', 'dQP5BZroMsMVLO1hbmT5r2Enu86GjxA6': '7XR3oOdbPF6x3PRFLDCq9RkgsRjAo48V', 'M4lKNBO1NFe0PjMCj1tzVXq0SeRVqzA9': 'n2zoRqGLRUv3GbLfBmTwW51LcveWOZYe', 'nAZ7MZdpGCGg1pqFEbsoJOz2C60mv143': 'dYJgdqA9aT4yojETqGi7yNgoFADxqmXP', '3y1MERYgOuE9NzbFgwhV6Wv2F0YKvbyz': '081xpZDQgC4VadLTavhWQxrku56DAgXV', 'bmQvmEXr5HWklBMCZOcpE2Z3HBYwqGyl': 'zxXPbVNyMiMAZldhr9FkOmA0fl4aKr2v', 'wA7oDNYldfr6050Hwxi52lPZiVlB86Ap': 'ZYK16aA7ni0d3l3c34uwpxD7CbReMm8Q', 'g43MbKMWmFml7o7sJoSRkXxZiXRvJ3QK': 'RX3oBJonvs4Nr6rUWBCGn3matRGqJPXV', 'mA9VdlqpLS0raGaSDvtoqNrBTzb8XY4q': '0XN4OjBD3fnW7r7IbmtJB4AyfOmlrE2r', 'mAajOwgkGt17oGoFmEuklMP9H0GnW54d': 'lXbBLPGyzikNGeGujAuAJGjZiwLRxyXR', 
'vy8vjJ9kbUwrRqRu59Cj5dWZfzYErlAb': 'K8l7gpwaGcBpnAnCLNCmPZRdin3eaQX0', 'xQMWBpR8oHEZaWaSMGUb0avOHjLVYn4Y': 'm2MrN4vEaf9jB7BFy5Srb40jTrN67AYl', 'xyKEmVO3miRr6D6UVkt7oB8jtD6aJEAv': 'g2ddDebqDfqdgKgswyUKwGjbTWwzq923', '7Qk0wa2D9FjKapacoJF27aLvUDKkLGA0': 'b2kgBEkephJaMkMTL7s1PLe4Ua6WyP2P', '3QLg6nqmNTJ5VvVTo7f508LPidz1xwyY': 'g2L1GgpraipmAOAUqmIbBnPxHOmw4MYa', '3y1B7zZjXTE9NZNSzZSVNPZaTNLjo6Qz': '081b5G6wzH4VagaURmcWbN5mT4JGEe2V', 'lAqnwvkw6SG6D8DSqmUg6DRLUp0w3G4x': 'O2pbP0xPDFNJjpjIEvcdryOJtpkVM4X5', 'awA7xd1N0Hr6050Hw2c52lPZiVlB864p': 'GZYKpn4aoT0d3l3c3PiwpxD7CbReMmXQ', 'jQVqPLl9YHL1WGWtR1HDgWBGT63qRNyV': '6X03ne6vrU4oWyWUN7tQVoajikxJR3Ye', 'GQRMR8mL7uZK797t7xH3eNzPIP5dOny1': 'm2vqPWGd4U31zWzSyasDRAoMT1PKRp8o', 'zydq9RdmRhXLkNkfNoTJlMzaF0lWekQB': '3X7LnvE7vH5nkEkSqLiey793Un7dLB8e', 'VQrDzwkB2IdBzjzu9MHPbEYkSB50gR4x': 'j2VebLzoKUKQeEesmVh0gM1eIp9jKz8z', 'mAa2wMamBs17oGoFmktklMP9H0GnW54d': 'lXbgP74xZTkNGeGujVUAJGjZiwLRxy8R', '7yjB6ZLG6sW8R6RF2xcan1KGfJ5dNoyd': 'wXQkPorvPHZ45N5t4Jf6qwg5Tp4xvw29', 'a4zPpNeWGuzg0m0iX3tPeanGSkRKWXQg': 'LY9oa3QAyHdGW9Wu3Ri5JGeEik7l1N8Q', 'k2rneA2M38k25cXDwwSknTJlxPxQLZ6M': '61lyA2aEVDzklfdwmmh31saPxQx2VRjp', 'bK9Zk4OvPnvxduLgxvi8VUeojnjA02eV': 'o5jANYjbeMb4nfBaQvcLAt1jzLzYx6ze', '5VD6EydM3R9orHmNMGInGCJwbxbQvGRw': 'w3zjmX7g4vnxzCxElvUEOiewkokXprkZ', '70X35QbVYVYNPUmP9YfbzI06YqYQk2R1': 'vG4Aj2BMjMjoztB7zeFOnCVPJpJ8lMOa', '26qYwQVG9p1Bks2GgBckjfDJOXOAMgG1': 'r4ev9X0mv5zqJc0yk5IBDcQOwZw8mnwQ', 'rvVKpA56MBXWlSxMw3cobT5pdkd4Dm7q': '1J7ZkY53pZ645c93owcLZuveE7E8B3rL', 'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo': 'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo', 'jdKqRGF16dKsBviMDae7IGDl7oTjEbVV': 'Q09l7vhlNxPFErIOK6BVCe7KnwUW5DVV', '3QLkogW1OUJ5VvPsrDH56DY2u7lgZWyY': 'g2LRE1V9espmAOPhE4ubj4ZdUA57yDXa', 'wyJvWbXGBSdbkEzhv0CW8meou82aqRy8': 'M2wolPvyBIpQGkbT4juedD4ruzQGdK2y', '7QkdZrzEkFjKap6IYDU2PB0oCNZORmA0': 'b2kN1l96qhJaMkPs9dt1lpjBfwqZoA8P', 'pvA05113MHG1w3JTYxc6DVlRCjErVz4O': 'gQXeAbblBUnDJ7vujbHvbRd1cxlz3AXO', 'mA9blJDZwT0raG1cvkuoeVjLC7ZWd54q': 
'0XN9jRPwMHnW7rvumgfJZOD9CJgVkWYr', '5QwRN5qKJTvGKlDTmnf7xwNZcjRmvEy9': 'R2GP6LWBJU1QlnytwGt0B9pytWwAdDYy', 'eyn5rPPbkfw2KYxH32fG1q58CbLJzM40': 'p2gyqooZnS56JWeiDgfmOy1VugOQEBXn', '3BABn3b5RfPJGDwilbHe7l82uBoR05Am': '7OYZG7KMVhbPdKJS3xcWEN3AuDlLNmXj', 'xA5zNGXD3HrmqMlF6OS5pdMDuZO7RJ4w': 'yY5DAm6r1IOLE3BCVMFveEMAcqnx3r29', 'g43PgW3JZfml7o6fDEURL1ErCdeD8zyK': 'RX3aQn1zrS4Nr6whDgCGLv9WSbxEKo2V', 'lAqp8WbGgiG6D8LTKJcg3O72CDdre1Qx': 'O2pnm6473HNJjpKuVosd3vVeh975yrX5', 'wyJbYEDxKSdbkJ6S6RhW8meou82aqRy8': 'M2wPm7EgRSpQGlAh70CedD4ruzQGdKYy', 'M4lgW28nLCe0PVdtaXszVXq0SeRVqzA9': 'n2zmJvg4jHv3G0ETNgiwW51LcveWOZ8e', '5Qw3OVvp9FvGKlDTmOC7xwNZcjRmvEQ9': 'R2GzDdml9F1Qlnytw9s0B9pytWwAdD8y', 'vy8a98X7zCwrRqbHrLUjYzwDiK2b70Qb': 'K8lVwzyjZiBpnAaSGeUmnAgxuGOBxmY0', 'g4eGjJLLoiqRD3Pf9oT5O03LuNbLRDQp': '6XqD59zzpfN4EwQuaGt67qNpSyRBlnYy', 'g43OPp9boIml7o6fDOIRL1ErCdeD8z4K': 'RX33alNB4s4Nr6whDPUGLv9WSbxEKoXV', 'xA2ng9OkBcGKzDbTkKsJlx7dUK8R3dA5': 'z2aPnJvzBfObkwGC3vFaPxeBhxoMqZ8K', 'xyKEgBajZuRr6DEC0Kt7XpD1cnNW9gAv': 'g2ddlEBvRsqdgKaI4jUK9PrgfMexGZ23', 'BAogww51jIMa2JnH1BcYpXM5F658RNAL': 'rYWDmm0KptlkGv4FGJFMdZmjs9RDE6XR', 'BAokpg62VtMa2JnH1mHYpXM5F658RNAL': 'rYWryDnlNslkGv4FG4HMdZmjs9RDE62R', 'a4z1Px5e2hzg0m0iMMCPeanGSkRKWXAg': 'LY9eorNQGUdGW9WuKKf5JGeEik7l1NYQ', 'kAx69R58kF9nY5YcdecJdl2pFXP53WyX': 'gXyRxELpbfPvLeLSaRil0mp6UEzbZJ8L', 'BAoY13nwViMa2J2uo2cY6BlETgmdwryL': 'rYWwKzJmNFlkGvGtNoUM9bzwIJVzB1YR', } _MCP_TO_ACCESS_KEY_TABLE = { 'qa': 'anvato_mcpqa_demo_web_stage_18b55e00db5a13faa8d03ae6e41f6f5bcb15b922', 'lin': 'anvato_mcp_lin_web_prod_4c36fbfd4d8d8ecae6488656e21ac6d1ac972749', 'univison': 'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa', 'uni': 'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa', 'dev': 'anvato_mcp_fs2go_web_prod_c7b90a93e171469cdca00a931211a2f556370d0a', 'sps': 'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336', 'spsstg': 'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336', 'anv': 
'anvato_mcp_anv_web_prod_791407490f4c1ef2a4bcb21103e0cb1bcb3352b3', 'gray': 'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900', 'hearst': 'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99', 'cbs': 'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe', 'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582' } _API_KEY = '3hwbSuqqT690uxjNYBktSQpa5ZrpYYR0Iofx7NcJHyA' _ANVP_RE = r'<script[^>]+\bdata-anvp\s*=\s*(["\'])(?P<anvp>(?:(?!\1).)+)\1' _AUTH_KEY = b'\x31\xc2\x42\x84\x9e\x73\xa0\xce' _TESTS = [{ # from https://www.boston25news.com/news/watch-humpback-whale-breaches-right-next-to-fishing-boat-near-nh/817484874 'url': 'anvato:8v9BEynrwx8EFLYpgfOWcG1qJqyXKlRM:4465496', 'info_dict': { 'id': '4465496', 'ext': 'mp4', 'title': 'VIDEO: Humpback whale breaches right next to NH boat', 'description': 'VIDEO: Humpback whale breaches right next to NH boat. Footage courtesy: Zach Fahey.', 'duration': 22, 'timestamp': 1534855680, 'upload_date': '20180821', 'uploader': 'ANV', }, 'params': { 'skip_download': True, }, }, { # from https://sanfrancisco.cbslocal.com/2016/06/17/source-oakland-cop-on-leave-for-having-girlfriend-help-with-police-reports/ 'url': 'anvato:DVzl9QRzox3ZZsP9bNu5Li3X7obQOnqP:3417601', 'only_matching': True, }] def __init__(self, *args, **kwargs): super(AnvatoIE, self).__init__(*args, **kwargs) self.__server_time = None def _server_time(self, access_key, video_id): if self.__server_time is not None: return self.__server_time self.__server_time = int(self._download_json( self._api_prefix(access_key) + 'server_time?anvack=' + access_key, video_id, note='Fetching server time')['server_time']) return self.__server_time def _api_prefix(self, access_key): return 'https://tkx2-%s.anvato.net/rest/v2/' % ('prod' if 'prod' in access_key else 'stage') def _get_video_json(self, access_key, video_id): # See et() in anvplayer.min.js, which is an alias of getVideoJSON() video_data_url = 
self._api_prefix(access_key) + 'mcp/video/%s?anvack=%s' % (video_id, access_key) server_time = self._server_time(access_key, video_id) input_data = '%d~%s~%s' % (server_time, md5_text(video_data_url), md5_text(server_time)) auth_secret = intlist_to_bytes(aes_encrypt( bytes_to_intlist(input_data[:64]), bytes_to_intlist(self._AUTH_KEY))) video_data_url += '&X-Anvato-Adst-Auth=' + base64.b64encode(auth_secret).decode('ascii') anvrid = md5_text(time.time() * 1000 * random.random())[:30] api = { 'anvrid': anvrid, 'anvts': server_time, } api['anvstk'] = md5_text('%s|%s|%d|%s' % ( access_key, anvrid, server_time, self._ANVACK_TABLE.get(access_key, self._API_KEY))) return self._download_json( video_data_url, video_id, transform_source=strip_jsonp, data=json.dumps({'api': api}).encode('utf-8')) def _get_anvato_videos(self, access_key, video_id): video_data = self._get_video_json(access_key, video_id) formats = [] for published_url in video_data['published_urls']: video_url = published_url['embed_url'] media_format = published_url.get('format') ext = determine_ext(video_url) if ext == 'smil' or media_format == 'smil': formats.extend(self._extract_smil_formats(video_url, video_id)) continue tbr = int_or_none(published_url.get('kbps')) a_format = { 'url': video_url, 'format_id': ('-'.join(filter(None, ['http', published_url.get('cdn_name')]))).lower(), 'tbr': tbr if tbr != 0 else None, } if media_format == 'm3u8' and tbr is not None: a_format.update({ 'format_id': '-'.join(filter(None, ['hls', compat_str(tbr)])), 'ext': 'mp4', }) elif media_format == 'm3u8-variant' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) continue elif ext == 'mp3' or media_format == 'mp3': a_format['vcodec'] = 'none' else: a_format.update({ 'width': int_or_none(published_url.get('width')), 'height': int_or_none(published_url.get('height')), }) formats.append(a_format) self._sort_formats(formats) 
subtitles = {} for caption in video_data.get('captions', []): a_caption = { 'url': caption['url'], 'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None } subtitles.setdefault(caption['language'], []).append(a_caption) return { 'id': video_id, 'formats': formats, 'title': video_data.get('def_title'), 'description': video_data.get('def_description'), 'tags': video_data.get('def_tags', '').split(','), 'categories': video_data.get('categories'), 'thumbnail': video_data.get('src_image_url') or video_data.get('thumbnail'), 'timestamp': int_or_none(video_data.get( 'ts_published') or video_data.get('ts_added')), 'uploader': video_data.get('mcp_id'), 'duration': int_or_none(video_data.get('duration')), 'subtitles': subtitles, } @staticmethod def _extract_urls(ie, webpage, video_id): entries = [] for mobj in re.finditer(AnvatoIE._ANVP_RE, webpage): anvplayer_data = ie._parse_json( mobj.group('anvp'), video_id, transform_source=unescapeHTML, fatal=False) if not anvplayer_data: continue video = anvplayer_data.get('video') if not isinstance(video, compat_str) or not video.isdigit(): continue access_key = anvplayer_data.get('accessKey') if not access_key: mcp = anvplayer_data.get('mcp') if mcp: access_key = AnvatoIE._MCP_TO_ACCESS_KEY_TABLE.get( mcp.lower()) if not access_key: continue entries.append(ie.url_result( 'anvato:%s:%s' % (access_key, video), ie=AnvatoIE.ie_key(), video_id=video)) return entries def _extract_anvato_videos(self, webpage, video_id): anvplayer_data = self._parse_json( self._html_search_regex( self._ANVP_RE, webpage, 'Anvato player data', group='anvp'), video_id) return self._get_anvato_videos( anvplayer_data['accessKey'], anvplayer_data['video']) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) mobj = re.match(self._VALID_URL, url) access_key, video_id = mobj.group('access_key_or_mcp', 'id') if access_key not in self._ANVACK_TABLE: access_key 
= self._MCP_TO_ACCESS_KEY_TABLE.get( access_key) or access_key return self._get_anvato_videos(access_key, video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/asiancrush.py
youtube_dl/extractor/asiancrush.py
# coding: utf-8 from __future__ import unicode_literals import functools import re from .common import InfoExtractor from .kaltura import KalturaIE from ..utils import ( extract_attributes, int_or_none, OnDemandPagedList, parse_age_limit, strip_or_none, try_get, ) class AsianCrushBaseIE(InfoExtractor): _VALID_URL_BASE = r'https?://(?:www\.)?(?P<host>(?:(?:asiancrush|yuyutv|midnightpulp)\.com|(?:cocoro|retrocrush)\.tv))' _KALTURA_KEYS = [ 'video_url', 'progressive_url', 'download_url', 'thumbnail_url', 'widescreen_thumbnail_url', 'screencap_widescreen', ] _API_SUFFIX = {'retrocrush.tv': '-ott'} def _call_api(self, host, endpoint, video_id, query, resource): return self._download_json( 'https://api%s.%s/%s' % (self._API_SUFFIX.get(host, ''), host, endpoint), video_id, 'Downloading %s JSON metadata' % resource, query=query, headers=self.geo_verification_headers())['objects'] def _download_object_data(self, host, object_id, resource): return self._call_api( host, 'search', object_id, {'id': object_id}, resource)[0] def _get_object_description(self, obj): return strip_or_none(obj.get('long_description') or obj.get('short_description')) def _parse_video_data(self, video): title = video['name'] entry_id, partner_id = [None] * 2 for k in self._KALTURA_KEYS: k_url = video.get(k) if k_url: mobj = re.search(r'/p/(\d+)/.+?/entryId/([^/]+)/', k_url) if mobj: partner_id, entry_id = mobj.groups() break meta_categories = try_get(video, lambda x: x['meta']['categories'], list) or [] categories = list(filter(None, [c.get('name') for c in meta_categories])) show_info = video.get('show_info') or {} return { '_type': 'url_transparent', 'url': 'kaltura:%s:%s' % (partner_id, entry_id), 'ie_key': KalturaIE.ie_key(), 'id': entry_id, 'title': title, 'description': self._get_object_description(video), 'age_limit': parse_age_limit(video.get('mpaa_rating') or video.get('tv_rating')), 'categories': categories, 'series': show_info.get('show_name'), 'season_number': 
int_or_none(show_info.get('season_num')), 'season_id': show_info.get('season_id'), 'episode_number': int_or_none(show_info.get('episode_num')), } class AsianCrushIE(AsianCrushBaseIE): _VALID_URL = r'%s/video/(?:[^/]+/)?0+(?P<id>\d+)v\b' % AsianCrushBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'https://www.asiancrush.com/video/004289v/women-who-flirt', 'md5': 'c3b740e48d0ba002a42c0b72857beae6', 'info_dict': { 'id': '1_y4tmjm5r', 'ext': 'mp4', 'title': 'Women Who Flirt', 'description': 'md5:b65c7e0ae03a85585476a62a186f924c', 'timestamp': 1496936429, 'upload_date': '20170608', 'uploader_id': 'craig@crifkin.com', 'age_limit': 13, 'categories': 'count:5', 'duration': 5812, }, }, { 'url': 'https://www.asiancrush.com/video/she-was-pretty/011886v-pretty-episode-3/', 'only_matching': True, }, { 'url': 'https://www.yuyutv.com/video/013886v/the-act-of-killing/', 'only_matching': True, }, { 'url': 'https://www.yuyutv.com/video/peep-show/013922v-warring-factions/', 'only_matching': True, }, { 'url': 'https://www.midnightpulp.com/video/010400v/drifters/', 'only_matching': True, }, { 'url': 'https://www.midnightpulp.com/video/mononoke/016378v-zashikiwarashi-part-1/', 'only_matching': True, }, { 'url': 'https://www.cocoro.tv/video/the-wonderful-wizard-of-oz/008878v-the-wonderful-wizard-of-oz-ep01/', 'only_matching': True, }, { 'url': 'https://www.retrocrush.tv/video/true-tears/012328v-i...gave-away-my-tears', 'only_matching': True, }] def _real_extract(self, url): host, video_id = re.match(self._VALID_URL, url).groups() if host == 'cocoro.tv': webpage = self._download_webpage(url, video_id) embed_vars = self._parse_json(self._search_regex( r'iEmbedVars\s*=\s*({.+?})', webpage, 'embed vars', default='{}'), video_id, fatal=False) or {} video_id = embed_vars.get('entry_id') or video_id video = self._download_object_data(host, video_id, 'video') return self._parse_video_data(video) class AsianCrushPlaylistIE(AsianCrushBaseIE): _VALID_URL = r'%s/series/0+(?P<id>\d+)s\b' % 
AsianCrushBaseIE._VALID_URL_BASE _TESTS = [{ 'url': 'https://www.asiancrush.com/series/006447s/fruity-samurai', 'info_dict': { 'id': '6447', 'title': 'Fruity Samurai', 'description': 'md5:7535174487e4a202d3872a7fc8f2f154', }, 'playlist_count': 13, }, { 'url': 'https://www.yuyutv.com/series/013920s/peep-show/', 'only_matching': True, }, { 'url': 'https://www.midnightpulp.com/series/016375s/mononoke/', 'only_matching': True, }, { 'url': 'https://www.cocoro.tv/series/008549s/the-wonderful-wizard-of-oz/', 'only_matching': True, }, { 'url': 'https://www.retrocrush.tv/series/012355s/true-tears', 'only_matching': True, }] _PAGE_SIZE = 1000000000 def _fetch_page(self, domain, parent_id, page): videos = self._call_api( domain, 'getreferencedobjects', parent_id, { 'max': self._PAGE_SIZE, 'object_type': 'video', 'parent_id': parent_id, 'start': page * self._PAGE_SIZE, }, 'page %d' % (page + 1)) for video in videos: yield self._parse_video_data(video) def _real_extract(self, url): host, playlist_id = re.match(self._VALID_URL, url).groups() if host == 'cocoro.tv': webpage = self._download_webpage(url, playlist_id) entries = [] for mobj in re.finditer( r'<a[^>]+href=(["\'])(?P<url>%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL, webpage): attrs = extract_attributes(mobj.group(0)) if attrs.get('class') == 'clearfix': entries.append(self.url_result( mobj.group('url'), ie=AsianCrushIE.ie_key())) title = self._html_search_regex( r'(?s)<h1\b[^>]\bid=["\']movieTitle[^>]+>(.+?)</h1>', webpage, 'title', default=None) or self._og_search_title( webpage, default=None) or self._html_search_meta( 'twitter:title', webpage, 'title', default=None) or self._search_regex( r'<title>([^<]+)</title>', webpage, 'title', fatal=False) if title: title = re.sub(r'\s*\|\s*.+?$', '', title) description = self._og_search_description( webpage, default=None) or self._html_search_meta( 'twitter:description', webpage, 'description', fatal=False) else: show = self._download_object_data(host, playlist_id, 'show') title 
= show.get('name') description = self._get_object_description(show) entries = OnDemandPagedList( functools.partial(self._fetch_page, host, playlist_id), self._PAGE_SIZE) return self.playlist_result(entries, playlist_id, title, description)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/melonvod.py
youtube_dl/extractor/melonvod.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, urljoin, ) class MelonVODIE(InfoExtractor): _VALID_URL = r'https?://vod\.melon\.com/video/detail2\.html?\?.*?mvId=(?P<id>[0-9]+)' _TEST = { 'url': 'http://vod.melon.com/video/detail2.htm?mvId=50158734', 'info_dict': { 'id': '50158734', 'ext': 'mp4', 'title': "Jessica 'Wonderland' MV Making Film", 'thumbnail': r're:^https?://.*\.jpg$', 'artist': 'Jessica (제시카)', 'upload_date': '20161212', 'duration': 203, }, 'params': { 'skip_download': 'm3u8 download', } } def _real_extract(self, url): video_id = self._match_id(url) play_info = self._download_json( 'http://vod.melon.com/video/playerInfo.json', video_id, note='Downloading player info JSON', query={'mvId': video_id}) title = play_info['mvInfo']['MVTITLE'] info = self._download_json( 'http://vod.melon.com/delivery/streamingInfo.json', video_id, note='Downloading streaming info JSON', query={ 'contsId': video_id, 'contsType': 'VIDEO', }) stream_info = info['streamingInfo'] formats = self._extract_m3u8_formats( stream_info['encUrl'], video_id, 'mp4', m3u8_id='hls') self._sort_formats(formats) artist_list = play_info.get('artistList') artist = None if isinstance(artist_list, list): artist = ', '.join( [a['ARTISTNAMEWEBLIST'] for a in artist_list if a.get('ARTISTNAMEWEBLIST')]) thumbnail = urljoin(info.get('staticDomain'), stream_info.get('imgPath')) duration = int_or_none(stream_info.get('playTime')) upload_date = stream_info.get('mvSvcOpenDt', '')[:8] or None return { 'id': video_id, 'title': title, 'artist': artist, 'thumbnail': thumbnail, 'upload_date': upload_date, 'duration': duration, 'formats': formats }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/popcorntimes.py
youtube_dl/extractor/popcorntimes.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_b64decode, compat_chr, ) from ..utils import int_or_none class PopcorntimesIE(InfoExtractor): _VALID_URL = r'https?://popcorntimes\.tv/[^/]+/m/(?P<id>[^/]+)/(?P<display_id>[^/?#&]+)' _TEST = { 'url': 'https://popcorntimes.tv/de/m/A1XCFvz/haensel-und-gretel-opera-fantasy', 'md5': '93f210991ad94ba8c3485950a2453257', 'info_dict': { 'id': 'A1XCFvz', 'display_id': 'haensel-und-gretel-opera-fantasy', 'ext': 'mp4', 'title': 'Hänsel und Gretel', 'description': 'md5:1b8146791726342e7b22ce8125cf6945', 'thumbnail': r're:^https?://.*\.jpg$', 'creator': 'John Paul', 'release_date': '19541009', 'duration': 4260, 'tbr': 5380, 'width': 720, 'height': 540, }, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, display_id = mobj.group('id', 'display_id') webpage = self._download_webpage(url, display_id) title = self._search_regex( r'<h1>([^<]+)', webpage, 'title', default=None) or self._html_search_meta( 'ya:ovs:original_name', webpage, 'title', fatal=True) loc = self._search_regex( r'PCTMLOC\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'loc', group='value') loc_b64 = '' for c in loc: c_ord = ord(c) if ord('a') <= c_ord <= ord('z') or ord('A') <= c_ord <= ord('Z'): upper = ord('Z') if c_ord <= ord('Z') else ord('z') c_ord += 13 if upper < c_ord: c_ord -= 26 loc_b64 += compat_chr(c_ord) video_url = compat_b64decode(loc_b64).decode('utf-8') description = self._html_search_regex( r'(?s)<div[^>]+class=["\']pt-movie-desc[^>]+>(.+?)</div>', webpage, 'description', fatal=False) thumbnail = self._search_regex( r'<img[^>]+class=["\']video-preview[^>]+\bsrc=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'thumbnail', default=None, group='value') or self._og_search_thumbnail(webpage) creator = self._html_search_meta( 'video:director', webpage, 'creator', default=None) release_date = self._html_search_meta( 'video:release_date', webpage, 
default=None) if release_date: release_date = release_date.replace('-', '') def int_meta(name): return int_or_none(self._html_search_meta( name, webpage, default=None)) return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'creator': creator, 'release_date': release_date, 'duration': int_meta('video:duration'), 'tbr': int_meta('ya:ovs:bitrate'), 'width': int_meta('og:video:width'), 'height': int_meta('og:video:height'), 'http_headers': { 'Referer': url, }, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/footyroom.py
youtube_dl/extractor/footyroom.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from .streamable import StreamableIE class FootyRoomIE(InfoExtractor): _VALID_URL = r'https?://footyroom\.com/matches/(?P<id>\d+)' _TESTS = [{ 'url': 'http://footyroom.com/matches/79922154/hull-city-vs-chelsea/review', 'info_dict': { 'id': '79922154', 'title': 'VIDEO Hull City 0 - 2 Chelsea', }, 'playlist_count': 2, 'add_ie': [StreamableIE.ie_key()], }, { 'url': 'http://footyroom.com/matches/75817984/georgia-vs-germany/review', 'info_dict': { 'id': '75817984', 'title': 'VIDEO Georgia 0 - 2 Germany', }, 'playlist_count': 1, 'add_ie': ['Playwire'] }] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) playlist = self._parse_json(self._search_regex( r'DataStore\.media\s*=\s*([^;]+)', webpage, 'media data'), playlist_id) playlist_title = self._og_search_title(webpage) entries = [] for video in playlist: payload = video.get('payload') if not payload: continue playwire_url = self._html_search_regex( r'data-config="([^"]+)"', payload, 'playwire url', default=None) if playwire_url: entries.append(self.url_result(self._proto_relative_url( playwire_url, 'http:'), 'Playwire')) streamable_url = StreamableIE._extract_url(payload) if streamable_url: entries.append(self.url_result( streamable_url, StreamableIE.ie_key())) return self.playlist_result(entries, playlist_id, playlist_title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/internazionale.py
youtube_dl/extractor/internazionale.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import unified_timestamp class InternazionaleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?internazionale\.it/video/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.internazionale.it/video/2015/02/19/richard-linklater-racconta-una-scena-di-boyhood', 'md5': '3e39d32b66882c1218e305acbf8348ca', 'info_dict': { 'id': '265968', 'display_id': 'richard-linklater-racconta-una-scena-di-boyhood', 'ext': 'mp4', 'title': 'Richard Linklater racconta una scena di Boyhood', 'description': 'md5:efb7e5bbfb1a54ae2ed5a4a015f0e665', 'timestamp': 1424354635, 'upload_date': '20150219', 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'format': 'bestvideo', }, }, { 'url': 'https://www.internazionale.it/video/2018/08/29/telefono-stare-con-noi-stessi', 'md5': '9db8663704cab73eb972d1cee0082c79', 'info_dict': { 'id': '761344', 'display_id': 'telefono-stare-con-noi-stessi', 'ext': 'mp4', 'title': 'Usiamo il telefono per evitare di stare con noi stessi', 'description': 'md5:75ccfb0d6bcefc6e7428c68b4aa1fe44', 'timestamp': 1535528954, 'upload_date': '20180829', 'thumbnail': r're:^https?://.*\.jpg$', }, 'params': { 'format': 'bestvideo', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) DATA_RE = r'data-%s=(["\'])(?P<value>(?:(?!\1).)+)\1' title = self._search_regex( DATA_RE % 'video-title', webpage, 'title', default=None, group='value') or self._og_search_title(webpage) video_id = self._search_regex( DATA_RE % 'job-id', webpage, 'video id', group='value') video_path = self._search_regex( DATA_RE % 'video-path', webpage, 'video path', group='value') video_available_abroad = self._search_regex( DATA_RE % 'video-available_abroad', webpage, 'video available aboard', default='1', group='value') video_available_abroad = video_available_abroad == '1' video_base = 'https://video%s.internazionale.it/%s/%s.' 
% \ ('' if video_available_abroad else '-ita', video_path, video_id) formats = self._extract_m3u8_formats( video_base + 'm3u8', display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) formats.extend(self._extract_mpd_formats( video_base + 'mpd', display_id, mpd_id='dash', fatal=False)) self._sort_formats(formats) timestamp = unified_timestamp(self._html_search_meta( 'article:published_time', webpage, 'timestamp')) return { 'id': video_id, 'display_id': display_id, 'title': title, 'thumbnail': self._og_search_thumbnail(webpage), 'description': self._og_search_description(webpage), 'timestamp': timestamp, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/noovo.py
youtube_dl/extractor/noovo.py
# coding: utf-8 from __future__ import unicode_literals from .brightcove import BrightcoveNewIE from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, js_to_json, smuggle_url, try_get, ) class NoovoIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?noovo\.ca/videos/(?P<id>[^/]+/[^/?#&]+)' _TESTS = [{ # clip 'url': 'http://noovo.ca/videos/rpm-plus/chrysler-imperial', 'info_dict': { 'id': '5386045029001', 'ext': 'mp4', 'title': 'Chrysler Imperial', 'description': 'md5:de3c898d1eb810f3e6243e08c8b4a056', 'timestamp': 1491399228, 'upload_date': '20170405', 'uploader_id': '618566855001', 'series': 'RPM+', }, 'params': { 'skip_download': True, }, }, { # episode 'url': 'http://noovo.ca/videos/l-amour-est-dans-le-pre/episode-13-8', 'info_dict': { 'id': '5395865725001', 'title': 'Épisode 13 : Les retrouvailles', 'description': 'md5:888c3330f0c1b4476c5bc99a1c040473', 'ext': 'mp4', 'timestamp': 1492019320, 'upload_date': '20170412', 'uploader_id': '618566855001', 'series': "L'amour est dans le pré", 'season_number': 5, 'episode': 'Épisode 13', 'episode_number': 13, }, 'params': { 'skip_download': True, }, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/618566855001/default_default/index.html?videoId=%s' def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) brightcove_id = self._search_regex( r'data-video-id=["\'](\d+)', webpage, 'brightcove id') data = self._parse_json( self._search_regex( r'(?s)dataLayer\.push\(\s*({.+?})\s*\);', webpage, 'data', default='{}'), video_id, transform_source=js_to_json, fatal=False) title = try_get( data, lambda x: x['video']['nom'], compat_str) or self._html_search_meta( 'dcterms.Title', webpage, 'title', fatal=True) description = self._html_search_meta( ('dcterms.Description', 'description'), webpage, 'description') series = try_get( data, lambda x: x['emission']['nom']) or self._search_regex( 
r'<div[^>]+class="banner-card__subtitle h4"[^>]*>([^<]+)', webpage, 'series', default=None) season_el = try_get(data, lambda x: x['emission']['saison'], dict) or {} season = try_get(season_el, lambda x: x['nom'], compat_str) season_number = int_or_none(try_get(season_el, lambda x: x['numero'])) episode_el = try_get(season_el, lambda x: x['episode'], dict) or {} episode = try_get(episode_el, lambda x: x['nom'], compat_str) episode_number = int_or_none(try_get(episode_el, lambda x: x['numero'])) return { '_type': 'url_transparent', 'ie_key': BrightcoveNewIE.ie_key(), 'url': smuggle_url( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {'geo_countries': ['CA']}), 'id': brightcove_id, 'title': title, 'description': description, 'series': series, 'season': season, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/maoritv.py
youtube_dl/extractor/maoritv.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class MaoriTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?maoritelevision\.com/shows/(?:[^/]+/)+(?P<id>[^/?&#]+)' _TEST = { 'url': 'https://www.maoritelevision.com/shows/korero-mai/S01E054/korero-mai-series-1-episode-54', 'md5': '5ade8ef53851b6a132c051b1cd858899', 'info_dict': { 'id': '4774724855001', 'ext': 'mp4', 'title': 'Kōrero Mai, Series 1 Episode 54', 'upload_date': '20160226', 'timestamp': 1456455018, 'description': 'md5:59bde32fd066d637a1a55794c56d8dcb', 'uploader_id': '1614493167001', }, } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1614493167001/HJlhIQhQf_default/index.html?videoId=%s' def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) brightcove_id = self._search_regex( r'data-main-video-id=["\'](\d+)', webpage, 'brightcove id') return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bokecc.py
youtube_dl/extractor/bokecc.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_parse_qs from ..utils import ExtractorError class BokeCCBaseIE(InfoExtractor): def _extract_bokecc_formats(self, webpage, video_id, format_id=None): player_params_str = self._html_search_regex( r'<(?:script|embed)[^>]+src=(?P<q>["\'])(?:https?:)?//p\.bokecc\.com/(?:player|flash/player\.swf)\?(?P<query>.+?)(?P=q)', webpage, 'player params', group='query') player_params = compat_parse_qs(player_params_str) info_xml = self._download_xml( 'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % ( player_params['siteid'][0], player_params['vid'][0]), video_id) formats = [{ 'format_id': format_id, 'url': quality.find('./copy').attrib['playurl'], 'preference': int(quality.attrib['value']), } for quality in info_xml.findall('./video/quality')] self._sort_formats(formats) return formats class BokeCCIE(BokeCCBaseIE): IE_DESC = 'CC视频' _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)' _TESTS = [{ 'url': 'http://union.bokecc.com/playvideo.bo?vid=E0ABAE9D4F509B189C33DC5901307461&uid=FE644790DE9D154A', 'info_dict': { 'id': 'FE644790DE9D154A_E0ABAE9D4F509B189C33DC5901307461', 'ext': 'flv', 'title': 'BokeCC Video', }, }] def _real_extract(self, url): qs = compat_parse_qs(re.match(self._VALID_URL, url).group('query')) if not qs.get('vid') or not qs.get('uid'): raise ExtractorError('Invalid URL', expected=True) video_id = '%s_%s' % (qs['uid'][0], qs['vid'][0]) webpage = self._download_webpage(url, video_id) return { 'id': video_id, 'title': 'BokeCC Video', # no title provided in the webpage 'formats': self._extract_bokecc_formats(webpage, video_id), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/aljazeera.py
youtube_dl/extractor/aljazeera.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor class AlJazeeraIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?aljazeera\.com/(?P<type>program/[^/]+|(?:feature|video)s)/\d{4}/\d{1,2}/\d{1,2}/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.aljazeera.com/program/episode/2014/9/19/deliverance', 'info_dict': { 'id': '3792260579001', 'ext': 'mp4', 'title': 'The Slum - Episode 1: Deliverance', 'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.', 'uploader_id': '665003303001', 'timestamp': 1411116829, 'upload_date': '20140919', }, 'add_ie': ['BrightcoveNew'], 'skip': 'Not accessible from Travis CI server', }, { 'url': 'https://www.aljazeera.com/videos/2017/5/11/sierra-leone-709-carat-diamond-to-be-auctioned-off', 'only_matching': True, }, { 'url': 'https://www.aljazeera.com/features/2017/8/21/transforming-pakistans-buses-into-art', 'only_matching': True, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' def _real_extract(self, url): post_type, name = re.match(self._VALID_URL, url).groups() post_type = { 'features': 'post', 'program': 'episode', 'videos': 'video', }[post_type.split('/')[0]] video = self._download_json( 'https://www.aljazeera.com/graphql', name, query={ 'operationName': 'SingleArticleQuery', 'variables': json.dumps({ 'name': name, 'postType': post_type, }), }, headers={ 'wp-site': 'aje', })['data']['article']['video'] video_id = video['id'] account_id = video.get('accountId') or '665003303001' player_id = video.get('playerId') or 'BkeSH5BDb' return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id), 'BrightcoveNew', video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/atvat.py
youtube_dl/extractor/atvat.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, unescapeHTML, ) class ATVAtIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?atv\.at/(?:[^/]+/){2}(?P<id>[dv]\d+)' _TESTS = [{ 'url': 'http://atv.at/aktuell/di-210317-2005-uhr/v1698449/', 'md5': 'c3b6b975fb3150fc628572939df205f2', 'info_dict': { 'id': '1698447', 'ext': 'mp4', 'title': 'DI, 21.03.17 | 20:05 Uhr 1/1', } }, { 'url': 'http://atv.at/aktuell/meinrad-knapp/d8416/', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_data = self._parse_json(unescapeHTML(self._search_regex( [r'flashPlayerOptions\s*=\s*(["\'])(?P<json>(?:(?!\1).)+)\1', r'class="[^"]*jsb_video/FlashPlayer[^"]*"[^>]+data-jsb="(?P<json>[^"]+)"'], webpage, 'player data', group='json')), display_id)['config']['initial_video'] video_id = video_data['id'] video_title = video_data['title'] parts = [] for part in video_data.get('parts', []): part_id = part['id'] part_title = part['title'] formats = [] for source in part.get('sources', []): source_url = source.get('src') if not source_url: continue ext = determine_ext(source_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( source_url, part_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'format_id': source.get('delivery'), 'url': source_url, }) self._sort_formats(formats) parts.append({ 'id': part_id, 'title': part_title, 'thumbnail': part.get('preview_image_url'), 'duration': int_or_none(part.get('duration')), 'is_live': part.get('is_livestream'), 'formats': formats, }) return { '_type': 'multi_video', 'id': video_id, 'title': video_title, 'entries': parts, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/radiode.py
youtube_dl/extractor/radiode.py
from __future__ import unicode_literals from .common import InfoExtractor class RadioDeIE(InfoExtractor): IE_NAME = 'radio.de' _VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)' _TEST = { 'url': 'http://ndr2.radio.de/', 'info_dict': { 'id': 'ndr2', 'ext': 'mp3', 'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:591c49c702db1a33751625ebfb67f273', 'thumbnail': r're:^https?://.*\.png', 'is_live': True, }, 'params': { 'skip_download': True, } } def _real_extract(self, url): radio_id = self._match_id(url) webpage = self._download_webpage(url, radio_id) jscode = self._search_regex( r"'components/station/stationService':\s*\{\s*'?station'?:\s*(\{.*?\s*\}),\n", webpage, 'broadcast') broadcast = self._parse_json(jscode, radio_id) title = self._live_title(broadcast['name']) description = broadcast.get('description') or broadcast.get('shortDescription') thumbnail = broadcast.get('picture4Url') or broadcast.get('picture4TransUrl') or broadcast.get('logo100x100') formats = [{ 'url': stream['streamUrl'], 'ext': stream['streamContentFormat'].lower(), 'acodec': stream['streamContentFormat'], 'abr': stream['bitRate'], 'asr': stream['sampleRate'] } for stream in broadcast['streamUrls']] self._sort_formats(formats) return { 'id': radio_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'is_live': True, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rockstargames.py
youtube_dl/extractor/rockstargames.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, ) class RockstarGamesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rockstargames\.com/videos(?:/video/|#?/?\?.*\bvideo=)(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.rockstargames.com/videos/video/11544/', 'md5': '03b5caa6e357a4bd50e3143fc03e5733', 'info_dict': { 'id': '11544', 'ext': 'mp4', 'title': 'Further Adventures in Finance and Felony Trailer', 'description': 'md5:6d31f55f30cb101b5476c4a379e324a3', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1464876000, 'upload_date': '20160602', } }, { 'url': 'http://www.rockstargames.com/videos#/?video=48', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( 'https://www.rockstargames.com/videoplayer/videos/get-video.json', video_id, query={ 'id': video_id, 'locale': 'en_us', })['video'] title = video['title'] formats = [] for video in video['files_processed']['video/mp4']: if not video.get('src'): continue resolution = video.get('resolution') height = int_or_none(self._search_regex( r'^(\d+)[pP]$', resolution or '', 'height', default=None)) formats.append({ 'url': self._proto_relative_url(video['src']), 'format_id': resolution, 'height': height, }) if not formats: youtube_id = video.get('youtube_id') if youtube_id: return self.url_result(youtube_id, 'Youtube') self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': video.get('description'), 'thumbnail': self._proto_relative_url(video.get('screencap')), 'timestamp': parse_iso8601(video.get('created')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/callin.py
youtube_dl/extractor/callin.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, traverse_obj, try_get, ) class CallinIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?callin\.com/episode/(?:[^/#?-]+-)*(?P<id>[^/#?-]+)' _TESTS = [{ 'url': 'https://www.callin.com/episode/fcc-commissioner-brendan-carr-on-elons-PrumRdSQJW', 'md5': '14ede27ee2c957b7e4db93140fc0745c', 'info_dict': { 'id': 'PrumRdSQJW', 'ext': 'mp4', 'title': 'FCC Commissioner Brendan Carr on Elon’s Starlink', 'description': 'Or, why the government doesn’t like SpaceX', 'channel': 'The Pull Request', 'channel_url': 'https://callin.com/show/the-pull-request-ucnDJmEKAa', } }, { 'url': 'https://www.callin.com/episode/episode-81-elites-melt-down-over-student-debt-lzxMidUnjA', 'md5': '16f704ddbf82a27e3930533b12062f07', 'info_dict': { 'id': 'lzxMidUnjA', 'ext': 'mp4', 'title': 'Episode 81- Elites MELT DOWN over Student Debt Victory? Rumble in NYC?', 'description': 'Let’s talk todays episode about the primary election shake up in NYC and the elites melting down over student debt cancelation.', 'channel': 'The DEBRIEF With Briahna Joy Gray', 'channel_url': 'https://callin.com/show/the-debrief-with-briahna-joy-gray-siiFDzGegm', } }] def _search_nextjs_data(self, webpage, video_id, transform_source=None, fatal=True, **kw): return self._parse_json( self._search_regex( r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>', webpage, 'next.js data', fatal=fatal, **kw), video_id, transform_source=transform_source, fatal=fatal) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) next_data = self._search_nextjs_data(webpage, video_id) episode = traverse_obj(next_data, ('props', 'pageProps', 'episode'), expected_type=dict) if not episode: raise ExtractorError('Failed to find episode data') title = episode.get('title') or self._og_search_title(webpage) description = 
episode.get('description') or self._og_search_description(webpage) formats = [] formats.extend(self._extract_m3u8_formats( episode.get('m3u8'), video_id, 'mp4', entry_protocol='m3u8_native', fatal=False)) self._sort_formats(formats) channel = try_get(episode, lambda x: x['show']['title'], compat_str) channel_url = try_get(episode, lambda x: x['show']['linkObj']['resourceUrl'], compat_str) return { 'id': video_id, 'title': title, 'description': description, 'formats': formats, 'channel': channel, 'channel_url': channel_url, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ministrygrid.py
youtube_dl/extractor/ministrygrid.py
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( ExtractorError, smuggle_url, ) class MinistryGridIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ministrygrid\.com/([^/?#]*/)*(?P<id>[^/#?]+)/?(?:$|[?#])' _TEST = { 'url': 'http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers', 'md5': '844be0d2a1340422759c2a9101bab017', 'info_dict': { 'id': '3453494717001', 'ext': 'mp4', 'title': 'The Gospel by Numbers', 'thumbnail': r're:^https?://.*\.jpg', 'upload_date': '20140410', 'description': 'Coming soon from T4G 2014!', 'uploader_id': '2034960640001', 'timestamp': 1397145591, }, 'params': { # m3u8 download 'skip_download': True, }, 'add_ie': ['TDSLifeway'], } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) portlets = self._parse_json(self._search_regex( r'Liferay\.Portlet\.list=(\[.+?\])', webpage, 'portlet list'), video_id) pl_id = self._search_regex( r'getPlid:function\(\){return"(\d+)"}', webpage, 'p_l_id') for i, portlet in enumerate(portlets): portlet_url = 'http://www.ministrygrid.com/c/portal/render_portlet?p_l_id=%s&p_p_id=%s' % (pl_id, portlet) portlet_code = self._download_webpage( portlet_url, video_id, note='Looking in portlet %s (%d/%d)' % (portlet, i + 1, len(portlets)), fatal=False) video_iframe_url = self._search_regex( r'<iframe.*?src="([^"]+)"', portlet_code, 'video iframe', default=None) if video_iframe_url: return self.url_result( smuggle_url(video_iframe_url, {'force_videoid': video_id}), video_id=video_id) raise ExtractorError('Could not find video iframe in any portlets')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/hbo.py
youtube_dl/extractor/hbo.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( xpath_text, xpath_element, int_or_none, parse_duration, urljoin, ) class HBOBaseIE(InfoExtractor): _FORMATS_INFO = { 'pro7': { 'width': 1280, 'height': 720, }, '1920': { 'width': 1280, 'height': 720, }, 'pro6': { 'width': 768, 'height': 432, }, '640': { 'width': 768, 'height': 432, }, 'pro5': { 'width': 640, 'height': 360, }, 'highwifi': { 'width': 640, 'height': 360, }, 'high3g': { 'width': 640, 'height': 360, }, 'medwifi': { 'width': 400, 'height': 224, }, 'med3g': { 'width': 400, 'height': 224, }, } def _extract_info(self, url, display_id): video_data = self._download_xml(url, display_id) video_id = xpath_text(video_data, 'id', fatal=True) episode_title = title = xpath_text(video_data, 'title', fatal=True) series = xpath_text(video_data, 'program') if series: title = '%s - %s' % (series, title) formats = [] for source in xpath_element(video_data, 'videos', 'sources', True): if source.tag == 'size': path = xpath_text(source, './/path') if not path: continue width = source.attrib.get('width') format_info = self._FORMATS_INFO.get(width, {}) height = format_info.get('height') fmt = { 'url': path, 'format_id': 'http%s' % ('-%dp' % height if height else ''), 'width': format_info.get('width'), 'height': height, } rtmp = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', path) if rtmp: fmt.update({ 'url': rtmp.group('url'), 'play_path': rtmp.group('playpath'), 'app': rtmp.group('app'), 'ext': 'flv', 'format_id': fmt['format_id'].replace('http', 'rtmp'), }) formats.append(fmt) else: video_url = source.text if not video_url: continue if source.tag == 'tarball': formats.extend(self._extract_m3u8_formats( video_url.replace('.tar', '/base_index_w8.m3u8'), video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) elif source.tag == 'hls': m3u8_formats = self._extract_m3u8_formats( video_url.replace('.tar', '/base_index.m3u8'), 
video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) for f in m3u8_formats: if f.get('vcodec') == 'none' and not f.get('tbr'): f['tbr'] = int_or_none(self._search_regex( r'-(\d+)k/', f['url'], 'tbr', default=None)) formats.extend(m3u8_formats) elif source.tag == 'dash': formats.extend(self._extract_mpd_formats( video_url.replace('.tar', '/manifest.mpd'), video_id, mpd_id='dash', fatal=False)) else: format_info = self._FORMATS_INFO.get(source.tag, {}) formats.append({ 'format_id': 'http-%s' % source.tag, 'url': video_url, 'width': format_info.get('width'), 'height': format_info.get('height'), }) self._sort_formats(formats) thumbnails = [] card_sizes = xpath_element(video_data, 'titleCardSizes') if card_sizes is not None: for size in card_sizes: path = xpath_text(size, 'path') if not path: continue width = int_or_none(size.get('width')) thumbnails.append({ 'id': width, 'url': path, 'width': width, }) subtitles = None caption_url = xpath_text(video_data, 'captionUrl') if caption_url: subtitles = { 'en': [{ 'url': caption_url, 'ext': 'ttml' }], } return { 'id': video_id, 'title': title, 'duration': parse_duration(xpath_text(video_data, 'duration/tv14')), 'series': series, 'episode': episode_title, 'formats': formats, 'thumbnails': thumbnails, 'subtitles': subtitles, } class HBOIE(HBOBaseIE): IE_NAME = 'hbo' _VALID_URL = r'https?://(?:www\.)?hbo\.com/(?:video|embed)(?:/[^/]+)*/(?P<id>[^/?#]+)' _TEST = { 'url': 'https://www.hbo.com/video/game-of-thrones/seasons/season-8/videos/trailer', 'md5': '8126210656f433c452a21367f9ad85b3', 'info_dict': { 'id': '22113301', 'ext': 'mp4', 'title': 'Game of Thrones - Trailer', }, 'expected_warnings': ['Unknown MIME type application/mp4 in DASH manifest'], } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) location_path = self._parse_json(self._html_search_regex( r'data-state="({.+?})"', webpage, 'state'), display_id)['video']['locationUrl'] return 
self._extract_info(urljoin(url, location_path), display_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bongacams.py
youtube_dl/extractor/bongacams.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, try_get, urlencode_postdata, ) class BongaCamsIE(InfoExtractor): _VALID_URL = r'https?://(?P<host>(?:[^/]+\.)?bongacams\d*\.(?:com|net))/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://de.bongacams.com/azumi-8', 'only_matching': True, }, { 'url': 'https://cn.bongacams.com/azumi-8', 'only_matching': True, }, { 'url': 'https://de.bongacams.net/claireashton', 'info_dict': { 'id': 'claireashton', 'ext': 'mp4', 'title': r're:ClaireAshton \d{4}-\d{2}-\d{2} \d{2}:\d{2}', 'age_limit': 18, 'uploader_id': 'ClaireAshton', 'uploader': 'ClaireAshton', 'like_count': int, 'is_live': True, }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) host = mobj.group('host') channel_id = mobj.group('id') amf = self._download_json( 'https://%s/tools/amf.php' % host, channel_id, data=urlencode_postdata(( ('method', 'getRoomData'), ('args[]', channel_id), ('args[]', 'false'), )), headers={'X-Requested-With': 'XMLHttpRequest'}) server_url = amf['localData']['videoServerUrl'] uploader_id = try_get( amf, lambda x: x['performerData']['username'], compat_str) or channel_id uploader = try_get( amf, lambda x: x['performerData']['displayName'], compat_str) like_count = int_or_none(try_get( amf, lambda x: x['performerData']['loversCount'])) formats = self._extract_m3u8_formats( '%s/hls/stream_%s/playlist.m3u8' % (server_url, uploader_id), channel_id, 'mp4', m3u8_id='hls', live=True) self._sort_formats(formats) return { 'id': channel_id, 'title': self._live_title(uploader or uploader_id), 'uploader': uploader, 'uploader_id': uploader_id, 'like_count': like_count, 'age_limit': 18, 'is_live': True, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/br.py
youtube_dl/extractor/br.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..utils import ( determine_ext, ExtractorError, int_or_none, parse_duration, parse_iso8601, xpath_element, xpath_text, ) class BRIE(InfoExtractor): IE_DESC = 'Bayerischer Rundfunk' _VALID_URL = r'(?P<base_url>https?://(?:www\.)?br(?:-klassik)?\.de)/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html' _TESTS = [ { 'url': 'http://www.br.de/mediathek/video/sendungen/abendschau/betriebliche-altersvorsorge-104.html', 'md5': '83a0477cf0b8451027eb566d88b51106', 'info_dict': { 'id': '48f656ef-287e-486f-be86-459122db22cc', 'ext': 'mp4', 'title': 'Die böse Überraschung', 'description': 'md5:ce9ac81b466ce775b8018f6801b48ac9', 'duration': 180, 'uploader': 'Reinhard Weber', 'upload_date': '20150422', }, 'skip': '404 not found', }, { 'url': 'http://www.br.de/nachrichten/oberbayern/inhalt/muenchner-polizeipraesident-schreiber-gestorben-100.html', 'md5': 'af3a3a4aa43ff0ce6a89504c67f427ef', 'info_dict': { 'id': 'a4b83e34-123d-4b81-9f4e-c0d3121a4e05', 'ext': 'flv', 'title': 'Manfred Schreiber ist tot', 'description': 'md5:b454d867f2a9fc524ebe88c3f5092d97', 'duration': 26, }, 'skip': '404 not found', }, { 'url': 'https://www.br-klassik.de/audio/peeping-tom-premierenkritik-dance-festival-muenchen-100.html', 'md5': '8b5b27c0b090f3b35eac4ab3f7a73d3d', 'info_dict': { 'id': '74c603c9-26d3-48bb-b85b-079aeed66e0b', 'ext': 'aac', 'title': 'Kurzweilig und sehr bewegend', 'description': 'md5:0351996e3283d64adeb38ede91fac54e', 'duration': 296, }, 'skip': '404 not found', }, { 'url': 'http://www.br.de/radio/bayern1/service/team/videos/team-video-erdelt100.html', 'md5': 'dbab0aef2e047060ea7a21fc1ce1078a', 'info_dict': { 'id': '6ba73750-d405-45d3-861d-1ce8c524e059', 'ext': 'mp4', 'title': 'Umweltbewusster Häuslebauer', 'description': 'md5:d52dae9792d00226348c1dbb13c9bae2', 'duration': 116, } }, { 'url': 
'http://www.br.de/fernsehen/br-alpha/sendungen/kant-fuer-anfaenger/kritik-der-reinen-vernunft/kant-kritik-01-metaphysik100.html', 'md5': '23bca295f1650d698f94fc570977dae3', 'info_dict': { 'id': 'd982c9ce-8648-4753-b358-98abb8aec43d', 'ext': 'mp4', 'title': 'Folge 1 - Metaphysik', 'description': 'md5:bb659990e9e59905c3d41e369db1fbe3', 'duration': 893, 'uploader': 'Eva Maria Steimle', 'upload_date': '20170208', } }, ] def _real_extract(self, url): base_url, display_id = re.search(self._VALID_URL, url).groups() page = self._download_webpage(url, display_id) xml_url = self._search_regex( r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL') xml = self._download_xml(base_url + xml_url, display_id) medias = [] for xml_media in xml.findall('video') + xml.findall('audio'): media_id = xml_media.get('externalId') media = { 'id': media_id, 'title': xpath_text(xml_media, 'title', 'title', True), 'duration': parse_duration(xpath_text(xml_media, 'duration')), 'formats': self._extract_formats(xpath_element( xml_media, 'assets'), media_id), 'thumbnails': self._extract_thumbnails(xpath_element( xml_media, 'teaserImage/variants'), base_url), 'description': xpath_text(xml_media, 'desc'), 'webpage_url': xpath_text(xml_media, 'permalink'), 'uploader': xpath_text(xml_media, 'author'), } broadcast_date = xpath_text(xml_media, 'broadcastDate') if broadcast_date: media['upload_date'] = ''.join(reversed(broadcast_date.split('.'))) medias.append(media) if len(medias) > 1: self._downloader.report_warning( 'found multiple medias; please ' 'report this with the video URL to http://yt-dl.org/bug') if not medias: raise ExtractorError('No media entries found') return medias[0] def _extract_formats(self, assets, media_id): formats = [] for asset in assets.findall('asset'): format_url = xpath_text(asset, ['downloadUrl', 'url']) asset_type = asset.get('type') if asset_type.startswith('HDS'): 
formats.extend(self._extract_f4m_formats( format_url + '?hdcore=3.2.0', media_id, f4m_id='hds', fatal=False)) elif asset_type.startswith('HLS'): formats.extend(self._extract_m3u8_formats( format_url, media_id, 'mp4', 'm3u8_native', m3u8_id='hds', fatal=False)) else: format_info = { 'ext': xpath_text(asset, 'mediaType'), 'width': int_or_none(xpath_text(asset, 'frameWidth')), 'height': int_or_none(xpath_text(asset, 'frameHeight')), 'tbr': int_or_none(xpath_text(asset, 'bitrateVideo')), 'abr': int_or_none(xpath_text(asset, 'bitrateAudio')), 'vcodec': xpath_text(asset, 'codecVideo'), 'acodec': xpath_text(asset, 'codecAudio'), 'container': xpath_text(asset, 'mediaType'), 'filesize': int_or_none(xpath_text(asset, 'size')), } format_url = self._proto_relative_url(format_url) if format_url: http_format_info = format_info.copy() http_format_info.update({ 'url': format_url, 'format_id': 'http-%s' % asset_type, }) formats.append(http_format_info) server_prefix = xpath_text(asset, 'serverPrefix') if server_prefix: rtmp_format_info = format_info.copy() rtmp_format_info.update({ 'url': server_prefix, 'play_path': xpath_text(asset, 'fileName'), 'format_id': 'rtmp-%s' % asset_type, }) formats.append(rtmp_format_info) self._sort_formats(formats) return formats def _extract_thumbnails(self, variants, base_url): thumbnails = [{ 'url': base_url + xpath_text(variant, 'url'), 'width': int_or_none(xpath_text(variant, 'width')), 'height': int_or_none(xpath_text(variant, 'height')), } for variant in variants.findall('variant') if xpath_text(variant, 'url')] thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True) return thumbnails class BRMediathekIE(InfoExtractor): IE_DESC = 'Bayerischer Rundfunk Mediathek' _VALID_URL = r'https?://(?:www\.)?br\.de/mediathek/video/[^/?&#]*?-(?P<id>av:[0-9a-f]{24})' _TESTS = [{ 'url': 'https://www.br.de/mediathek/video/gesundheit-die-sendung-vom-28112017-av:5a1e6a6e8fce6d001871cc8e', 'md5': 'fdc3d485835966d1622587d08ba632ec', 'info_dict': { 
'id': 'av:5a1e6a6e8fce6d001871cc8e', 'ext': 'mp4', 'title': 'Die Sendung vom 28.11.2017', 'description': 'md5:6000cdca5912ab2277e5b7339f201ccc', 'timestamp': 1511942766, 'upload_date': '20171129', } }] def _real_extract(self, url): clip_id = self._match_id(url) clip = self._download_json( 'https://proxy-base.master.mango.express/graphql', clip_id, data=json.dumps({ "query": """{ viewer { clip(id: "%s") { title description duration createdAt ageRestriction videoFiles { edges { node { publicLocation fileSize videoProfile { width height bitrate encoding } } } } captionFiles { edges { node { publicLocation } } } teaserImages { edges { node { imageFiles { edges { node { publicLocation width height } } } } } } } } }""" % clip_id}).encode(), headers={ 'Content-Type': 'application/json', })['data']['viewer']['clip'] title = clip['title'] formats = [] for edge in clip.get('videoFiles', {}).get('edges', []): node = edge.get('node', {}) n_url = node.get('publicLocation') if not n_url: continue ext = determine_ext(n_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( n_url, clip_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: video_profile = node.get('videoProfile', {}) tbr = int_or_none(video_profile.get('bitrate')) format_id = 'http' if tbr: format_id += '-%d' % tbr formats.append({ 'format_id': format_id, 'url': n_url, 'width': int_or_none(video_profile.get('width')), 'height': int_or_none(video_profile.get('height')), 'tbr': tbr, 'filesize': int_or_none(node.get('fileSize')), }) self._sort_formats(formats) subtitles = {} for edge in clip.get('captionFiles', {}).get('edges', []): node = edge.get('node', {}) n_url = node.get('publicLocation') if not n_url: continue subtitles.setdefault('de', []).append({ 'url': n_url, }) thumbnails = [] for edge in clip.get('teaserImages', {}).get('edges', []): for image_edge in edge.get('node', {}).get('imageFiles', {}).get('edges', []): node = image_edge.get('node', {}) n_url = node.get('publicLocation') if not 
n_url: continue thumbnails.append({ 'url': n_url, 'width': int_or_none(node.get('width')), 'height': int_or_none(node.get('height')), }) return { 'id': clip_id, 'title': title, 'description': clip.get('description'), 'duration': int_or_none(clip.get('duration')), 'timestamp': parse_iso8601(clip.get('createdAt')), 'age_limit': int_or_none(clip.get('ageRestriction')), 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/mediasite.py
youtube_dl/extractor/mediasite.py
# coding: utf-8 from __future__ import unicode_literals import re import json from .common import InfoExtractor from ..compat import ( compat_str, compat_urlparse, ) from ..utils import ( ExtractorError, float_or_none, mimetype2ext, str_or_none, try_get, unescapeHTML, unsmuggle_url, url_or_none, urljoin, ) _ID_RE = r'(?:[0-9a-f]{32,34}|[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12,14})' class MediasiteIE(InfoExtractor): _VALID_URL = r'(?xi)https?://[^/]+/Mediasite/(?:Play|Showcase/(?:default|livebroadcast)/Presentation)/(?P<id>%s)(?P<query>\?[^#]+|)' % _ID_RE _TESTS = [ { 'url': 'https://hitsmediaweb.h-its.org/mediasite/Play/2db6c271681e4f199af3c60d1f82869b1d', 'info_dict': { 'id': '2db6c271681e4f199af3c60d1f82869b1d', 'ext': 'mp4', 'title': 'Lecture: Tuesday, September 20, 2016 - Sir Andrew Wiles', 'description': 'Sir Andrew Wiles: “Equations in arithmetic”\\n\\nI will describe some of the interactions between modern number theory and the problem of solving equations in rational numbers or integers\\u0027.', 'timestamp': 1474268400.0, 'upload_date': '20160919', }, }, { 'url': 'http://mediasite.uib.no/Mediasite/Play/90bb363295d945d6b548c867d01181361d?catalog=a452b7df-9ae1-46b7-a3ba-aceeb285f3eb', 'info_dict': { 'id': '90bb363295d945d6b548c867d01181361d', 'ext': 'mp4', 'upload_date': '20150429', 'title': '5) IT-forum 2015-Dag 1 - Dungbeetle - How and why Rain created a tiny bug tracker for Unity', 'timestamp': 1430311380.0, }, }, { 'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d', 'md5': '481fda1c11f67588c0d9d8fbdced4e39', 'info_dict': { 'id': '585a43626e544bdd97aeb71a0ec907a01d', 'ext': 'mp4', 'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.', 'description': '', 'thumbnail': r're:^https?://.*\.jpg(?:\?.*)?$', 'duration': 7713.088, 'timestamp': 1413309600, 'upload_date': '20141014', }, }, { 'url': 
'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4', 'md5': 'ef1fdded95bdf19b12c5999949419c92', 'info_dict': { 'id': '86a9ea9f53e149079fbdb4202b521ed21d', 'ext': 'wmv', 'title': '64ste Vakantiecursus: Afvalwater', 'description': 'md5:7fd774865cc69d972f542b157c328305', 'thumbnail': r're:^https?://.*\.jpg(?:\?.*?)?$', 'duration': 10853, 'timestamp': 1326446400, 'upload_date': '20120113', }, }, { 'url': 'http://digitalops.sandia.gov/Mediasite/Play/24aace4429fc450fb5b38cdbf424a66e1d', 'md5': '9422edc9b9a60151727e4b6d8bef393d', 'info_dict': { 'id': '24aace4429fc450fb5b38cdbf424a66e1d', 'ext': 'mp4', 'title': 'Xyce Software Training - Section 1', 'description': r're:(?s)SAND Number: SAND 2013-7800.{200,}', 'upload_date': '20120409', 'timestamp': 1333983600, 'duration': 7794, } }, { 'url': 'https://collegerama.tudelft.nl/Mediasite/Showcase/livebroadcast/Presentation/ada7020854f743c49fbb45c9ec7dbb351d', 'only_matching': True, }, { 'url': 'https://mediasite.ntnu.no/Mediasite/Showcase/default/Presentation/7d8b913259334b688986e970fae6fcb31d', 'only_matching': True, }, { # dashed id 'url': 'https://hitsmediaweb.h-its.org/mediasite/Play/2db6c271-681e-4f19-9af3-c60d1f82869b1d', 'only_matching': True, } ] # look in Mediasite.Core.js (Mediasite.ContentStreamType[*]) _STREAM_TYPES = { 0: 'video1', # the main video 2: 'slide', 3: 'presentation', 4: 'video2', # screencast? 5: 'video3', } @staticmethod def _extract_urls(webpage): return [ unescapeHTML(mobj.group('url')) for mobj in re.finditer( r'(?xi)<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:(?:https?:)?//[^/]+)?/Mediasite/Play/%s(?:\?.*?)?)\1' % _ID_RE, webpage)] def _real_extract(self, url): url, data = unsmuggle_url(url, {}) mobj = re.match(self._VALID_URL, url) resource_id = mobj.group('id') query = mobj.group('query') webpage, urlh = self._download_webpage_handle(url, resource_id) # XXX: add UrlReferrer? 
redirect_url = urlh.geturl() # XXX: might have also extracted UrlReferrer and QueryString from the html service_path = compat_urlparse.urljoin(redirect_url, self._html_search_regex( r'<div[^>]+\bid=["\']ServicePath[^>]+>(.+?)</div>', webpage, resource_id, default='/Mediasite/PlayerService/PlayerService.svc/json')) player_options = self._download_json( '%s/GetPlayerOptions' % service_path, resource_id, headers={ 'Content-type': 'application/json; charset=utf-8', 'X-Requested-With': 'XMLHttpRequest', }, data=json.dumps({ 'getPlayerOptionsRequest': { 'ResourceId': resource_id, 'QueryString': query, 'UrlReferrer': data.get('UrlReferrer', ''), 'UseScreenReader': False, } }).encode('utf-8'))['d'] presentation = player_options['Presentation'] title = presentation['Title'] if presentation is None: raise ExtractorError( 'Mediasite says: %s' % player_options['PlayerPresentationStatusMessage'], expected=True) thumbnails = [] formats = [] for snum, Stream in enumerate(presentation['Streams']): stream_type = Stream.get('StreamType') if stream_type is None: continue video_urls = Stream.get('VideoUrls') if not isinstance(video_urls, list): video_urls = [] stream_id = self._STREAM_TYPES.get( stream_type, 'type%u' % stream_type) stream_formats = [] for unum, VideoUrl in enumerate(video_urls): video_url = url_or_none(VideoUrl.get('Location')) if not video_url: continue # XXX: if Stream.get('CanChangeScheme', False), switch scheme to HTTP/HTTPS media_type = VideoUrl.get('MediaType') if media_type == 'SS': stream_formats.extend(self._extract_ism_formats( video_url, resource_id, ism_id='%s-%u.%u' % (stream_id, snum, unum), fatal=False)) elif media_type == 'Dash': stream_formats.extend(self._extract_mpd_formats( video_url, resource_id, mpd_id='%s-%u.%u' % (stream_id, snum, unum), fatal=False)) else: stream_formats.append({ 'format_id': '%s-%u.%u' % (stream_id, snum, unum), 'url': video_url, 'ext': mimetype2ext(VideoUrl.get('MimeType')), }) # TODO: if Stream['HasSlideContent']: # 
synthesise an MJPEG video stream '%s-%u.slides' % (stream_type, snum) # from Stream['Slides'] # this will require writing a custom downloader... # disprefer 'secondary' streams if stream_type != 0: for fmt in stream_formats: fmt['preference'] = -1 thumbnail_url = Stream.get('ThumbnailUrl') if thumbnail_url: thumbnails.append({ 'id': '%s-%u' % (stream_id, snum), 'url': urljoin(redirect_url, thumbnail_url), 'preference': -1 if stream_type != 0 else 0, }) formats.extend(stream_formats) self._sort_formats(formats) # XXX: Presentation['Presenters'] # XXX: Presentation['Transcript'] return { 'id': resource_id, 'title': title, 'description': presentation.get('Description'), 'duration': float_or_none(presentation.get('Duration'), 1000), 'timestamp': float_or_none(presentation.get('UnixTime'), 1000), 'formats': formats, 'thumbnails': thumbnails, } class MediasiteCatalogIE(InfoExtractor): _VALID_URL = r'''(?xi) (?P<url>https?://[^/]+/Mediasite) /Catalog/Full/ (?P<catalog_id>{0}) (?: /(?P<current_folder_id>{0}) /(?P<root_dynamic_folder_id>{0}) )? 
'''.format(_ID_RE) _TESTS = [{ 'url': 'http://events7.mediasite.com/Mediasite/Catalog/Full/631f9e48530d454381549f955d08c75e21', 'info_dict': { 'id': '631f9e48530d454381549f955d08c75e21', 'title': 'WCET Summit: Adaptive Learning in Higher Ed: Improving Outcomes Dynamically', }, 'playlist_count': 6, 'expected_warnings': ['is not a supported codec'], }, { # with CurrentFolderId and RootDynamicFolderId 'url': 'https://medaudio.medicine.iu.edu/Mediasite/Catalog/Full/9518c4a6c5cf4993b21cbd53e828a92521/97a9db45f7ab47428c77cd2ed74bb98f14/9518c4a6c5cf4993b21cbd53e828a92521', 'info_dict': { 'id': '9518c4a6c5cf4993b21cbd53e828a92521', 'title': 'IUSM Family and Friends Sessions', }, 'playlist_count': 2, }, { 'url': 'http://uipsyc.mediasite.com/mediasite/Catalog/Full/d5d79287c75243c58c50fef50174ec1b21', 'only_matching': True, }, { # no AntiForgeryToken 'url': 'https://live.libraries.psu.edu/Mediasite/Catalog/Full/8376d4b24dd1457ea3bfe4cf9163feda21', 'only_matching': True, }, { 'url': 'https://medaudio.medicine.iu.edu/Mediasite/Catalog/Full/9518c4a6c5cf4993b21cbd53e828a92521/97a9db45f7ab47428c77cd2ed74bb98f14/9518c4a6c5cf4993b21cbd53e828a92521', 'only_matching': True, }, { # dashed id 'url': 'http://events7.mediasite.com/Mediasite/Catalog/Full/631f9e48-530d-4543-8154-9f955d08c75e', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) mediasite_url = mobj.group('url') catalog_id = mobj.group('catalog_id') current_folder_id = mobj.group('current_folder_id') or catalog_id root_dynamic_folder_id = mobj.group('root_dynamic_folder_id') webpage = self._download_webpage(url, catalog_id) # AntiForgeryToken is optional (e.g. [1]) # 1. 
https://live.libraries.psu.edu/Mediasite/Catalog/Full/8376d4b24dd1457ea3bfe4cf9163feda21 anti_forgery_token = self._search_regex( r'AntiForgeryToken\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'anti forgery token', default=None, group='value') if anti_forgery_token: anti_forgery_header = self._search_regex( r'AntiForgeryHeaderName\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'anti forgery header name', default='X-SOFO-AntiForgeryHeader', group='value') data = { 'IsViewPage': True, 'IsNewFolder': True, 'AuthTicket': None, 'CatalogId': catalog_id, 'CurrentFolderId': current_folder_id, 'RootDynamicFolderId': root_dynamic_folder_id, 'ItemsPerPage': 1000, 'PageIndex': 0, 'PermissionMask': 'Execute', 'CatalogSearchType': 'SearchInFolder', 'SortBy': 'Date', 'SortDirection': 'Descending', 'StartDate': None, 'EndDate': None, 'StatusFilterList': None, 'PreviewKey': None, 'Tags': [], } headers = { 'Content-Type': 'application/json; charset=UTF-8', 'Referer': url, 'X-Requested-With': 'XMLHttpRequest', } if anti_forgery_token: headers[anti_forgery_header] = anti_forgery_token catalog = self._download_json( '%s/Catalog/Data/GetPresentationsForFolder' % mediasite_url, catalog_id, data=json.dumps(data).encode(), headers=headers) entries = [] for video in catalog['PresentationDetailsList']: if not isinstance(video, dict): continue video_id = str_or_none(video.get('Id')) if not video_id: continue entries.append(self.url_result( '%s/Play/%s' % (mediasite_url, video_id), ie=MediasiteIE.ie_key(), video_id=video_id)) title = try_get( catalog, lambda x: x['CurrentFolder']['Name'], compat_str) return self.playlist_result(entries, catalog_id, title,) class MediasiteNamedCatalogIE(InfoExtractor): _VALID_URL = r'(?xi)(?P<url>https?://[^/]+/Mediasite)/Catalog/catalogs/(?P<catalog_name>[^/?#&]+)' _TESTS = [{ 'url': 'https://msite.misis.ru/Mediasite/Catalog/catalogs/2016-industrial-management-skriabin-o-o', 'only_matching': True, }] def _real_extract(self, url): mobj = 
re.match(self._VALID_URL, url) mediasite_url = mobj.group('url') catalog_name = mobj.group('catalog_name') webpage = self._download_webpage(url, catalog_name) catalog_id = self._search_regex( r'CatalogId\s*:\s*["\'](%s)' % _ID_RE, webpage, 'catalog id') return self.url_result( '%s/Catalog/Full/%s' % (mediasite_url, catalog_id), ie=MediasiteCatalogIE.ie_key(), video_id=catalog_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ximalaya.py
youtube_dl/extractor/ximalaya.py
# coding: utf-8 from __future__ import unicode_literals import itertools import re from .common import InfoExtractor class XimalayaBaseIE(InfoExtractor): _GEO_COUNTRIES = ['CN'] class XimalayaIE(XimalayaBaseIE): IE_NAME = 'ximalaya' IE_DESC = '喜马拉雅FM' _VALID_URL = r'https?://(?:www\.|m\.)?ximalaya\.com/(?P<uid>[0-9]+)/sound/(?P<id>[0-9]+)' _USER_URL_FORMAT = '%s://www.ximalaya.com/zhubo/%i/' _TESTS = [ { 'url': 'http://www.ximalaya.com/61425525/sound/47740352/', 'info_dict': { 'id': '47740352', 'ext': 'm4a', 'uploader': '小彬彬爱听书', 'uploader_id': 61425525, 'uploader_url': 'http://www.ximalaya.com/zhubo/61425525/', 'title': '261.唐诗三百首.卷八.送孟浩然之广陵.李白', 'description': "contains:《送孟浩然之广陵》\n作者:李白\n故人西辞黄鹤楼,烟花三月下扬州。\n孤帆远影碧空尽,惟见长江天际流。", 'thumbnails': [ { 'name': 'cover_url', 'url': r're:^https?://.*\.jpg$', }, { 'name': 'cover_url_142', 'url': r're:^https?://.*\.jpg$', 'width': 180, 'height': 180 } ], 'categories': ['renwen', '人文'], 'duration': 93, 'view_count': int, 'like_count': int, } }, { 'url': 'http://m.ximalaya.com/61425525/sound/47740352/', 'info_dict': { 'id': '47740352', 'ext': 'm4a', 'uploader': '小彬彬爱听书', 'uploader_id': 61425525, 'uploader_url': 'http://www.ximalaya.com/zhubo/61425525/', 'title': '261.唐诗三百首.卷八.送孟浩然之广陵.李白', 'description': "contains:《送孟浩然之广陵》\n作者:李白\n故人西辞黄鹤楼,烟花三月下扬州。\n孤帆远影碧空尽,惟见长江天际流。", 'thumbnails': [ { 'name': 'cover_url', 'url': r're:^https?://.*\.jpg$', }, { 'name': 'cover_url_142', 'url': r're:^https?://.*\.jpg$', 'width': 180, 'height': 180 } ], 'categories': ['renwen', '人文'], 'duration': 93, 'view_count': int, 'like_count': int, } }, { 'url': 'https://www.ximalaya.com/11045267/sound/15705996/', 'info_dict': { 'id': '15705996', 'ext': 'm4a', 'uploader': '李延隆老师', 'uploader_id': 11045267, 'uploader_url': 'https://www.ximalaya.com/zhubo/11045267/', 'title': 'Lesson 1 Excuse me!', 'description': "contains:Listen to the tape then answer\xa0this question. 
Whose handbag is it?\n" "听录音,然后回答问题,这是谁的手袋?", 'thumbnails': [ { 'name': 'cover_url', 'url': r're:^https?://.*\.jpg$', }, { 'name': 'cover_url_142', 'url': r're:^https?://.*\.jpg$', 'width': 180, 'height': 180 } ], 'categories': ['train', '外语'], 'duration': 40, 'view_count': int, 'like_count': int, } }, ] def _real_extract(self, url): is_m = 'm.ximalaya' in url scheme = 'https' if url.startswith('https') else 'http' audio_id = self._match_id(url) webpage = self._download_webpage(url, audio_id, note='Download sound page for %s' % audio_id, errnote='Unable to get sound page') audio_info_file = '%s://m.ximalaya.com/tracks/%s.json' % (scheme, audio_id) audio_info = self._download_json(audio_info_file, audio_id, 'Downloading info json %s' % audio_info_file, 'Unable to download info file') formats = [] for bps, k in (('24k', 'play_path_32'), ('64k', 'play_path_64')): if audio_info.get(k): formats.append({ 'format_id': bps, 'url': audio_info[k], }) thumbnails = [] for k in audio_info.keys(): # cover pics kyes like: cover_url', 'cover_url_142' if k.startswith('cover_url'): thumbnail = {'name': k, 'url': audio_info[k]} if k == 'cover_url_142': thumbnail['width'] = 180 thumbnail['height'] = 180 thumbnails.append(thumbnail) audio_uploader_id = audio_info.get('uid') if is_m: audio_description = self._html_search_regex(r'(?s)<section\s+class=["\']content[^>]+>(.+?)</section>', webpage, 'audio_description', fatal=False) else: audio_description = self._html_search_regex(r'(?s)<div\s+class=["\']rich_intro[^>]*>(.+?</article>)', webpage, 'audio_description', fatal=False) if not audio_description: audio_description_file = '%s://www.ximalaya.com/sounds/%s/rich_intro' % (scheme, audio_id) audio_description = self._download_webpage(audio_description_file, audio_id, note='Downloading description file %s' % audio_description_file, errnote='Unable to download descrip file', fatal=False) audio_description = audio_description.strip() if audio_description else None return { 'id': audio_id, 
'uploader': audio_info.get('nickname'), 'uploader_id': audio_uploader_id, 'uploader_url': self._USER_URL_FORMAT % (scheme, audio_uploader_id) if audio_uploader_id else None, 'title': audio_info['title'], 'thumbnails': thumbnails, 'description': audio_description, 'categories': list(filter(None, (audio_info.get('category_name'), audio_info.get('category_title')))), 'duration': audio_info.get('duration'), 'view_count': audio_info.get('play_count'), 'like_count': audio_info.get('favorites_count'), 'formats': formats, } class XimalayaAlbumIE(XimalayaBaseIE): IE_NAME = 'ximalaya:album' IE_DESC = '喜马拉雅FM 专辑' _VALID_URL = r'https?://(?:www\.|m\.)?ximalaya\.com/(?P<uid>[0-9]+)/album/(?P<id>[0-9]+)' _TEMPLATE_URL = '%s://www.ximalaya.com/%s/album/%s/' _BASE_URL_TEMPL = '%s://www.ximalaya.com%s' _LIST_VIDEO_RE = r'<a[^>]+?href="(?P<url>/%s/sound/(?P<id>\d+)/?)"[^>]+?title="(?P<title>[^>]+)">' _TESTS = [{ 'url': 'http://www.ximalaya.com/61425525/album/5534601/', 'info_dict': { 'title': '唐诗三百首(含赏析)', 'id': '5534601', }, 'playlist_count': 312, }, { 'url': 'http://m.ximalaya.com/61425525/album/5534601', 'info_dict': { 'title': '唐诗三百首(含赏析)', 'id': '5534601', }, 'playlist_count': 312, }, ] def _real_extract(self, url): self.scheme = scheme = 'https' if url.startswith('https') else 'http' mobj = re.match(self._VALID_URL, url) uid, playlist_id = mobj.group('uid'), mobj.group('id') webpage = self._download_webpage(self._TEMPLATE_URL % (scheme, uid, playlist_id), playlist_id, note='Download album page for %s' % playlist_id, errnote='Unable to get album info') title = self._html_search_regex(r'detailContent_title[^>]*><h1(?:[^>]+)?>([^<]+)</h1>', webpage, 'title', fatal=False) return self.playlist_result(self._entries(webpage, playlist_id, uid), playlist_id, title) def _entries(self, page, playlist_id, uid): html = page for page_num in itertools.count(1): for entry in self._process_page(html, uid): yield entry next_url = 
self._search_regex(r'<a\s+href=(["\'])(?P<more>[\S]+)\1[^>]+rel=(["\'])next\3', html, 'list_next_url', default=None, group='more') if not next_url: break next_full_url = self._BASE_URL_TEMPL % (self.scheme, next_url) html = self._download_webpage(next_full_url, playlist_id) def _process_page(self, html, uid): find_from = html.index('album_soundlist') for mobj in re.finditer(self._LIST_VIDEO_RE % uid, html[find_from:]): yield self.url_result(self._BASE_URL_TEMPL % (self.scheme, mobj.group('url')), XimalayaIE.ie_key(), mobj.group('id'), mobj.group('title'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tvp.py
youtube_dl/extractor/tvp.py
# coding: utf-8 from __future__ import unicode_literals import itertools import re from .common import InfoExtractor from ..utils import ( clean_html, determine_ext, ExtractorError, get_element_by_attribute, orderedSet, ) class TVPIE(InfoExtractor): IE_NAME = 'tvp' IE_DESC = 'Telewizja Polska' _VALID_URL = r'https?://[^/]+\.tvp\.(?:pl|info)/(?:video/(?:[^,\s]*,)*|(?:(?!\d+/)[^/]+/)*)(?P<id>\d+)' _TESTS = [{ 'url': 'https://vod.tvp.pl/video/czas-honoru,i-seria-odc-13,194536', 'md5': 'a21eb0aa862f25414430f15fdfb9e76c', 'info_dict': { 'id': '194536', 'ext': 'mp4', 'title': 'Czas honoru, odc. 13 – Władek', 'description': 'md5:437f48b93558370b031740546b696e24', }, }, { 'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176', 'md5': 'b0005b542e5b4de643a9690326ab1257', 'info_dict': { 'id': '17916176', 'ext': 'mp4', 'title': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata', 'description': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata', }, }, { # page id is not the same as video id(#7799) 'url': 'https://wiadomosci.tvp.pl/33908820/28092017-1930', 'md5': '84cd3c8aec4840046e5ab712416b73d0', 'info_dict': { 'id': '33908820', 'ext': 'mp4', 'title': 'Wiadomości, 28.09.2017, 19:30', 'description': 'Wydanie główne codziennego serwisu informacyjnego.' 
}, 'skip': 'HTTP Error 404: Not Found', }, { 'url': 'http://vod.tvp.pl/seriale/obyczajowe/na-sygnale/sezon-2-27-/odc-39/17834272', 'only_matching': True, }, { 'url': 'http://wiadomosci.tvp.pl/25169746/24052016-1200', 'only_matching': True, }, { 'url': 'http://krakow.tvp.pl/25511623/25lecie-mck-wyjatkowe-miejsce-na-mapie-krakowa', 'only_matching': True, }, { 'url': 'http://teleexpress.tvp.pl/25522307/wierni-wzieli-udzial-w-procesjach', 'only_matching': True, }, { 'url': 'http://sport.tvp.pl/25522165/krychowiak-uspokaja-w-sprawie-kontuzji-dwa-tygodnie-to-maksimum', 'only_matching': True, }, { 'url': 'http://www.tvp.info/25511919/trwa-rewolucja-wladza-zdecydowala-sie-na-pogwalcenie-konstytucji', 'only_matching': True, }] def _real_extract(self, url): page_id = self._match_id(url) webpage = self._download_webpage(url, page_id) video_id = self._search_regex([ r'<iframe[^>]+src="[^"]*?object_id=(\d+)', r"object_id\s*:\s*'(\d+)'", r'data-video-id="(\d+)"'], webpage, 'video id', default=page_id) return { '_type': 'url_transparent', 'url': 'tvp:' + video_id, 'description': self._og_search_description( webpage, default=None) or self._html_search_meta( 'description', webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'ie_key': 'TVPEmbed', } class TVPEmbedIE(InfoExtractor): IE_NAME = 'tvp:embed' IE_DESC = 'Telewizja Polska' _VALID_URL = r'(?:tvp:|https?://[^/]+\.tvp\.(?:pl|info)/sess/tvplayer\.php\?.*?object_id=)(?P<id>\d+)' _TESTS = [{ 'url': 'tvp:194536', 'md5': 'a21eb0aa862f25414430f15fdfb9e76c', 'info_dict': { 'id': '194536', 'ext': 'mp4', 'title': 'Czas honoru, odc. 
13 – Władek', }, }, { # not available 'url': 'http://www.tvp.pl/sess/tvplayer.php?object_id=22670268', 'md5': '8c9cd59d16edabf39331f93bf8a766c7', 'info_dict': { 'id': '22670268', 'ext': 'mp4', 'title': 'Panorama, 07.12.2015, 15:40', }, 'skip': 'Transmisja została zakończona lub materiał niedostępny', }, { 'url': 'tvp:22670268', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.tvp.pl/sess/tvplayer.php?object_id=%s' % video_id, video_id) error = self._html_search_regex( r'(?s)<p[^>]+\bclass=["\']notAvailable__text["\'][^>]*>(.+?)</p>', webpage, 'error', default=None) or clean_html( get_element_by_attribute('class', 'msg error', webpage)) if error: raise ExtractorError('%s said: %s' % ( self.IE_NAME, clean_html(error)), expected=True) title = self._search_regex( r'name\s*:\s*([\'"])Title\1\s*,\s*value\s*:\s*\1(?P<title>.+?)\1', webpage, 'title', group='title') series_title = self._search_regex( r'name\s*:\s*([\'"])SeriesTitle\1\s*,\s*value\s*:\s*\1(?P<series>.+?)\1', webpage, 'series', group='series', default=None) if series_title: title = '%s, %s' % (series_title, title) thumbnail = self._search_regex( r"poster\s*:\s*'([^']+)'", webpage, 'thumbnail', default=None) video_url = self._search_regex( r'0:{src:([\'"])(?P<url>.*?)\1', webpage, 'formats', group='url', default=None) if not video_url or 'material_niedostepny.mp4' in video_url: video_url = self._download_json( 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id, video_id)['video_url'] formats = [] video_url_base = self._search_regex( r'(https?://.+?/video)(?:\.(?:ism|f4m|m3u8)|-\d+\.mp4)', video_url, 'video base url', default=None) if video_url_base: # TODO: <Group> found instead of <AdaptationSet> in MPD manifest. # It's not mentioned in MPEG-DASH standard. Figure that out. 
# formats.extend(self._extract_mpd_formats( # video_url_base + '.ism/video.mpd', # video_id, mpd_id='dash', fatal=False)) formats.extend(self._extract_ism_formats( video_url_base + '.ism/Manifest', video_id, 'mss', fatal=False)) formats.extend(self._extract_f4m_formats( video_url_base + '.ism/video.f4m', video_id, f4m_id='hds', fatal=False)) m3u8_formats = self._extract_m3u8_formats( video_url_base + '.ism/video.m3u8', video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) self._sort_formats(m3u8_formats) m3u8_formats = list(filter( lambda f: f.get('vcodec') != 'none', m3u8_formats)) formats.extend(m3u8_formats) for i, m3u8_format in enumerate(m3u8_formats, 2): http_url = '%s-%d.mp4' % (video_url_base, i) if self._is_valid_url(http_url, video_id): f = m3u8_format.copy() f.update({ 'url': http_url, 'format_id': f['format_id'].replace('hls', 'http'), 'protocol': 'http', }) formats.append(f) else: formats = [{ 'format_id': 'direct', 'url': video_url, 'ext': determine_ext(video_url, 'mp4'), }] self._sort_formats(formats) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'formats': formats, } class TVPWebsiteIE(InfoExtractor): IE_NAME = 'tvp:series' _VALID_URL = r'https?://vod\.tvp\.pl/website/(?P<display_id>[^,]+),(?P<id>\d+)' _TESTS = [{ # series 'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312/video', 'info_dict': { 'id': '38678312', }, 'playlist_count': 115, }, { # film 'url': 'https://vod.tvp.pl/website/gloria,35139666', 'info_dict': { 'id': '36637049', 'ext': 'mp4', 'title': 'Gloria, Gloria', }, 'params': { 'skip_download': True, }, 'add_ie': ['TVPEmbed'], }, { 'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312', 'only_matching': True, }] def _entries(self, display_id, playlist_id): url = 'https://vod.tvp.pl/website/%s,%s/video' % (display_id, playlist_id) for page_num in itertools.count(1): page = self._download_webpage( url, display_id, 'Downloading page %d' % page_num, query={'page': page_num}) video_ids = orderedSet(re.findall( 
r'<a[^>]+\bhref=["\']/video/%s,[^,]+,(\d+)' % display_id, page)) if not video_ids: break for video_id in video_ids: yield self.url_result( 'tvp:%s' % video_id, ie=TVPEmbedIE.ie_key(), video_id=video_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id, playlist_id = mobj.group('display_id', 'id') return self.playlist_result( self._entries(display_id, playlist_id), playlist_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/dplay.py
youtube_dl/extractor/dplay.py
# coding: utf-8 from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( determine_ext, ExtractorError, float_or_none, int_or_none, strip_or_none, unified_timestamp, ) class DPlayIE(InfoExtractor): _PATH_REGEX = r'/(?P<id>[^/]+/[^/?#]+)' _VALID_URL = r'''(?x)https?:// (?P<domain> (?:www\.)?(?P<host>d (?: play\.(?P<country>dk|fi|jp|se|no)| iscoveryplus\.(?P<plus_country>dk|es|fi|it|se|no) ) )| (?P<subdomain_country>es|it)\.dplay\.com )/[^/]+''' + _PATH_REGEX _TESTS = [{ # non geo restricted, via secure api, unsigned download hls URL 'url': 'https://www.dplay.se/videos/nugammalt-77-handelser-som-format-sverige/nugammalt-77-handelser-som-format-sverige-101', 'info_dict': { 'id': '13628', 'display_id': 'nugammalt-77-handelser-som-format-sverige/nugammalt-77-handelser-som-format-sverige-101', 'ext': 'mp4', 'title': 'Svensken lär sig njuta av livet', 'description': 'md5:d3819c9bccffd0fe458ca42451dd50d8', 'duration': 2649.856, 'timestamp': 1365453720, 'upload_date': '20130408', 'creator': 'Kanal 5', 'series': 'Nugammalt - 77 händelser som format Sverige', 'season_number': 1, 'episode_number': 1, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, }, { # geo restricted, via secure api, unsigned download hls URL 'url': 'http://www.dplay.dk/videoer/ted-bundy-mind-of-a-monster/ted-bundy-mind-of-a-monster', 'info_dict': { 'id': '104465', 'display_id': 'ted-bundy-mind-of-a-monster/ted-bundy-mind-of-a-monster', 'ext': 'mp4', 'title': 'Ted Bundy: Mind Of A Monster', 'description': 'md5:8b780f6f18de4dae631668b8a9637995', 'duration': 5290.027, 'timestamp': 1570694400, 'upload_date': '20191010', 'creator': 'ID - Investigation Discovery', 'series': 'Ted Bundy: Mind Of A Monster', 'season_number': 1, 'episode_number': 1, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, }, { # disco-api 'url': 'https://www.dplay.no/videoer/i-kongens-klr/sesong-1-episode-7', 
'info_dict': { 'id': '40206', 'display_id': 'i-kongens-klr/sesong-1-episode-7', 'ext': 'mp4', 'title': 'Episode 7', 'description': 'md5:e3e1411b2b9aebeea36a6ec5d50c60cf', 'duration': 2611.16, 'timestamp': 1516726800, 'upload_date': '20180123', 'series': 'I kongens klær', 'season_number': 1, 'episode_number': 7, }, 'params': { 'format': 'bestvideo', 'skip_download': True, }, 'skip': 'Available for Premium users', }, { 'url': 'http://it.dplay.com/nove/biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij/', 'md5': '2b808ffb00fc47b884a172ca5d13053c', 'info_dict': { 'id': '6918', 'display_id': 'biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij', 'ext': 'mp4', 'title': 'Luigi Di Maio: la psicosi di Stanislawskij', 'description': 'md5:3c7a4303aef85868f867a26f5cc14813', 'thumbnail': r're:^https?://.*\.jpe?g', 'upload_date': '20160524', 'timestamp': 1464076800, 'series': 'Biografie imbarazzanti', 'season_number': 1, 'episode': 'Episode 1', 'episode_number': 1, }, }, { 'url': 'https://es.dplay.com/dmax/la-fiebre-del-oro/temporada-8-episodio-1/', 'info_dict': { 'id': '21652', 'display_id': 'la-fiebre-del-oro/temporada-8-episodio-1', 'ext': 'mp4', 'title': 'Episodio 1', 'description': 'md5:b9dcff2071086e003737485210675f69', 'thumbnail': r're:^https?://.*\.png', 'upload_date': '20180709', 'timestamp': 1531173540, 'series': 'La fiebre del oro', 'season_number': 8, 'episode': 'Episode 1', 'episode_number': 1, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.dplay.fi/videot/shifting-gears-with-aaron-kaufman/episode-16', 'only_matching': True, }, { 'url': 'https://www.dplay.jp/video/gold-rush/24086', 'only_matching': True, }, { 'url': 'https://www.discoveryplus.se/videos/nugammalt-77-handelser-som-format-sverige/nugammalt-77-handelser-som-format-sverige-101', 'only_matching': True, }, { 'url': 'https://www.discoveryplus.dk/videoer/ted-bundy-mind-of-a-monster/ted-bundy-mind-of-a-monster', 'only_matching': True, }, { 'url': 
'https://www.discoveryplus.no/videoer/i-kongens-klr/sesong-1-episode-7', 'only_matching': True, }, { 'url': 'https://www.discoveryplus.it/videos/biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij', 'only_matching': True, }, { 'url': 'https://www.discoveryplus.es/videos/la-fiebre-del-oro/temporada-8-episodio-1', 'only_matching': True, }, { 'url': 'https://www.discoveryplus.fi/videot/shifting-gears-with-aaron-kaufman/episode-16', 'only_matching': True, }] def _process_errors(self, e, geo_countries): info = self._parse_json(e.cause.read().decode('utf-8'), None) error = info['errors'][0] error_code = error.get('code') if error_code == 'access.denied.geoblocked': self.raise_geo_restricted(countries=geo_countries) elif error_code in ('access.denied.missingpackage', 'invalid.token'): raise ExtractorError( 'This video is only available for registered users. You may want to use --cookies.', expected=True) raise ExtractorError(info['errors'][0]['detail'], expected=True) def _update_disco_api_headers(self, headers, disco_base, display_id, realm): headers['Authorization'] = 'Bearer ' + self._download_json( disco_base + 'token', display_id, 'Downloading token', query={ 'realm': realm, })['data']['attributes']['token'] def _download_video_playback_info(self, disco_base, video_id, headers): streaming = self._download_json( disco_base + 'playback/videoPlaybackInfo/' + video_id, video_id, headers=headers)['data']['attributes']['streaming'] streaming_list = [] for format_id, format_dict in streaming.items(): streaming_list.append({ 'type': format_id, 'url': format_dict.get('url'), }) return streaming_list def _get_disco_api_info(self, url, display_id, disco_host, realm, country): geo_countries = [country.upper()] self._initialize_geo_bypass({ 'countries': geo_countries, }) disco_base = 'https://%s/' % disco_host headers = { 'Referer': url, } self._update_disco_api_headers(headers, disco_base, display_id, realm) try: video = self._download_json( disco_base + 
'content/videos/' + display_id, display_id, headers=headers, query={ 'fields[channel]': 'name', 'fields[image]': 'height,src,width', 'fields[show]': 'name', 'fields[tag]': 'name', 'fields[video]': 'description,episodeNumber,name,publishStart,seasonNumber,videoDuration', 'include': 'images,primaryChannel,show,tags' }) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: self._process_errors(e, geo_countries) raise video_id = video['data']['id'] info = video['data']['attributes'] title = info['name'].strip() formats = [] try: streaming = self._download_video_playback_info( disco_base, video_id, headers) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: self._process_errors(e, geo_countries) raise for format_dict in streaming: if not isinstance(format_dict, dict): continue format_url = format_dict.get('url') if not format_url: continue format_id = format_dict.get('type') ext = determine_ext(format_url) if format_id == 'dash' or ext == 'mpd': formats.extend(self._extract_mpd_formats( format_url, display_id, mpd_id='dash', fatal=False)) elif format_id == 'hls' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) else: formats.append({ 'url': format_url, 'format_id': format_id, }) self._sort_formats(formats) creator = series = None tags = [] thumbnails = [] included = video.get('included') or [] if isinstance(included, list): for e in included: attributes = e.get('attributes') if not attributes: continue e_type = e.get('type') if e_type == 'channel': creator = attributes.get('name') elif e_type == 'image': src = attributes.get('src') if src: thumbnails.append({ 'url': src, 'width': int_or_none(attributes.get('width')), 'height': int_or_none(attributes.get('height')), }) if e_type == 'show': series = attributes.get('name') elif e_type == 'tag': name = attributes.get('name') if name: 
tags.append(name) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': strip_or_none(info.get('description')), 'duration': float_or_none(info.get('videoDuration'), 1000), 'timestamp': unified_timestamp(info.get('publishStart')), 'series': series, 'season_number': int_or_none(info.get('seasonNumber')), 'episode_number': int_or_none(info.get('episodeNumber')), 'creator': creator, 'tags': tags, 'thumbnails': thumbnails, 'formats': formats, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = mobj.group('id') domain = mobj.group('domain').lstrip('www.') country = mobj.group('country') or mobj.group('subdomain_country') or mobj.group('plus_country') host = 'disco-api.' + domain if domain[0] == 'd' else 'eu2-prod.disco-api.com' return self._get_disco_api_info( url, display_id, host, 'dplay' + country, country) class DiscoveryPlusIE(DPlayIE): _VALID_URL = r'https?://(?:www\.)?discoveryplus\.com/video' + DPlayIE._PATH_REGEX _TESTS = [{ 'url': 'https://www.discoveryplus.com/video/property-brothers-forever-home/food-and-family', 'info_dict': { 'id': '1140794', 'display_id': 'property-brothers-forever-home/food-and-family', 'ext': 'mp4', 'title': 'Food and Family', 'description': 'The brothers help a Richmond family expand their single-level home.', 'duration': 2583.113, 'timestamp': 1609304400, 'upload_date': '20201230', 'creator': 'HGTV', 'series': 'Property Brothers: Forever Home', 'season_number': 1, 'episode_number': 1, }, 'skip': 'Available for Premium users', }] def _update_disco_api_headers(self, headers, disco_base, display_id, realm): headers['x-disco-client'] = 'WEB:UNKNOWN:dplus_us:15.0.0' def _download_video_playback_info(self, disco_base, video_id, headers): return self._download_json( disco_base + 'playback/v3/videoPlaybackInfo', video_id, headers=headers, data=json.dumps({ 'deviceInfo': { 'adBlocker': False, }, 'videoId': video_id, 'wisteriaProperties': { 'platform': 'desktop', 'product': 'dplus_us', 
}, }).encode('utf-8'))['data']['attributes']['streaming'] def _real_extract(self, url): display_id = self._match_id(url) return self._get_disco_api_info( url, display_id, 'us1-prod-direct.discoveryplus.com', 'go', 'us') class HGTVDeIE(DPlayIE): _VALID_URL = r'https?://de\.hgtv\.com/sendungen' + DPlayIE._PATH_REGEX _TESTS = [{ 'url': 'https://de.hgtv.com/sendungen/tiny-house-klein-aber-oho/wer-braucht-schon-eine-toilette/', 'info_dict': { 'id': '151205', 'display_id': 'tiny-house-klein-aber-oho/wer-braucht-schon-eine-toilette', 'ext': 'mp4', 'title': 'Wer braucht schon eine Toilette', 'description': 'md5:05b40a27e7aed2c9172de34d459134e2', 'duration': 1177.024, 'timestamp': 1595705400, 'upload_date': '20200725', 'creator': 'HGTV', 'series': 'Tiny House - klein, aber oho', 'season_number': 3, 'episode_number': 3, }, 'params': { 'format': 'bestvideo', }, }] def _real_extract(self, url): display_id = self._match_id(url) return self._get_disco_api_info( url, display_id, 'eu1-prod.disco-api.com', 'hgtv', 'de')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/soundgasm.py
youtube_dl/extractor/soundgasm.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor class SoundgasmIE(InfoExtractor): IE_NAME = 'soundgasm' _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_-]+)/(?P<display_id>[0-9a-zA-Z_-]+)' _TEST = { 'url': 'http://soundgasm.net/u/ytdl/Piano-sample', 'md5': '010082a2c802c5275bb00030743e75ad', 'info_dict': { 'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9', 'ext': 'm4a', 'title': 'Piano sample', 'description': 'Royalty Free Sample Music', 'uploader': 'ytdl', } } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = mobj.group('display_id') webpage = self._download_webpage(url, display_id) audio_url = self._html_search_regex( r'(?s)m4a\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 'audio URL', group='url') title = self._search_regex( r'<div[^>]+\bclass=["\']jp-title[^>]+>([^<]+)', webpage, 'title', default=display_id) description = self._html_search_regex( (r'(?s)<div[^>]+\bclass=["\']jp-description[^>]+>(.+?)</div>', r'(?s)<li>Description:\s(.*?)<\/li>'), webpage, 'description', fatal=False) audio_id = self._search_regex( r'/([^/]+)\.m4a', audio_url, 'audio id', default=display_id) return { 'id': audio_id, 'display_id': display_id, 'url': audio_url, 'vcodec': 'none', 'title': title, 'description': description, 'uploader': mobj.group('user'), } class SoundgasmProfileIE(InfoExtractor): IE_NAME = 'soundgasm:profile' _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<id>[^/]+)/?(?:\#.*)?$' _TEST = { 'url': 'http://soundgasm.net/u/ytdl', 'info_dict': { 'id': 'ytdl', }, 'playlist_count': 1, } def _real_extract(self, url): profile_id = self._match_id(url) webpage = self._download_webpage(url, profile_id) entries = [ self.url_result(audio_url, 'Soundgasm') for audio_url in re.findall(r'href="([^"]+/u/%s/[^"]+)' % profile_id, webpage)] return self.playlist_result(entries, profile_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/khanacademy.py
youtube_dl/extractor/khanacademy.py
from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, try_get, ) class KhanAcademyBaseIE(InfoExtractor): _VALID_URL_TEMPL = r'https?://(?:www\.)?khanacademy\.org/(?P<id>(?:[^/]+/){%s}%s[^?#/&]+)' def _parse_video(self, video): return { '_type': 'url_transparent', 'url': video['youtubeId'], 'id': video.get('slug'), 'title': video.get('title'), 'thumbnail': video.get('imageUrl') or video.get('thumbnailUrl'), 'duration': int_or_none(video.get('duration')), 'description': video.get('description'), 'ie_key': 'Youtube', } def _real_extract(self, url): display_id = self._match_id(url) component_props = self._parse_json(self._download_json( 'https://www.khanacademy.org/api/internal/graphql', display_id, query={ 'hash': 1604303425, 'variables': json.dumps({ 'path': display_id, 'queryParams': '', }), })['data']['contentJson'], display_id)['componentProps'] return self._parse_component_props(component_props) class KhanAcademyIE(KhanAcademyBaseIE): IE_NAME = 'khanacademy' _VALID_URL = KhanAcademyBaseIE._VALID_URL_TEMPL % ('4', 'v/') _TEST = { 'url': 'https://www.khanacademy.org/computing/computer-science/cryptography/crypt/v/one-time-pad', 'md5': '9c84b7b06f9ebb80d22a5c8dedefb9a0', 'info_dict': { 'id': 'FlIG3TvQCBQ', 'ext': 'mp4', 'title': 'The one-time pad', 'description': 'The perfect cipher', 'duration': 176, 'uploader': 'Brit Cruise', 'uploader_id': 'khanacademy', 'upload_date': '20120411', 'timestamp': 1334170113, 'license': 'cc-by-nc-sa', }, 'add_ie': ['Youtube'], } def _parse_component_props(self, component_props): video = component_props['tutorialPageData']['contentModel'] info = self._parse_video(video) author_names = video.get('authorNames') info.update({ 'uploader': ', '.join(author_names) if author_names else None, 'timestamp': parse_iso8601(video.get('dateAdded')), 'license': video.get('kaUserLicense'), }) return info class KhanAcademyUnitIE(KhanAcademyBaseIE): IE_NAME = 
'khanacademy:unit' _VALID_URL = (KhanAcademyBaseIE._VALID_URL_TEMPL % ('2', '')) + '/?(?:[?#&]|$)' _TEST = { 'url': 'https://www.khanacademy.org/computing/computer-science/cryptography', 'info_dict': { 'id': 'cryptography', 'title': 'Cryptography', 'description': 'How have humans protected their secret messages through history? What has changed today?', }, 'playlist_mincount': 31, } def _parse_component_props(self, component_props): curation = component_props['curation'] entries = [] tutorials = try_get(curation, lambda x: x['tabs'][0]['modules'][0]['tutorials'], list) or [] for tutorial_number, tutorial in enumerate(tutorials, 1): chapter_info = { 'chapter': tutorial.get('title'), 'chapter_number': tutorial_number, 'chapter_id': tutorial.get('id'), } for content_item in (tutorial.get('contentItems') or []): if content_item.get('kind') == 'Video': info = self._parse_video(content_item) info.update(chapter_info) entries.append(info) return self.playlist_result( entries, curation.get('unit'), curation.get('title'), curation.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/onet.py
youtube_dl/extractor/onet.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, ExtractorError, float_or_none, get_element_by_class, int_or_none, js_to_json, NO_DEFAULT, parse_iso8601, remove_start, strip_or_none, url_basename, ) class OnetBaseIE(InfoExtractor): _URL_BASE_RE = r'https?://(?:(?:www\.)?onet\.tv|onet100\.vod\.pl)/[a-z]/' def _search_mvp_id(self, webpage): return self._search_regex( r'id=(["\'])mvp:(?P<id>.+?)\1', webpage, 'mvp id', group='id') def _extract_from_id(self, video_id, webpage=None): response = self._download_json( 'http://qi.ckm.onetapi.pl/', video_id, query={ 'body[id]': video_id, 'body[jsonrpc]': '2.0', 'body[method]': 'get_asset_detail', 'body[params][ID_Publikacji]': video_id, 'body[params][Service]': 'www.onet.pl', 'content-type': 'application/jsonp', 'x-onet-app': 'player.front.onetapi.pl', }) error = response.get('error') if error: raise ExtractorError( '%s said: %s' % (self.IE_NAME, error['message']), expected=True) video = response['result'].get('0') formats = [] for format_type, formats_dict in video['formats'].items(): if not isinstance(formats_dict, dict): continue for format_id, format_list in formats_dict.items(): if not isinstance(format_list, list): continue for f in format_list: video_url = f.get('url') if not video_url: continue ext = determine_ext(video_url) if format_id.startswith('ism'): formats.extend(self._extract_ism_formats( video_url, video_id, 'mss', fatal=False)) elif ext == 'mpd': formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) elif format_id.startswith('hls'): formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: http_f = { 'url': video_url, 'format_id': format_id, 'abr': float_or_none(f.get('audio_bitrate')), } if format_type == 'audio': http_f['vcodec'] = 'none' else: http_f.update({ 'height': int_or_none(f.get('vertical_resolution')), 
'width': int_or_none(f.get('horizontal_resolution')), 'vbr': float_or_none(f.get('video_bitrate')), }) formats.append(http_f) self._sort_formats(formats) meta = video.get('meta', {}) title = (self._og_search_title( webpage, default=None) if webpage else None) or meta['title'] description = (self._og_search_description( webpage, default=None) if webpage else None) or meta.get('description') duration = meta.get('length') or meta.get('lenght') timestamp = parse_iso8601(meta.get('addDate'), ' ') return { 'id': video_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'formats': formats, } class OnetMVPIE(OnetBaseIE): _VALID_URL = r'onetmvp:(?P<id>\d+\.\d+)' _TEST = { 'url': 'onetmvp:381027.1509591944', 'only_matching': True, } def _real_extract(self, url): return self._extract_from_id(self._match_id(url)) class OnetIE(OnetBaseIE): _VALID_URL = OnetBaseIE._URL_BASE_RE + r'[a-z]+/(?P<display_id>[0-9a-z-]+)/(?P<id>[0-9a-z]+)' IE_NAME = 'onet.tv' _TESTS = [{ 'url': 'http://onet.tv/k/openerfestival/open-er-festival-2016-najdziwniejsze-wymagania-gwiazd/qbpyqc', 'md5': '436102770fb095c75b8bb0392d3da9ff', 'info_dict': { 'id': 'qbpyqc', 'display_id': 'open-er-festival-2016-najdziwniejsze-wymagania-gwiazd', 'ext': 'mp4', 'title': 'Open\'er Festival 2016: najdziwniejsze wymagania gwiazd', 'description': 'Trzy samochody, których nigdy nie użyto, prywatne spa, hotel dekorowany czarnym suknem czy nielegalne używki. 
Organizatorzy koncertów i festiwali muszą stawać przed nie lada wyzwaniem zapraszając gwia...', 'upload_date': '20160705', 'timestamp': 1467721580, }, }, { 'url': 'https://onet100.vod.pl/k/openerfestival/open-er-festival-2016-najdziwniejsze-wymagania-gwiazd/qbpyqc', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id, video_id = mobj.group('display_id', 'id') webpage = self._download_webpage(url, display_id) mvp_id = self._search_mvp_id(webpage) info_dict = self._extract_from_id(mvp_id, webpage) info_dict.update({ 'id': video_id, 'display_id': display_id, }) return info_dict class OnetChannelIE(OnetBaseIE): _VALID_URL = OnetBaseIE._URL_BASE_RE + r'(?P<id>[a-z]+)(?:[?#]|$)' IE_NAME = 'onet.tv:channel' _TESTS = [{ 'url': 'http://onet.tv/k/openerfestival', 'info_dict': { 'id': 'openerfestival', 'title': "Open'er Festival", 'description': "Tak było na Open'er Festival 2016! Oglądaj nasze reportaże i wywiady z artystami.", }, 'playlist_mincount': 35, }, { 'url': 'https://onet100.vod.pl/k/openerfestival', 'only_matching': True, }] def _real_extract(self, url): channel_id = self._match_id(url) webpage = self._download_webpage(url, channel_id) current_clip_info = self._parse_json(self._search_regex( r'var\s+currentClip\s*=\s*({[^}]+})', webpage, 'video info'), channel_id, transform_source=lambda s: js_to_json(re.sub(r'\'\s*\+\s*\'', '', s))) video_id = remove_start(current_clip_info['ckmId'], 'mvp:') video_name = url_basename(current_clip_info['url']) if self._downloader.params.get('noplaylist'): self.to_screen( 'Downloading just video %s because of --no-playlist' % video_name) return self._extract_from_id(video_id, webpage) self.to_screen( 'Downloading channel %s - add --no-playlist to just download video %s' % ( channel_id, video_name)) matches = re.findall( r'<a[^>]+href=[\'"](%s[a-z]+/[0-9a-z-]+/[0-9a-z]+)' % self._URL_BASE_RE, webpage) entries = [ self.url_result(video_link, OnetIE.ie_key()) for video_link in 
matches] channel_title = strip_or_none(get_element_by_class('o_channelName', webpage)) channel_description = strip_or_none(get_element_by_class('o_channelDesc', webpage)) return self.playlist_result(entries, channel_id, channel_title, channel_description) class OnetPlIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?(?:onet|businessinsider\.com|plejada)\.pl/(?:[^/]+/)+(?P<id>[0-9a-z]+)' IE_NAME = 'onet.pl' _TESTS = [{ 'url': 'http://eurosport.onet.pl/zimowe/skoki-narciarskie/ziobro-wygral-kwalifikacje-w-pjongczangu/9ckrly', 'md5': 'b94021eb56214c3969380388b6e73cb0', 'info_dict': { 'id': '1561707.1685479', 'ext': 'mp4', 'title': 'Ziobro wygrał kwalifikacje w Pjongczangu', 'description': 'md5:61fb0740084d2d702ea96512a03585b4', 'upload_date': '20170214', 'timestamp': 1487078046, }, }, { # embedded via pulsembed 'url': 'http://film.onet.pl/pensjonat-nad-rozlewiskiem-relacja-z-planu-serialu/y428n0', 'info_dict': { 'id': '501235.965429946', 'ext': 'mp4', 'title': '"Pensjonat nad rozlewiskiem": relacja z planu serialu', 'upload_date': '20170622', 'timestamp': 1498159955, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://film.onet.pl/zwiastuny/ghost-in-the-shell-drugi-zwiastun-pl/5q6yl3', 'only_matching': True, }, { 'url': 'http://moto.onet.pl/jak-wybierane-sa-miejsca-na-fotoradary/6rs04e', 'only_matching': True, }, { 'url': 'http://businessinsider.com.pl/wideo/scenariusz-na-koniec-swiata-wedlug-nasa/dwnqptk', 'only_matching': True, }, { 'url': 'http://plejada.pl/weronika-rosati-o-swoim-domniemanym-slubie/n2bq89', 'only_matching': True, }] def _search_mvp_id(self, webpage, default=NO_DEFAULT): return self._search_regex( r'data-(?:params-)?mvp=["\'](\d+\.\d+)', webpage, 'mvp id', default=default) def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) mvp_id = self._search_mvp_id(webpage, default=None) if not mvp_id: pulsembed_url = self._search_regex( 
r'data-src=(["\'])(?P<url>(?:https?:)?//pulsembed\.eu/.+?)\1', webpage, 'pulsembed url', group='url') webpage = self._download_webpage( pulsembed_url, video_id, 'Downloading pulsembed webpage') mvp_id = self._search_mvp_id(webpage) return self.url_result( 'onetmvp:%s' % mvp_id, OnetMVPIE.ie_key(), video_id=mvp_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/restudy.py
youtube_dl/extractor/restudy.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class RestudyIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|portal)\.)?restudy\.dk/video/[^/]+/id/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.restudy.dk/video/play/id/1637', 'info_dict': { 'id': '1637', 'ext': 'flv', 'title': 'Leiden-frosteffekt', 'description': 'Denne video er et eksperiment med flydende kvælstof.', }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'https://portal.restudy.dk/video/leiden-frosteffekt/id/1637', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage).strip() description = self._og_search_description(webpage).strip() formats = self._extract_smil_formats( 'https://cdn.portal.restudy.dk/dynamic/themes/front/awsmedia/SmilDirectory/video_%s.xml' % video_id, video_id) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/nhl.py
youtube_dl/extractor/nhl.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, int_or_none, parse_iso8601, parse_duration, ) class NHLBaseIE(InfoExtractor): def _real_extract(self, url): site, tmp_id = re.match(self._VALID_URL, url).groups() video_data = self._download_json( 'https://%s/%s/%sid/v1/%s/details/web-v1.json' % (self._CONTENT_DOMAIN, site[:3], 'item/' if site == 'mlb' else '', tmp_id), tmp_id) if video_data.get('type') != 'video': video_data = video_data['media'] video = video_data.get('video') if video: video_data = video else: videos = video_data.get('videos') if videos: video_data = videos[0] video_id = compat_str(video_data['id']) title = video_data['title'] formats = [] for playback in video_data.get('playbacks', []): playback_url = playback.get('url') if not playback_url: continue ext = determine_ext(playback_url) if ext == 'm3u8': m3u8_formats = self._extract_m3u8_formats( playback_url, video_id, 'mp4', 'm3u8_native', m3u8_id=playback.get('name', 'hls'), fatal=False) self._check_formats(m3u8_formats, video_id) formats.extend(m3u8_formats) else: height = int_or_none(playback.get('height')) formats.append({ 'format_id': playback.get('name', 'http' + ('-%dp' % height if height else '')), 'url': playback_url, 'width': int_or_none(playback.get('width')), 'height': height, 'tbr': int_or_none(self._search_regex(r'_(\d+)[kK]', playback_url, 'bitrate', default=None)), }) self._sort_formats(formats) thumbnails = [] cuts = video_data.get('image', {}).get('cuts') or [] if isinstance(cuts, dict): cuts = cuts.values() for thumbnail_data in cuts: thumbnail_url = thumbnail_data.get('src') if not thumbnail_url: continue thumbnails.append({ 'url': thumbnail_url, 'width': int_or_none(thumbnail_data.get('width')), 'height': int_or_none(thumbnail_data.get('height')), }) return { 'id': video_id, 'title': title, 'description': video_data.get('description'), 'timestamp': 
parse_iso8601(video_data.get('date')), 'duration': parse_duration(video_data.get('duration')), 'thumbnails': thumbnails, 'formats': formats, } class NHLIE(NHLBaseIE): IE_NAME = 'nhl.com' _VALID_URL = r'https?://(?:www\.)?(?P<site>nhl|wch2016)\.com/(?:[^/]+/)*c-(?P<id>\d+)' _CONTENT_DOMAIN = 'nhl.bamcontent.com' _TESTS = [{ # type=video 'url': 'https://www.nhl.com/video/anisimov-cleans-up-mess/t-277752844/c-43663503', 'md5': '0f7b9a8f986fb4b4eeeece9a56416eaf', 'info_dict': { 'id': '43663503', 'ext': 'mp4', 'title': 'Anisimov cleans up mess', 'description': 'md5:a02354acdfe900e940ce40706939ca63', 'timestamp': 1461288600, 'upload_date': '20160422', }, }, { # type=article 'url': 'https://www.nhl.com/news/dennis-wideman-suspended/c-278258934', 'md5': '1f39f4ea74c1394dea110699a25b366c', 'info_dict': { 'id': '40784403', 'ext': 'mp4', 'title': 'Wideman suspended by NHL', 'description': 'Flames defenseman Dennis Wideman was banned 20 games for violation of Rule 40 (Physical Abuse of Officials)', 'upload_date': '20160204', 'timestamp': 1454544904, }, }, { # Some m3u8 URLs are invalid (https://github.com/ytdl-org/youtube-dl/issues/10713) 'url': 'https://www.nhl.com/predators/video/poile-laviolette-on-subban-trade/t-277437416/c-44315003', 'md5': '50b2bb47f405121484dda3ccbea25459', 'info_dict': { 'id': '44315003', 'ext': 'mp4', 'title': 'Poile, Laviolette on Subban trade', 'description': 'General manager David Poile and head coach Peter Laviolette share their thoughts on acquiring P.K. Subban from Montreal (06/29/16)', 'timestamp': 1467242866, 'upload_date': '20160629', }, }, { 'url': 'https://www.wch2016.com/video/caneur-best-of-game-2-micd-up/t-281230378/c-44983703', 'only_matching': True, }, { 'url': 'https://www.wch2016.com/news/3-stars-team-europe-vs-team-canada/c-282195068', 'only_matching': True, }]
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/viewlift.py
youtube_dl/extractor/viewlift.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( ExtractorError, int_or_none, parse_age_limit, ) class ViewLiftBaseIE(InfoExtractor): _API_BASE = 'https://prod-api.viewlift.com/' _DOMAINS_REGEX = r'(?:(?:main\.)?snagfilms|snagxtreme|funnyforfree|kiddovid|winnersview|(?:monumental|lax)sportsnetwork|vayafilm|failarmy|ftfnext|lnppass\.legapallacanestro|moviespree|app\.myoutdoortv|neoufitness|pflmma|theidentitytb)\.com|(?:hoichoi|app\.horseandcountry|kronon|marquee|supercrosslive)\.tv' _SITE_MAP = { 'ftfnext': 'lax', 'funnyforfree': 'snagfilms', 'hoichoi': 'hoichoitv', 'kiddovid': 'snagfilms', 'laxsportsnetwork': 'lax', 'legapallacanestro': 'lnp', 'marquee': 'marquee-tv', 'monumentalsportsnetwork': 'monumental-network', 'moviespree': 'bingeflix', 'pflmma': 'pfl', 'snagxtreme': 'snagfilms', 'theidentitytb': 'tampabay', 'vayafilm': 'snagfilms', } _TOKENS = {} def _call_api(self, site, path, video_id, query): token = self._TOKENS.get(site) if not token: token_query = {'site': site} email, password = self._get_login_info(netrc_machine=site) if email: resp = self._download_json( self._API_BASE + 'identity/signin', video_id, 'Logging in', query=token_query, data=json.dumps({ 'email': email, 'password': password, }).encode()) else: resp = self._download_json( self._API_BASE + 'identity/anonymous-token', video_id, 'Downloading authorization token', query=token_query) self._TOKENS[site] = token = resp['authorizationToken'] return self._download_json( self._API_BASE + path, video_id, headers={'Authorization': token}, query=query) class ViewLiftEmbedIE(ViewLiftBaseIE): IE_NAME = 'viewlift:embed' _VALID_URL = r'https?://(?:(?:www|embed)\.)?(?P<domain>%s)/embed/player\?.*\bfilmId=(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})' % ViewLiftBaseIE._DOMAINS_REGEX _TESTS = [{ 'url': 'http://embed.snagfilms.com/embed/player?filmId=74849a00-85a9-11e1-9660-123139220831&w=500', 'md5': 
'2924e9215c6eff7a55ed35b72276bd93', 'info_dict': { 'id': '74849a00-85a9-11e1-9660-123139220831', 'ext': 'mp4', 'title': '#whilewewatch', 'description': 'md5:b542bef32a6f657dadd0df06e26fb0c8', 'timestamp': 1334350096, 'upload_date': '20120413', } }, { # invalid labels, 360p is better that 480p 'url': 'http://www.snagfilms.com/embed/player?filmId=17ca0950-a74a-11e0-a92a-0026bb61d036', 'md5': '882fca19b9eb27ef865efeeaed376a48', 'info_dict': { 'id': '17ca0950-a74a-11e0-a92a-0026bb61d036', 'ext': 'mp4', 'title': 'Life in Limbo', }, 'skip': 'The video does not exist', }, { 'url': 'http://www.snagfilms.com/embed/player?filmId=0000014c-de2f-d5d6-abcf-ffef58af0017', 'only_matching': True, }] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:embed\.)?(?:%s)/embed/player.+?)\1' % ViewLiftBaseIE._DOMAINS_REGEX, webpage) if mobj: return mobj.group('url') def _real_extract(self, url): domain, film_id = re.match(self._VALID_URL, url).groups() site = domain.split('.')[-2] if site in self._SITE_MAP: site = self._SITE_MAP[site] try: content_data = self._call_api( site, 'entitlement/video/status', film_id, { 'id': film_id })['video'] except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: error_message = self._parse_json(e.cause.read().decode(), film_id).get('errorMessage') if error_message == 'User does not have a valid subscription or has not purchased this content.': self.raise_login_required() raise ExtractorError(error_message, expected=True) raise gist = content_data['gist'] title = gist['title'] video_assets = content_data['streamingInfo']['videoAssets'] formats = [] mpeg_video_assets = video_assets.get('mpeg') or [] for video_asset in mpeg_video_assets: video_asset_url = video_asset.get('url') if not video_asset: continue bitrate = int_or_none(video_asset.get('bitrate')) height = int_or_none(self._search_regex( r'^_?(\d+)[pP]$', video_asset.get('renditionValue'), 'height', 
default=None)) formats.append({ 'url': video_asset_url, 'format_id': 'http%s' % ('-%d' % bitrate if bitrate else ''), 'tbr': bitrate, 'height': height, 'vcodec': video_asset.get('codec'), }) hls_url = video_assets.get('hls') if hls_url: formats.extend(self._extract_m3u8_formats( hls_url, film_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) self._sort_formats(formats, ('height', 'tbr', 'format_id')) info = { 'id': film_id, 'title': title, 'description': gist.get('description'), 'thumbnail': gist.get('videoImageUrl'), 'duration': int_or_none(gist.get('runtime')), 'age_limit': parse_age_limit(content_data.get('parentalRating')), 'timestamp': int_or_none(gist.get('publishDate'), 1000), 'formats': formats, } for k in ('categories', 'tags'): info[k] = [v['title'] for v in content_data.get(k, []) if v.get('title')] return info class ViewLiftIE(ViewLiftBaseIE): IE_NAME = 'viewlift' _VALID_URL = r'https?://(?:www\.)?(?P<domain>%s)(?P<path>(?:/(?:films/title|show|(?:news/)?videos?|watch))?/(?P<id>[^?#]+))' % ViewLiftBaseIE._DOMAINS_REGEX _TESTS = [{ 'url': 'http://www.snagfilms.com/films/title/lost_for_life', 'md5': '19844f897b35af219773fd63bdec2942', 'info_dict': { 'id': '0000014c-de2f-d5d6-abcf-ffef58af0017', 'display_id': 'lost_for_life', 'ext': 'mp4', 'title': 'Lost for Life', 'description': 'md5:ea10b5a50405ae1f7b5269a6ec594102', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 4489, 'categories': 'mincount:3', 'age_limit': 14, 'upload_date': '20150421', 'timestamp': 1429656820, } }, { 'url': 'http://www.snagfilms.com/show/the_world_cut_project/india', 'md5': 'e6292e5b837642bbda82d7f8bf3fbdfd', 'info_dict': { 'id': '00000145-d75c-d96e-a9c7-ff5c67b20000', 'display_id': 'the_world_cut_project/india', 'ext': 'mp4', 'title': 'India', 'description': 'md5:5c168c5a8f4719c146aad2e0dfac6f5f', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 979, 'timestamp': 1399478279, 'upload_date': '20140507', } }, { 'url': 'http://main.snagfilms.com/augie_alone/s_2_ep_12_love', 
'info_dict': { 'id': '00000148-7b53-de26-a9fb-fbf306f70020', 'display_id': 'augie_alone/s_2_ep_12_love', 'ext': 'mp4', 'title': 'S. 2 Ep. 12 - Love', 'description': 'Augie finds love.', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 107, 'upload_date': '20141012', 'timestamp': 1413129540, 'age_limit': 17, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://main.snagfilms.com/films/title/the_freebie', 'only_matching': True, }, { # Film is not playable in your area. 'url': 'http://www.snagfilms.com/films/title/inside_mecca', 'only_matching': True, }, { # Film is not available. 'url': 'http://www.snagfilms.com/show/augie_alone/flirting', 'only_matching': True, }, { 'url': 'http://www.winnersview.com/videos/the-good-son', 'only_matching': True, }, { # Was once Kaltura embed 'url': 'https://www.monumentalsportsnetwork.com/videos/john-carlson-postgame-2-25-15', 'only_matching': True, }, { 'url': 'https://www.marquee.tv/watch/sadlerswells-sacredmonsters', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if ViewLiftEmbedIE.suitable(url) else super(ViewLiftIE, cls).suitable(url) def _real_extract(self, url): domain, path, display_id = re.match(self._VALID_URL, url).groups() site = domain.split('.')[-2] if site in self._SITE_MAP: site = self._SITE_MAP[site] modules = self._call_api( site, 'content/pages', display_id, { 'includeContent': 'true', 'moduleOffset': 1, 'path': path, 'site': site, })['modules'] film_id = next(m['contentData'][0]['gist']['id'] for m in modules if m.get('moduleType') == 'VideoDetailModule') return { '_type': 'url_transparent', 'url': 'http://%s/embed/player?filmId=%s' % (domain, film_id), 'id': film_id, 'display_id': display_id, 'ie_key': 'ViewLiftEmbed', }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/showroomlive.py
youtube_dl/extractor/showroomlive.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_str from ..utils import ( ExtractorError, int_or_none, urljoin, ) class ShowRoomLiveIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?showroom-live\.com/(?!onlive|timetable|event|campaign|news|ranking|room)(?P<id>[^/?#&]+)' _TEST = { 'url': 'https://www.showroom-live.com/48_Nana_Okada', 'only_matching': True, } def _real_extract(self, url): broadcaster_id = self._match_id(url) webpage = self._download_webpage(url, broadcaster_id) room_id = self._search_regex( (r'SrGlobal\.roomId\s*=\s*(\d+)', r'(?:profile|room)\?room_id\=(\d+)'), webpage, 'room_id') room = self._download_json( urljoin(url, '/api/room/profile?room_id=%s' % room_id), broadcaster_id) is_live = room.get('is_onlive') if is_live is not True: raise ExtractorError('%s is offline' % broadcaster_id, expected=True) uploader = room.get('performer_name') or broadcaster_id title = room.get('room_name') or room.get('main_name') or uploader streaming_url_list = self._download_json( urljoin(url, '/api/live/streaming_url?room_id=%s' % room_id), broadcaster_id)['streaming_url_list'] formats = [] for stream in streaming_url_list: stream_url = stream.get('url') if not stream_url: continue stream_type = stream.get('type') if stream_type == 'hls': m3u8_formats = self._extract_m3u8_formats( stream_url, broadcaster_id, ext='mp4', m3u8_id='hls', live=True) for f in m3u8_formats: f['quality'] = int_or_none(stream.get('quality', 100)) formats.extend(m3u8_formats) elif stream_type == 'rtmp': stream_name = stream.get('stream_name') if not stream_name: continue formats.append({ 'url': stream_url, 'play_path': stream_name, 'page_url': url, 'player_url': 'https://www.showroom-live.com/assets/swf/v3/ShowRoomLive.swf', 'rtmp_live': True, 'ext': 'flv', 'format_id': 'rtmp', 'format_note': stream.get('label'), 'quality': int_or_none(stream.get('quality', 100)), }) self._sort_formats(formats) return { 'id': 
compat_str(room.get('live_id') or broadcaster_id), 'title': self._live_title(title), 'description': room.get('description'), 'timestamp': int_or_none(room.get('current_live_started_at')), 'uploader': uploader, 'uploader_id': broadcaster_id, 'view_count': int_or_none(room.get('view_num')), 'formats': formats, 'is_live': True, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false