repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fc2.py
yt_dlp/extractor/fc2.py
import re
import urllib.parse

from .common import InfoExtractor
from ..networking import Request
from ..utils import (
    ExtractorError,
    UserNotLive,
    js_to_json,
    traverse_obj,
    update_url_query,
    urlencode_postdata,
    urljoin,
)


class FC2IE(InfoExtractor):
    """Extractor for VOD content on video.fc2.com (also handles internal ``fc2:<id>`` URLs)."""
    _VALID_URL = r'(?:https?://video\.fc2\.com/(?:[^/]+/)*content/|fc2:)(?P<id>[^/]+)'
    IE_NAME = 'fc2'
    _NETRC_MACHINE = 'fc2'
    _TESTS = [{
        'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
        'md5': 'a6ebe8ebe0396518689d963774a54eb7',
        'info_dict': {
            'id': '20121103kUan1KHs',
            'title': 'Boxing again with Puff',
            'ext': 'mp4',
            'thumbnail': r're:https?://.+\.jpe?g',
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }, {
        # Direct video url
        'url': 'https://video.fc2.com/content/20121209FP73fxDx',
        'md5': '066bdb9b3a56a97f49cbf0d0b8a75a1f',
        'info_dict': {
            'id': '20121209FP73fxDx',
            'title': 'Farewelling The Wiggles Live in Sydney Dec 8 2012',
            'ext': 'mp4',
            'thumbnail': r're:https?://.+\.jpe?g',
            'description': 'Saying goodbye to the Wiggles at their Celebration Concert in Sydney, and what a concert that was!',
        },
    }, {
        'url': 'http://video.fc2.com/en/content/20150125cEva0hDn/',
        'info_dict': {
            'id': '20150125cEva0hDn',
            'ext': 'mp4',
        },
        'params': {
            'username': 'ytdl@yt-dl.org',
            'password': '(snip)',
        },
        'skip': 'requires actual password',
    }, {
        'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
        'only_matching': True,
    }]

    def _login(self):
        """Log in with netrc/CLI credentials.

        Returns True on success, False when no credentials are configured
        or the login was rejected (a warning is reported in that case).
        """
        username, password = self._get_login_info()
        if username is None or password is None:
            return False

        # Log in
        login_form_strs = {
            'email': username,
            'password': password,
            'done': 'video',
            'Submit': ' Login ',
        }

        login_data = urlencode_postdata(login_form_strs)
        request = Request(
            'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)

        login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')
        if 'mode=redirect&login=done' not in login_results:
            self.report_warning('unable to log in: bad username or password')
            return False

        # this is also needed: the login is only completed after following the redirect
        login_redir = Request('http://id.fc2.com/?mode=redirect&login=done')
        self._download_webpage(
            login_redir, None, note='Login redirect', errnote='Login redirect failed')

        return True

    def _real_extract(self, url):
        video_id = self._match_id(url)
        self._login()
        webpage = None
        if not url.startswith('fc2:'):
            webpage = self._download_webpage(url, video_id)
            self.cookiejar.clear_session_cookies()  # must clear
            self._login()

        title, thumbnail, description = None, None, None
        if webpage is not None:
            title = self._html_search_regex(
                (r'<h2\s+class="videoCnt_title">([^<]+?)</h2>',
                 r'\s+href="[^"]+"\s*title="([^"]+?)"\s*rel="nofollow">\s*<img',
                 # there's two matches in the webpage
                 r'\s+href="[^"]+"\s*title="([^"]+?)"\s*rel="nofollow">\s*\1'),
                webpage,
                'title', fatal=False)
            thumbnail = self._og_search_thumbnail(webpage)
            description = self._og_search_description(webpage, default=None)

        vidplaylist = self._download_json(
            f'https://video.fc2.com/api/v3/videoplaylist/{video_id}?sh=1&fs=0', video_id,
            note='Downloading info page')
        vid_url = traverse_obj(vidplaylist, ('playlist', 'nq'))
        if not vid_url:
            raise ExtractorError('Unable to extract video URL')
        vid_url = urljoin('https://video.fc2.com/', vid_url)

        return {
            'id': video_id,
            'title': title,
            'url': vid_url,
            'ext': 'mp4',
            # type == 2 indicates an HLS playlist; otherwise a direct HTTPS URL
            'protocol': 'm3u8_native' if vidplaylist.get('type') == 2 else 'https',
            'description': description,
            'thumbnail': thumbnail,
        }


class FC2EmbedIE(InfoExtractor):
    """Extractor for legacy flv2.swf embed URLs; delegates to FC2IE via ``fc2:<id>``."""
    _VALID_URL = r'https?://video\.fc2\.com/flv2\.swf\?(?P<query>.+)'
    IE_NAME = 'fc2:embed'

    _TEST = {
        'url': 'http://video.fc2.com/flv2.swf?t=201404182936758512407645&i=20130316kwishtfitaknmcgd76kjd864hso93htfjcnaogz629mcgfs6rbfk0hsycma7shkf85937cbchfygd74&i=201403223kCqB3Ez&d=2625&sj=11&lang=ja&rel=1&from=11&cmt=1&tk=TlRBM09EQTNNekU9&tl=プリズン・ブレイク%20S1-01%20マイケル%20【吹替】',
        'md5': 'b8aae5334cb691bdb1193a88a6ab5d5a',
        'info_dict': {
            'id': '201403223kCqB3Ez',
            'ext': 'flv',
            'title': 'プリズン・ブレイク S1-01 マイケル 【吹替】',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        query = urllib.parse.parse_qs(mobj.group('query'))

        # The last 'i' parameter is the video id; earlier ones are tokens
        video_id = query['i'][-1]
        title = query.get('tl', [f'FC2 video {video_id}'])[0]

        sj = query.get('sj', [None])[0]
        thumbnail = None
        if sj:
            # See thumbnailImagePath() in ServerConst.as of flv2.swf
            thumbnail = 'http://video{}-thumbnail.fc2.com/up/pic/{}.jpg'.format(
                sj, '/'.join((video_id[:6], video_id[6:8], video_id[-2], video_id[-1], video_id)))

        return {
            '_type': 'url_transparent',
            'ie_key': FC2IE.ie_key(),
            'url': f'fc2:{video_id}',
            'title': title,
            'thumbnail': thumbnail,
        }


class FC2LiveIE(InfoExtractor):
    """Extractor for live streams on live.fc2.com (HLS over a WebSocket-negotiated URL)."""
    _VALID_URL = r'https?://live\.fc2\.com/(?P<id>\d+)'
    IE_NAME = 'fc2:live'

    _TESTS = [{
        'url': 'https://live.fc2.com/57892267/',
        'info_dict': {
            'id': '57892267',
            'title': 'どこまで・・・',
            'uploader': 'あつあげ',
            'uploader_id': '57892267',
            'thumbnail': r're:https?://.+fc2.+',
        },
        'skip': 'livestream',
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(f'https://live.fc2.com/{video_id}/', video_id)

        self._set_cookie('live.fc2.com', 'js-player_size', '1')

        member_api = self._download_json(
            'https://live.fc2.com/api/memberApi.php', video_id, data=urlencode_postdata({
                'channel': '1',
                'profile': '1',
                'user': '1',
                'streamid': video_id,
            }), note='Requesting member info')

        control_server = self._download_json(
            'https://live.fc2.com/api/getControlServer.php', video_id, note='Downloading ControlServer data',
            data=urlencode_postdata({
                'channel_id': video_id,
                'mode': 'play',
                'orz': '',
                'channel_version': member_api['data']['channel_data']['version'],
                'client_version': '2.1.0\n [1]',
                'client_type': 'pc',
                'client_app': 'browser_hls',
                'ipv6': '',
            }), headers={'X-Requested-With': 'XMLHttpRequest'})
        # A non-zero 'status' indicates the stream is not live, so check truthiness
        if traverse_obj(control_server, ('status', {int})) and 'control_token' not in control_server:
            raise UserNotLive(video_id=video_id)
        self._set_cookie('live.fc2.com', 'l_ortkn', control_server['orz_raw'])

        ws_url = update_url_query(control_server['url'], {'control_token': control_server['control_token']})
        playlist_data = None

        ws = self._request_webpage(Request(ws_url, headers={
            'Origin': 'https://live.fc2.com',
        }), video_id, note='Fetching HLS playlist info via WebSocket')

        self.write_debug('Sending HLS server request')

        # First, wait for the handshake to complete before requesting playlists
        while True:
            recv = ws.recv()
            if not recv:
                continue
            data = self._parse_json(recv, video_id, fatal=False)
            if not data or not isinstance(data, dict):
                continue

            if data.get('name') == 'connect_complete':
                break
        ws.send(r'{"name":"get_hls_information","arguments":{},"id":1}')

        # Then wait for the response matching our request id
        while True:
            recv = ws.recv()
            if not recv:
                continue
            data = self._parse_json(recv, video_id, fatal=False)
            if not data or not isinstance(data, dict):
                continue

            if data.get('name') == '_response_' and data.get('id') == 1:
                self.write_debug('Goodbye')
                playlist_data = data
                break
            self.write_debug('Server said: {}{}'.format(recv[:100], '...' if len(recv) > 100 else ''))

        if not playlist_data:
            raise ExtractorError('Unable to fetch HLS playlist info via WebSocket')

        formats = []
        for name, playlists in playlist_data['arguments'].items():
            if not isinstance(playlists, list):
                continue
            for pl in playlists:
                # NOTE: 'url' may be absent/None for some entries; guard the
                # membership test so it does not raise TypeError on None
                if pl.get('status') == 0 and 'master_playlist' in (pl.get('url') or ''):
                    formats.extend(self._extract_m3u8_formats(
                        pl['url'], video_id, ext='mp4', m3u8_id=name, live=True,
                        headers={
                            'Origin': 'https://live.fc2.com',
                            'Referer': url,
                        }))

        for fmt in formats:
            fmt.update({
                'protocol': 'fc2_live',
                'ws': ws,
            })

        title = self._html_search_meta(('og:title', 'twitter:title'), webpage, 'live title', fatal=False)
        if not title:
            title = self._html_extract_title(webpage, 'html title', fatal=False)
            if title:
                # remove service name in <title>
                title = re.sub(r'\s+-\s+.+$', '', title)
        uploader = None
        if title:
            match = self._search_regex(r'^(.+?)\s*\[(.+?)\]$', title, 'title and uploader', default=None, group=(1, 2))
            if match and all(match):
                title, uploader = match

        live_info_view = self._search_regex(r'(?s)liveInfoView\s*:\s*({.+?}),\s*premiumStateView', webpage, 'user info', fatal=False) or None
        if live_info_view:
            # remove jQuery code from object literal
            live_info_view = re.sub(r'\$\(.+?\)[^,]+,', '"",', live_info_view)
            live_info_view = self._parse_json(js_to_json(live_info_view), video_id)

        return {
            'id': video_id,
            'title': title or traverse_obj(live_info_view, 'title'),
            'description': self._html_search_meta(
                ('og:description', 'twitter:description'),
                webpage, 'live description', fatal=False) or traverse_obj(live_info_view, 'info'),
            'formats': formats,
            'uploader': uploader or traverse_obj(live_info_view, 'name'),
            'uploader_id': video_id,
            'thumbnail': traverse_obj(live_info_view, 'thumb'),
            'is_live': True,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/applepodcasts.py
yt_dlp/extractor/applepodcasts.py
from .common import InfoExtractor
from ..utils import (
    clean_html,
    clean_podcast_url,
    int_or_none,
    parse_iso8601,
)
from ..utils.traversal import traverse_obj


class ApplePodcastsIE(InfoExtractor):
    """Extractor for individual podcast episodes on podcasts.apple.com."""
    _VALID_URL = r'https?://podcasts\.apple\.com/(?:[^/]+/)?podcast(?:/[^/]+){1,2}.*?\bi=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://podcasts.apple.com/us/podcast/ferreck-dawn-to-the-break-of-dawn-117/id1625658232?i=1000665010654',
        'md5': '82cc219b8cc1dcf8bfc5a5e99b23b172',
        'info_dict': {
            'id': '1000665010654',
            'ext': 'mp3',
            'title': 'Ferreck Dawn - To The Break of Dawn 117',
            'episode': 'Ferreck Dawn - To The Break of Dawn 117',
            'description': 'md5:8c4f5c2c30af17ed6a98b0b9daf15b76',
            'upload_date': '20240812',
            'timestamp': 1723449600,
            'duration': 3596,
            'series': 'Ferreck Dawn - To The Break of Dawn',
            'thumbnail': 're:.+[.](png|jpe?g|webp)',
        },
    }, {
        'url': 'https://podcasts.apple.com/us/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
        'md5': 'baf8a6b8b8aa6062dbb4639ed73d0052',
        'info_dict': {
            'id': '1000482637777',
            'ext': 'mp3',
            'title': '207 - Whitney Webb Returns',
            'episode': '207 - Whitney Webb Returns',
            'episode_number': 207,
            'description': 'md5:75ef4316031df7b41ced4e7b987f79c6',
            'upload_date': '20200705',
            'timestamp': 1593932400,
            'duration': 5369,
            'series': 'The Tim Dillon Show',
            'thumbnail': 're:.+[.](png|jpe?g|webp)',
        },
    }, {
        'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777',
        'only_matching': True,
    }, {
        'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns?i=1000482637777',
        'only_matching': True,
    }, {
        'url': 'https://podcasts.apple.com/podcast/id1135137367?i=1000482637777',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        episode_id = self._match_id(url)
        webpage = self._download_webpage(url, episode_id)

        # Episode metadata is embedded as serialized server data in the page
        serialized = self._search_json(
            r'<script [^>]*\bid=["\']serialized-server-data["\'][^>]*>', webpage,
            'server data', episode_id, contains_pattern=r'\[{(?s:.+)}\]')
        server_data = serialized[0]['data']

        # The "share" header button carries the EpisodeLockup model with
        # the stream URL and episode details
        episode_model = traverse_obj(server_data, (
            'headerButtonItems',
            lambda _, v: v['$kind'] == 'share' and v['modelType'] == 'EpisodeLockup',
            'model', {dict}, any))

        # Prefer the SEO schema content; fall back to scanning the page for JSON-LD
        schema_content = traverse_obj(server_data, ('seoData', 'schemaContent', {dict}))
        json_ld_info = self._json_ld(
            schema_content or self._yield_json_ld(webpage, episode_id, fatal=False),
            episode_id, fatal=False)

        info = {'id': episode_id}
        info.update(json_ld_info)
        info.update(traverse_obj(episode_model, {
            'title': ('title', {str}),
            'description': ('summary', {clean_html}),
            'url': ('playAction', 'episodeOffer', 'streamUrl', {clean_podcast_url}),
            'timestamp': ('releaseDate', {parse_iso8601}),
            'duration': ('duration', {int_or_none}),
        }))
        info['thumbnail'] = self._og_search_thumbnail(webpage)
        # Podcasts are audio-only
        info['vcodec'] = 'none'
        return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pearvideo.py
yt_dlp/extractor/pearvideo.py
import re

from .common import InfoExtractor
from ..utils import (
    qualities,
    traverse_obj,
    unified_timestamp,
)


class PearVideoIE(InfoExtractor):
    """Extractor for videos on pearvideo.com."""
    _VALID_URL = r'https?://(?:www\.)?pearvideo\.com/video_(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.pearvideo.com/video_1076290',
        'info_dict': {
            'id': '1076290',
            'ext': 'mp4',
            'title': '小浣熊在主人家玻璃上滚石头:没砸',
            'description': 'md5:01d576b747de71be0ee85eb7cac25f9d',
            'timestamp': 1494275280,
            'upload_date': '20170508',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        rank = qualities(
            ('ldflv', 'ld', 'sdflv', 'sd', 'hdflv', 'hd', 'src'))

        # Direct URLs embedded as JS assignments like "hdUrl = '...'"
        formats = []
        for match in re.finditer(
                r'(?P<id>[a-zA-Z]+)Url\s*=\s*(["\'])(?P<url>(?:https?:)?//.+?)\2', page):
            fmt_id = match.group('id')
            formats.append({
                'url': match.group('url'),
                'format_id': fmt_id,
                'quality': rank(fmt_id),
            })

        if not formats:
            # Fall back to the videoStatus API when no inline URLs are found
            info = self._download_json(
                'https://www.pearvideo.com/videoStatus.jsp', video_id=video_id,
                query={'contId': video_id}, headers={'Referer': url})
            for key, value in traverse_obj(info, ('videoInfo', 'videos'), default={}).items():
                if not value:
                    continue
                if key == 'srcUrl':
                    # Replace the server timestamp segment with the content id
                    value = value.replace(info['systemTime'], f'cont-{video_id}')
                formats.append({
                    'format_id': key,
                    'url': value,
                })

        title = self._search_regex(
            (r'<h1[^>]+\bclass=(["\'])video-tt\1[^>]*>(?P<value>[^<]+)',
             r'<[^>]+\bdata-title=(["\'])(?P<value>(?:(?!\1).)+)\1'),
            page, 'title', group='value')
        description = self._search_regex(
            (r'<div[^>]+\bclass=(["\'])summary\1[^>]*>(?P<value>[^<]+)',
             r'<[^>]+\bdata-summary=(["\'])(?P<value>(?:(?!\1).)+)\1'),
            page, 'description', default=None,
            group='value') or self._html_search_meta('Description', page)
        timestamp = unified_timestamp(self._search_regex(
            r'<div[^>]+\bclass=["\']date["\'][^>]*>([^<]+)',
            page, 'timestamp', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/twitter.py
yt_dlp/extractor/twitter.py
import functools import json import math import re import urllib.parse from .common import InfoExtractor from .periscope import PeriscopeBaseIE, PeriscopeIE from ..jsinterp import js_number_to_string from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, dict_get, filter_dict, float_or_none, format_field, int_or_none, join_nonempty, make_archive_id, remove_end, str_or_none, strip_or_none, truncate_string, try_call, try_get, unified_timestamp, update_url_query, url_or_none, xpath_text, ) from ..utils.traversal import require, traverse_obj class TwitterBaseIE(InfoExtractor): _API_BASE = 'https://api.x.com/1.1/' _GRAPHQL_API_BASE = 'https://x.com/i/api/graphql/' _BASE_REGEX = r'https?://(?:(?:www|m(?:obile)?)\.)?(?:(?:twitter|x)\.com|twitter3e4tixl4xyajtrzo62zg5vztmjuricljdp2c5kshju4avyoid\.onion)/' _AUTH = 'AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA' _LEGACY_AUTH = 'AAAAAAAAAAAAAAAAAAAAAIK1zgAAAAAA2tUWuhGZ2JceoId5GwYWU5GspY4%3DUq7gzFoCZs1QfwGoVdvSac3IniczZEYXIcDyumCauIXpcAPorE' def _extract_variant_formats(self, variant, video_id): variant_url = variant.get('url') if not variant_url: return [], {} elif '.m3u8' in variant_url: fmts, subs = self._extract_m3u8_formats_and_subtitles( variant_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) for f in traverse_obj(fmts, lambda _, v: v['vcodec'] == 'none' and v.get('tbr') is None): if mobj := re.match(r'hls-[Aa]udio-(?P<bitrate>\d{4,})', f['format_id']): f['tbr'] = int_or_none(mobj.group('bitrate'), 1000) return fmts, subs else: tbr = int_or_none(dict_get(variant, ('bitrate', 'bit_rate')), 1000) or None f = { 'url': variant_url, 'format_id': join_nonempty('http', tbr), 'tbr': tbr, } self._search_dimensions_in_video_url(f, variant_url) return [f], {} def _extract_formats_from_vmap_url(self, vmap_url, video_id): vmap_url = url_or_none(vmap_url) if not vmap_url: return [], {} vmap_data = self._download_xml(vmap_url, 
video_id) formats = [] subtitles = {} urls = [] for video_variant in vmap_data.findall('.//{http://twitter.com/schema/videoVMapV2.xsd}videoVariant'): video_variant.attrib['url'] = urllib.parse.unquote( video_variant.attrib['url']) urls.append(video_variant.attrib['url']) fmts, subs = self._extract_variant_formats( video_variant.attrib, video_id) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) video_url = strip_or_none(xpath_text(vmap_data, './/MediaFile')) if video_url not in urls: fmts, subs = self._extract_variant_formats({'url': video_url}, video_id) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) return formats, subtitles @staticmethod def _search_dimensions_in_video_url(a_format, video_url): m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url) if m: a_format.update({ 'width': int(m.group('width')), 'height': int(m.group('height')), }) @property def is_logged_in(self): return bool(self._get_cookies(self._API_BASE).get('auth_token')) @functools.cached_property def _selected_api(self): return self._configuration_arg('api', ['graphql'], ie_key='Twitter')[0] def _fetch_guest_token(self, display_id): guest_token = traverse_obj(self._download_json( f'{self._API_BASE}guest/activate.json', display_id, 'Downloading guest token', data=b'', headers=self._set_base_headers(legacy=display_id and self._selected_api == 'legacy')), ('guest_token', {str})) if not guest_token: raise ExtractorError('Could not retrieve guest token') return guest_token def _set_base_headers(self, legacy=False): bearer_token = self._LEGACY_AUTH if legacy and not self.is_logged_in else self._AUTH return filter_dict({ 'Authorization': f'Bearer {bearer_token}', 'x-csrf-token': try_call(lambda: self._get_cookies(self._API_BASE)['ct0'].value), }) def _call_api(self, path, video_id, query={}, graphql=False): headers = self._set_base_headers(legacy=not graphql and self._selected_api == 'legacy') headers.update({ 'x-twitter-auth-type': 'OAuth2Session', 
'x-twitter-client-language': 'en', 'x-twitter-active-user': 'yes', } if self.is_logged_in else { 'x-guest-token': self._fetch_guest_token(video_id), }) allowed_status = {400, 401, 403, 404} if graphql else {403} result = self._download_json( (self._GRAPHQL_API_BASE if graphql else self._API_BASE) + path, video_id, headers=headers, query=query, expected_status=allowed_status, note=f'Downloading {"GraphQL" if graphql else "legacy API"} JSON') if result.get('errors'): errors = ', '.join(set(traverse_obj(result, ('errors', ..., 'message', {str})))) if errors and 'not authorized' in errors: self.raise_login_required(remove_end(errors, '.')) raise ExtractorError(f'Error(s) while querying API: {errors or "Unknown error"}') return result def _build_graphql_query(self, media_id): raise NotImplementedError('Method must be implemented to support GraphQL') def _call_graphql_api(self, endpoint, media_id): data = self._build_graphql_query(media_id) query = {key: json.dumps(value, separators=(',', ':')) for key, value in data.items()} return traverse_obj(self._call_api(endpoint, media_id, query=query, graphql=True), 'data') class TwitterCardIE(InfoExtractor): IE_NAME = 'twitter:card' _VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/(?:cards/tfw/v1|videos(?:/tweet)?)/(?P<id>\d+)' _TESTS = [ { 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889', # MD5 checksums are different in different places 'info_dict': { 'id': '560070131976392705', 'ext': 'mp4', 'title': "Twitter - You can now shoot, edit and share video on Twitter. 
Capture life's most moving moments from your perspective.", 'description': 'md5:18d3e24bb4f6e5007487dd546e53bd96', 'uploader': 'Twitter', 'uploader_id': 'Twitter', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 30.033, 'timestamp': 1422366112, 'upload_date': '20150127', 'age_limit': 0, 'comment_count': int, 'tags': [], 'repost_count': int, 'like_count': int, 'display_id': '560070183650213889', 'uploader_url': 'https://twitter.com/Twitter', }, 'skip': 'This content is no longer available.', }, { 'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768', 'md5': '7137eca597f72b9abbe61e5ae0161399', 'info_dict': { 'id': '623160978427936768', 'ext': 'mp4', 'title': "NASA - Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASA...", 'description': "Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASANewHorizons #PlutoFlyby video. https://t.co/BJYgOjSeGA", 'uploader': 'NASA', 'uploader_id': 'NASA', 'timestamp': 1437408129, 'upload_date': '20150720', 'uploader_url': 'https://twitter.com/NASA', 'age_limit': 0, 'comment_count': int, 'like_count': int, 'repost_count': int, 'tags': ['PlutoFlyby'], 'channel_id': '11348282', '_old_archive_ids': ['twitter 623160978427936768'], }, 'params': {'format': '[protocol=https]'}, }, { 'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977', 'md5': 'fb08fbd69595cbd8818f0b2f2a94474d', 'info_dict': { 'id': 'dq4Oj5quskI', 'ext': 'mp4', 'title': 'Ubuntu 11.10 Overview', 'description': 'md5:a831e97fa384863d6e26ce48d1c43376', 'upload_date': '20111013', 'uploader': 'OMG! 
UBUNTU!', 'uploader_id': '@omgubuntu', 'channel_url': 'https://www.youtube.com/channel/UCIiSwcm9xiFb3Y4wjzR41eQ', 'channel_id': 'UCIiSwcm9xiFb3Y4wjzR41eQ', 'channel_follower_count': int, 'chapters': 'count:8', 'uploader_url': 'https://www.youtube.com/@omgubuntu', 'duration': 138, 'categories': ['Film & Animation'], 'age_limit': 0, 'comment_count': int, 'availability': 'public', 'like_count': int, 'thumbnail': 'https://i.ytimg.com/vi/dq4Oj5quskI/maxresdefault.jpg', 'view_count': int, 'tags': 'count:12', 'channel': 'OMG! UBUNTU!', 'playable_in_embed': True, 'heatmap': 'count:100', 'timestamp': 1318500227, 'live_status': 'not_live', }, 'add_ie': ['Youtube'], 'skip': 'The page does not exist', }, { 'url': 'https://twitter.com/i/videos/tweet/705235433198714880', 'md5': '884812a2adc8aaf6fe52b15ccbfa3b88', 'info_dict': { 'id': '705235433198714880', 'ext': 'mp4', 'title': "Brent Yarina - Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight.", 'description': "Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight. https://t.co/OrxcJ28Bns", 'uploader': 'Brent Yarina', 'uploader_id': 'BTNBrentYarina', 'timestamp': 1456976204, 'upload_date': '20160303', }, 'skip': 'This content is no longer available.', }, { 'url': 'https://twitter.com/i/videos/752274308186120192', 'only_matching': True, }, ] def _real_extract(self, url): status_id = self._match_id(url) return self.url_result( 'https://twitter.com/statuses/' + status_id, TwitterIE.ie_key(), status_id) class TwitterIE(TwitterBaseIE): IE_NAME = 'twitter' _VALID_URL = TwitterBaseIE._BASE_REGEX + r'(?:(?:i/web|[^/]+)/status|statuses)/(?P<id>\d+)(?:/(?:video|photo)/(?P<index>\d+))?' 
_TESTS = [{ 'url': 'https://twitter.com/freethenipple/status/643211948184596480', 'info_dict': { 'id': '643211870443208704', 'display_id': '643211948184596480', 'ext': 'mp4', 'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ', 'channel_id': '549749560', 'uploader': 'FREE THE NIPPLE', 'uploader_id': 'freethenipple', 'duration': 12.922, 'timestamp': 1442188653, 'upload_date': '20150913', 'uploader_url': 'https://twitter.com/freethenipple', 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': [], 'age_limit': 18, '_old_archive_ids': ['twitter 643211948184596480'], }, 'skip': 'Requires authentication', }, { 'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1', 'md5': 'f36dcd5fb92bf7057f155e7d927eeb42', 'info_dict': { 'id': '657991469417025536', 'ext': 'mp4', 'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai', 'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"', 'thumbnail': r're:^https?://.*\.png', 'uploader': 'Gifs', 'uploader_id': 'giphz', }, 'expected_warnings': ['height', 'width'], 'skip': 'Account suspended', }, { 'url': 'https://twitter.com/starwars/status/665052190608723968', 'info_dict': { 'id': '665052190608723968', 'display_id': '665052190608723968', 'ext': 'mp4', 'title': r're:Star Wars.*A new beginning is coming December 18.*', 'description': 'A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens. 
https://t.co/OkSqT2fjWJ', 'channel_id': '20106852', 'uploader_id': 'starwars', 'uploader': r're:Star Wars.*', 'timestamp': 1447395772, 'upload_date': '20151113', 'uploader_url': 'https://twitter.com/starwars', 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': ['TV', 'StarWars', 'TheForceAwakens'], 'age_limit': 0, '_old_archive_ids': ['twitter 665052190608723968'], }, }, { 'url': 'https://twitter.com/BTNBrentYarina/status/705235433198714880', 'info_dict': { 'id': '705235433198714880', 'ext': 'mp4', 'title': "Brent Yarina - Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight.", 'description': "Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight. https://t.co/OrxcJ28Bns", 'uploader_id': 'BTNBrentYarina', 'uploader': 'Brent Yarina', 'timestamp': 1456976204, 'upload_date': '20160303', 'uploader_url': 'https://twitter.com/BTNBrentYarina', 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': [], 'age_limit': 0, }, 'params': { # The same video as https://twitter.com/i/videos/tweet/705235433198714880 # Test case of TwitterCardIE 'skip_download': True, }, 'skip': 'Dead external link', }, { 'url': 'https://twitter.com/jaydingeer/status/700207533655363584', 'info_dict': { 'id': '700207414000242688', 'display_id': '700207533655363584', 'ext': 'mp4', 'title': 'jaydin donte geer - BEAT PROD: @suhmeduh #Damndaniel', 'description': 'BEAT PROD: @suhmeduh https://t.co/HBrQ4AfpvZ #Damndaniel https://t.co/byBooq2ejZ', 'thumbnail': r're:^https?://.*\.jpg', 'channel_id': '1383165541', 'uploader': 'jaydin donte geer', 'uploader_id': 'jaydingeer', 'duration': 30.0, 'timestamp': 1455777459, 'upload_date': '20160218', 'uploader_url': 'https://twitter.com/jaydingeer', 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': ['Damndaniel'], 'age_limit': 0, '_old_archive_ids': ['twitter 700207533655363584'], }, 'skip': 'Tweet has been deleted', }, { 'url': 
'https://twitter.com/captainamerica/status/719944021058060289', 'info_dict': { 'id': '717462543795523584', 'display_id': '719944021058060289', 'ext': 'mp4', 'title': 'Captain America - @King0fNerd Are you sure you made the right choice? Find out in theat...', 'description': '@King0fNerd Are you sure you made the right choice? Find out in theaters. https://t.co/GpgYi9xMJI', 'channel_id': '701615052', 'uploader_id': 'CaptainAmerica', 'uploader': 'Captain America', 'duration': 3.17, 'timestamp': 1460483005, 'upload_date': '20160412', 'uploader_url': 'https://twitter.com/CaptainAmerica', 'thumbnail': r're:^https?://.*\.jpg', 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': [], 'age_limit': 0, '_old_archive_ids': ['twitter 719944021058060289'], }, }, { 'url': 'https://twitter.com/OPP_HSD/status/779210622571536384', 'info_dict': { 'id': '1zqKVVlkqLaKB', 'ext': 'mp4', 'title': 'Sgt Kerry Schmidt - Ontario Provincial Police - Road rage, mischief, assault, rollover and fire in one occurrence', 'upload_date': '20160923', 'uploader_id': '1PmKqpJdOJQoY', 'uploader': 'Sgt Kerry Schmidt - Ontario Provincial Police', 'timestamp': 1474613214, 'thumbnail': r're:^https?://.*\.jpg', }, 'add_ie': ['Periscope'], 'skip': 'Broadcast not found', }, { # has mp4 formats via mobile API 'url': 'https://twitter.com/news_al3alm/status/852138619213144067', 'info_dict': { 'id': '852077943283097602', 'ext': 'mp4', 'title': 'عالم الأخبار - كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعا...', 'description': 'كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. 
الظلم ظلمات يوم القيامة https://t.co/xg6OhpyKfN', 'channel_id': '2526757026', 'uploader': 'عالم الأخبار', 'uploader_id': 'news_al3alm', 'duration': 277.4, 'timestamp': 1492000653, 'upload_date': '20170412', 'display_id': '852138619213144067', 'age_limit': 0, 'uploader_url': 'https://twitter.com/news_al3alm', 'thumbnail': r're:^https?://.*\.jpg', 'tags': [], 'repost_count': int, 'like_count': int, 'comment_count': int, '_old_archive_ids': ['twitter 852138619213144067'], }, 'skip': 'Suspended', }, { 'url': 'https://twitter.com/i/web/status/910031516746514432', 'info_dict': { 'id': '910030238373089285', 'display_id': '910031516746514432', 'ext': 'mp4', 'title': 'Préfet de Guadeloupe - [Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terr...', 'thumbnail': r're:^https?://.*\.jpg', 'description': '[Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre. https://t.co/mwx01Rs4lo', 'channel_id': '2319432498', 'uploader': 'Préfet de Guadeloupe', 'uploader_id': 'Prefet971', 'duration': 47.48, 'timestamp': 1505803395, 'upload_date': '20170919', 'uploader_url': 'https://twitter.com/Prefet971', 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': ['Maria'], 'age_limit': 0, '_old_archive_ids': ['twitter 910031516746514432'], }, 'params': { 'skip_download': True, # requires ffmpeg }, }, { # card via api.twitter.com/1.1/videos/tweet/config 'url': 'https://twitter.com/LisPower1/status/1001551623938805763', 'info_dict': { 'id': '1001551417340022785', 'display_id': '1001551623938805763', 'ext': 'mp4', 'title': 're:.*?Shep is on a roll today.*?', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'md5:37b9f2ff31720cef23b2bd42ee8a0f09', 'channel_id': '255036353', 'uploader': 'Lis Power', 'uploader_id': 'LisPower1', 'duration': 111.278, 'timestamp': 1527623489, 'upload_date': '20180529', 'uploader_url': 'https://twitter.com/LisPower1', 'comment_count': int, 'repost_count': int, 
'like_count': int, 'tags': [], 'age_limit': 0, '_old_archive_ids': ['twitter 1001551623938805763'], }, 'params': { 'skip_download': True, # requires ffmpeg }, }, { 'url': 'https://twitter.com/foobar/status/1087791357756956680', 'info_dict': { 'id': '1087791272830607360', 'display_id': '1087791357756956680', 'ext': 'mp4', 'title': 'X - A new is coming. Some of you got an opt-in to try it now. Check out the emoji button, quick keyboard shortcuts, upgraded trends, advanced search, and more. Let us know your thoughts!', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'md5:6dfd341a3310fb97d80d2bf7145df976', 'uploader': 'X', 'uploader_id': 'X', 'duration': 61.567, 'timestamp': 1548184644, 'upload_date': '20190122', 'uploader_url': 'https://twitter.com/X', 'comment_count': int, 'repost_count': int, 'like_count': int, 'view_count': int, 'tags': [], 'age_limit': 0, }, 'skip': 'This Tweet is unavailable', }, { # not available in Periscope 'url': 'https://twitter.com/ViviEducation/status/1136534865145286656', 'info_dict': { 'id': '1vOGwqejwoWxB', 'ext': 'mp4', 'title': 'Vivi - Vivi founder @lior_rauchy announcing our new student feedback tool live at @EduTECH_AU #EduTECH2019', 'uploader': 'Vivi', 'uploader_id': '1eVjYOLGkGrQL', 'thumbnail': r're:^https?://.*\.jpg', 'tags': ['EduTECH2019'], 'view_count': int, }, 'add_ie': ['TwitterBroadcast'], 'skip': 'Broadcast no longer exists', }, { # unified card 'url': 'https://twitter.com/BrooklynNets/status/1349794411333394432?s=20', 'info_dict': { 'id': '1349774757969989634', 'display_id': '1349794411333394432', 'ext': 'mp4', 'title': "Brooklyn Nets - WATCH: Sean Marks' full media session after our acquisition of 8-time...", 'thumbnail': r're:^https?://.*\.jpg', 'description': 'md5:71ead15ec44cee55071547d6447c6a3e', 'channel_id': '18552281', 'uploader': 'Brooklyn Nets', 'uploader_id': 'BrooklynNets', 'duration': 324.484, 'timestamp': 1610651040, 'upload_date': '20210114', 'uploader_url': 'https://twitter.com/BrooklynNets', 
'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': [], 'age_limit': 0, '_old_archive_ids': ['twitter 1349794411333394432'], }, 'params': { 'skip_download': True, }, }, { 'url': 'https://twitter.com/oshtru/status/1577855540407197696', 'info_dict': { 'id': '1577855447914409984', 'display_id': '1577855540407197696', 'ext': 'mp4', 'title': 'Oshtru - gm ✨️ now I can post image and video. nice update.', 'description': 'md5:b9c3699335447391d11753ab21c70a74', 'upload_date': '20221006', 'channel_id': '143077138', 'uploader': 'Oshtru', 'uploader_id': 'oshtru', 'uploader_url': 'https://twitter.com/oshtru', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 30.03, 'timestamp': 1665025050, 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': [], 'age_limit': 0, '_old_archive_ids': ['twitter 1577855540407197696'], }, 'params': {'skip_download': True}, }, { 'url': 'https://twitter.com/UltimaShadowX/status/1577719286659006464', 'info_dict': { 'id': '1577719286659006464', 'title': r're:Ultima.* - Test$', 'description': 'Test https://t.co/Y3KEZD7Dad', 'channel_id': '168922496', 'uploader': r're:Ultima.*', 'uploader_id': 'UltimaShadowX', 'uploader_url': 'https://twitter.com/UltimaShadowX', 'upload_date': '20221005', 'timestamp': 1664992565, 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': [], 'age_limit': 0, }, 'playlist_count': 4, 'params': {'skip_download': True}, }, { 'url': 'https://twitter.com/MesoMax919/status/1575560063510810624', 'info_dict': { 'id': '1575559336759263233', 'display_id': '1575560063510810624', 'ext': 'mp4', 'title': 'Max Olson - Absolutely heartbreaking footage captured by our surge probe of catas...', 'thumbnail': r're:^https?://.*\.jpg', 'description': 'md5:95aea692fda36a12081b9629b02daa92', 'channel_id': '1094109584', 'uploader': 'Max Olson', 'uploader_id': 'MesoMax919', 'uploader_url': 'https://twitter.com/MesoMax919', 'duration': 21.321, 'timestamp': 1664477766, 'upload_date': '20220929', 
'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': ['HurricaneIan'], 'age_limit': 0, '_old_archive_ids': ['twitter 1575560063510810624'], }, }, { # Adult content, fails if not logged in 'url': 'https://twitter.com/Rizdraws/status/1575199173472927762', 'info_dict': { 'id': '1575199163847000068', 'display_id': '1575199173472927762', 'ext': 'mp4', 'title': str, 'description': str, 'channel_id': '1217167793541480450', 'uploader': str, 'uploader_id': 'Rizdraws', 'uploader_url': 'https://twitter.com/Rizdraws', 'upload_date': '20220928', 'timestamp': 1664391723, 'thumbnail': r're:^https?://.+\.jpg', 'like_count': int, 'repost_count': int, 'comment_count': int, 'age_limit': 18, 'tags': [], '_old_archive_ids': ['twitter 1575199173472927762'], }, 'params': {'skip_download': 'The media could not be played'}, 'skip': 'Requires authentication', }, { # Playlist result only with graphql API 'url': 'https://twitter.com/Srirachachau/status/1395079556562706435', 'playlist_mincount': 2, 'info_dict': { 'id': '1395079556562706435', 'title': str, 'tags': [], 'channel_id': '21539378', 'uploader': str, 'like_count': int, 'upload_date': '20210519', 'age_limit': 0, 'repost_count': int, 'description': 'Here it is! Finished my gothic western cartoon. Pretty proud of it. It\'s got some goofs and lots of splashy over the top violence, something for everyone, hope you like it https://t.co/fOsG5glUnw', 'uploader_id': 'Srirachachau', 'comment_count': int, 'uploader_url': 'https://twitter.com/Srirachachau', 'timestamp': 1621447860, }, }, { 'url': 'https://twitter.com/DavidToons_/status/1578353380363501568', 'playlist_mincount': 2, 'info_dict': { 'id': '1578353380363501568', 'title': str, 'channel_id': '2195866214', 'uploader_id': 'DavidToons_', 'repost_count': int, 'like_count': int, 'uploader': str, 'timestamp': 1665143744, 'uploader_url': 'https://twitter.com/DavidToons_', 'description': 'Chris sounds like Linda from Bob\'s Burgers, so as an animator: this had to be done. 
https://t.co/WgJauwIW1w', 'tags': [], 'comment_count': int, 'upload_date': '20221007', 'age_limit': 0, }, }, { 'url': 'https://twitter.com/primevideouk/status/1578401165338976258', 'playlist_count': 2, 'info_dict': { 'id': '1578401165338976258', 'title': str, 'description': 'md5:659a6b517a034b4cee5d795381a2dc41', 'channel_id': '19338359', 'uploader': str, 'uploader_id': 'primevideouk', 'timestamp': 1665155137, 'upload_date': '20221007', 'age_limit': 0, 'uploader_url': 'https://twitter.com/primevideouk', 'comment_count': int, 'repost_count': int, 'like_count': int, 'tags': ['TheRingsOfPower'], }, }, { # Twitter Spaces 'url': 'https://twitter.com/MoniqueCamarra/status/1550101959377551360', 'info_dict': { 'id': '1lPJqmBeeNAJb', 'ext': 'm4a', 'title': 'EuroFile@6 Ukraine Up-date-Draghi Defenestration-the West', 'uploader': r're:Monique Camarra.+?', 'uploader_id': 'MoniqueCamarra', 'live_status': 'was_live', 'release_timestamp': 1658417305, 'description': r're:Twitter Space participated by Sergej Sumlenny.+', 'timestamp': 1658407771, 'release_date': '20220721', 'upload_date': '20220721', 'thumbnail': 'https://pbs.twimg.com/profile_images/1920514378006188033/xQs6J_yI_400x400.jpg', }, 'add_ie': ['TwitterSpaces'], 'params': {'skip_download': 'm3u8'}, }, { # URL specifies video number but --yes-playlist 'url': 'https://twitter.com/CTVJLaidlaw/status/1600649710662213632/video/1', 'playlist_mincount': 2, 'info_dict': { 'id': '1600649710662213632', 'title': "Jocelyn Laidlaw - How Kirstie Alley's tragic death inspired me to share more about my c...", 'timestamp': 1670459604.0, 'description': 'md5:591c19ce66fadc2359725d5cd0d1052c', 'comment_count': int, 'uploader_id': 'JocelynVLaidlaw', 'channel_id': '80082014', 'repost_count': int, 'tags': ['colorectalcancer', 'cancerjourney', 'imnotaquitter'], 'upload_date': '20221208', 'age_limit': 0, 'uploader': 'Jocelyn Laidlaw', 'uploader_url': 'https://twitter.com/JocelynVLaidlaw', 'like_count': int, }, }, { # URL specifies video number 
and --no-playlist 'url': 'https://twitter.com/CTVJLaidlaw/status/1600649710662213632/video/2', 'info_dict': { 'id': '1600649511827013632', 'ext': 'mp4', 'title': "Jocelyn Laidlaw - How Kirstie Alley's tragic death inspired me to share more about my c... #1", 'thumbnail': r're:^https?://.+\.jpg', 'timestamp': 1670459604.0, 'channel_id': '80082014', 'uploader_id': 'JocelynVLaidlaw', 'uploader': 'Jocelyn Laidlaw', 'repost_count': int, 'comment_count': int, 'tags': ['colorectalcancer', 'cancerjourney', 'imnotaquitter'], 'duration': 102.226, 'uploader_url': 'https://twitter.com/JocelynVLaidlaw', 'display_id': '1600649710662213632', 'like_count': int,
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ridehome.py
yt_dlp/extractor/ridehome.py
from .art19 import Art19IE from .common import InfoExtractor from ..utils import extract_attributes, get_elements_html_by_class from ..utils.traversal import traverse_obj class RideHomeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ridehome\.info/show/[\w-]+/(?P<id>[\w-]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://www.ridehome.info/show/techmeme-ride-home/thu-1228-will-2024-be-the-year-apple-gets-serious-about-gaming-on-macs/', 'info_dict': { 'id': 'thu-1228-will-2024-be-the-year-apple-gets-serious-about-gaming-on-macs', }, 'playlist_count': 1, 'playlist': [{ 'md5': 'c84ea3cc96950a9ab86fe540f3edc588', 'info_dict': { 'id': '540e5493-9fe6-4c14-a488-dc508d8794b2', 'ext': 'mp3', 'title': 'Thu. 12/28 – Will 2024 Be The Year Apple Gets Serious About Gaming On Macs?', 'description': 'md5:9dba86ae9b5047a8150eceddeeb629c2', 'series': 'Techmeme Ride Home', 'series_id': '3c30e8f4-ab48-415b-9421-1ae06cd4058b', 'upload_date': '20231228', 'timestamp': 1703780995, 'modified_date': '20231230', 'episode_id': '540e5493-9fe6-4c14-a488-dc508d8794b2', 'modified_timestamp': 1703912404, 'release_date': '20231228', 'release_timestamp': 1703782800, 'duration': 1000.1502, 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com/images/.*\.jpeg$', }, }], }, { 'url': 'https://www.ridehome.info/show/techmeme-ride-home/portfolio-profile-sensel-with-ilyarosenberg/', 'info_dict': { 'id': 'portfolio-profile-sensel-with-ilyarosenberg', }, 'playlist_count': 1, 'playlist': [{ 'md5': 'bf9d6efad221008ce71aea09d5533cf6', 'info_dict': { 'id': '6beed803-b1ef-4536-9fef-c23cf6b4dcac', 'ext': 'mp3', 'title': '(Portfolio Profile) Sensel - With @IlyaRosenberg', 'description': 'md5:e1e4a970bce04290e0ba6f030b0125db', 'series': 'Techmeme Ride Home', 'series_id': '3c30e8f4-ab48-415b-9421-1ae06cd4058b', 'upload_date': '20220108', 'timestamp': 1641656064, 'modified_date': '20230418', 'episode_id': '6beed803-b1ef-4536-9fef-c23cf6b4dcac', 'modified_timestamp': 1681843318, 'release_date': '20220108', 
'release_timestamp': 1641672000, 'duration': 2789.38122, 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com/images/.*\.jpeg$', }, }], }, { 'url': 'https://www.ridehome.info/show/spacecasts/big-tech-news-apples-macbook-pro-event/', 'info_dict': { 'id': 'big-tech-news-apples-macbook-pro-event', }, 'playlist_count': 1, 'playlist': [{ 'md5': 'b1428530c6e03904a8271e978007fc05', 'info_dict': { 'id': 'f4780044-6c4b-4ce0-8215-8a86cc66bff7', 'ext': 'mp3', 'title': 'md5:e6c05d44d59b6577a4145ac339de5040', 'description': 'md5:14152f7228c8a301a77e3d6bc891b145', 'series': 'SpaceCasts', 'series_id': '8e3e837d-7fe0-4a23-8e11-894917e07e17', 'upload_date': '20211026', 'timestamp': 1635271450, 'modified_date': '20230502', 'episode_id': 'f4780044-6c4b-4ce0-8215-8a86cc66bff7', 'modified_timestamp': 1683057500, 'release_date': '20211026', 'release_timestamp': 1635272124, 'duration': 2266.30531, 'thumbnail': r're:^https?://content\.production\.cdn\.art19\.com/images/.*\.jpeg$', }, }], }] def _real_extract(self, url): article_id = self._match_id(url) webpage = self._download_webpage(url, article_id) urls = traverse_obj( get_elements_html_by_class('iframeContainer', webpage), (..., {extract_attributes}, lambda k, v: k == 'data-src' and Art19IE.suitable(v))) return self.playlist_from_matches(urls, article_id, ie=Art19IE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/theintercept.py
yt_dlp/extractor/theintercept.py
from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_iso8601, ) class TheInterceptIE(InfoExtractor): _VALID_URL = r'https?://theintercept\.com/fieldofvision/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://theintercept.com/fieldofvision/thisisacoup-episode-four-surrender-or-die/', 'md5': '145f28b41d44aab2f87c0a4ac8ec95bd', 'info_dict': { 'id': '46214', 'ext': 'mp4', 'title': '#ThisIsACoup – Episode Four: Surrender or Die', 'description': 'md5:74dd27f0e2fbd50817829f97eaa33140', 'timestamp': 1450429239, 'upload_date': '20151218', 'comment_count': int, }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) json_data = self._parse_json(self._search_regex( r'initialStoreTree\s*=\s*(?P<json_data>{.+})', webpage, 'initialStoreTree'), display_id) for post in json_data['resources']['posts'].values(): if post['slug'] == display_id: return { '_type': 'url_transparent', 'url': 'jwplatform:{}'.format(post['fov_videoid']), 'id': str(post['ID']), 'display_id': display_id, 'title': post['title'], 'description': post.get('excerpt'), 'timestamp': parse_iso8601(post.get('date')), 'comment_count': int_or_none(post.get('comments_number')), } raise ExtractorError('Unable to find the current post')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/steam.py
yt_dlp/extractor/steam.py
import json from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( ExtractorError, clean_html, extract_attributes, join_nonempty, js_to_json, str_or_none, url_or_none, ) from ..utils.traversal import ( find_element, find_elements, traverse_obj, trim_str, ) class SteamIE(InfoExtractor): _VALID_URL = r'https?://store\.steampowered\.com(?:/agecheck)?/app/(?P<id>\d+)/?(?:[^?/#]+/?)?(?:[?#]|$)' _TESTS = [{ 'url': 'https://store.steampowered.com/app/105600', 'info_dict': { 'id': '105600', 'title': 'Terraria', }, 'playlist_mincount': 3, }, { 'url': 'https://store.steampowered.com/app/271590/Grand_Theft_Auto_V/', 'info_dict': { 'id': '271590', 'title': 'Grand Theft Auto V Legacy', }, 'playlist_mincount': 26, }] def _real_extract(self, url): app_id = self._match_id(url) self._set_cookie('store.steampowered.com', 'wants_mature_content', '1') self._set_cookie('store.steampowered.com', 'birthtime', '946652401') self._set_cookie('store.steampowered.com', 'lastagecheckage', '1-January-2000') webpage = self._download_webpage(url, app_id) app_name = traverse_obj(webpage, ({find_element(cls='apphub_AppName')}, {clean_html})) entries = [] for data_prop in traverse_obj(webpage, ( {find_elements(cls='highlight_player_item highlight_movie', html=True)}, ..., {extract_attributes}, 'data-props', {json.loads}, {dict}, )): formats = [] if hls_manifest := traverse_obj(data_prop, ('hlsManifest', {url_or_none})): formats.extend(self._extract_m3u8_formats( hls_manifest, app_id, 'mp4', m3u8_id='hls', fatal=False)) for dash_manifest in traverse_obj(data_prop, ('dashManifests', ..., {url_or_none})): formats.extend(self._extract_mpd_formats( dash_manifest, app_id, mpd_id='dash', fatal=False)) movie_id = traverse_obj(data_prop, ('id', {trim_str(start='highlight_movie_')})) entries.append({ 'id': movie_id, 'title': join_nonempty(app_name, 'video', movie_id, delim=' '), 'formats': formats, 'series': app_name, 'series_id': app_id, 'thumbnail': traverse_obj(data_prop, 
('screenshot', {url_or_none})), }) return self.playlist_result(entries, app_id, app_name) class SteamCommunityIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?steamcommunity\.com/sharedfiles/filedetails(?:/?\?(?:[^#]+&)?id=|/)(?P<id>\d+)' _TESTS = [{ 'url': 'https://steamcommunity.com/sharedfiles/filedetails/2717708756', 'info_dict': { 'id': '39Sp2mB1Ly8', 'ext': 'mp4', 'title': 'Gmod Stamina System + Customisable HUD', 'age_limit': 0, 'availability': 'public', 'categories': ['Gaming'], 'channel': 'Zworld Gmod', 'channel_follower_count': int, 'channel_id': 'UCER1FWFSdMMiTKBnnEDBPaw', 'channel_url': 'https://www.youtube.com/channel/UCER1FWFSdMMiTKBnnEDBPaw', 'chapters': 'count:3', 'comment_count': int, 'description': 'md5:0ba8d8e550231211fa03fac920e5b0bf', 'duration': 162, 'like_count': int, 'live_status': 'not_live', 'media_type': 'video', 'playable_in_embed': True, 'tags': 'count:20', 'thumbnail': r're:https?://i\.ytimg\.com/vi/.+', 'timestamp': 1641955348, 'upload_date': '20220112', 'uploader': 'Zworld Gmod', 'uploader_id': '@gmod-addons', 'uploader_url': 'https://www.youtube.com/@gmod-addons', 'view_count': int, }, 'add_ie': ['Youtube'], 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://steamcommunity.com/sharedfiles/filedetails/?id=3544291945', 'info_dict': { 'id': '5JZZlsAdsvI', 'ext': 'mp4', 'title': 'Memories', 'age_limit': 0, 'availability': 'public', 'categories': ['Gaming'], 'channel': 'Bombass Team', 'channel_follower_count': int, 'channel_id': 'UCIJgtNyCV53IeSkzg3FWSFA', 'channel_url': 'https://www.youtube.com/channel/UCIJgtNyCV53IeSkzg3FWSFA', 'comment_count': int, 'description': 'md5:1b8a103a5d67a3c48d07c065de7e2c63', 'duration': 83, 'like_count': int, 'live_status': 'not_live', 'media_type': 'video', 'playable_in_embed': True, 'tags': 'count:10', 'thumbnail': r're:https?://i\.ytimg\.com/vi/.+', 'timestamp': 1754427291, 'upload_date': '20250805', 'uploader': 'Bombass Team', 'uploader_id': '@BombassTeam', 'uploader_url': 
'https://www.youtube.com/@BombassTeam', 'view_count': int, }, 'add_ie': ['Youtube'], 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): file_id = self._match_id(url) webpage = self._download_webpage(url, file_id) flashvars = self._search_json( r'var\s+rgMovieFlashvars\s*=', webpage, 'flashvars', file_id, default={}, transform_source=js_to_json) youtube_id = ( traverse_obj(flashvars, (..., 'YOUTUBE_VIDEO_ID', {str}, any)) or traverse_obj(webpage, ( {find_element(cls='movieFrame modal', html=True)}, {extract_attributes}, 'id', {str}))) if not youtube_id: raise ExtractorError('No video found', expected=True) return self.url_result(youtube_id, YoutubeIE) class SteamCommunityBroadcastIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?steamcommunity\.com/broadcast/watch/(?P<id>\d+)' _TESTS = [{ 'url': 'https://steamcommunity.com/broadcast/watch/76561199073851486', 'info_dict': { 'id': '76561199073851486', 'ext': 'mp4', 'title': str, 'uploader_id': '1113585758', 'uploader': 'pepperm!nt', 'live_status': 'is_live', }, 'params': {'skip_download': 'Livestream'}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) json_data = self._download_json( 'https://steamcommunity.com/broadcast/getbroadcastmpd/', video_id, query={'steamid': f'{video_id}'}) formats, subs = self._extract_m3u8_formats_and_subtitles(json_data['hls_url'], video_id) ''' # We cannot download live dash atm mpd_formats, mpd_subs = self._extract_mpd_formats_and_subtitles(json_data['url'], video_id) formats.extend(mpd_formats) self._merge_subtitles(mpd_subs, target=subs) ''' uploader_json = self._download_json( 'https://steamcommunity.com/actions/ajaxresolveusers', video_id, query={'steamids': video_id})[0] return { 'id': video_id, 'title': self._generic_title('', webpage), 'formats': formats, 'live_status': 'is_live', 'view_count': json_data.get('num_view'), 'uploader': uploader_json.get('persona_name'), 'uploader_id': 
str_or_none(uploader_json.get('accountid')), 'subtitles': subs, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/instagram.py
yt_dlp/extractor/instagram.py
import hashlib import itertools import json import re from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, bug_reports_message, decode_base_n, encode_base_n, filter_dict, float_or_none, format_field, get_element_by_attribute, int_or_none, join_nonempty, lowercase_escape, str_or_none, str_to_int, traverse_obj, url_or_none, ) _ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_' def _pk_to_id(media_id): """Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id""" pk = int(str(media_id).split('_')[0]) return encode_base_n(pk, table=_ENCODING_CHARS) def _id_to_pk(shortcode): """Convert a shortcode to a numeric value""" if len(shortcode) > 28: shortcode = shortcode[:-28] return decode_base_n(shortcode, table=_ENCODING_CHARS) class InstagramBaseIE(InfoExtractor): _API_BASE_URL = 'https://i.instagram.com/api/v1' _LOGIN_URL = 'https://www.instagram.com/accounts/login' @property def _api_headers(self): return { 'X-IG-App-ID': self._configuration_arg('app_id', ['936619743392459'], ie_key=InstagramIE)[0], 'X-ASBD-ID': '198387', 'X-IG-WWW-Claim': '0', 'Origin': 'https://www.instagram.com', 'Accept': '*/*', } def _get_count(self, media, kind, *keys): return traverse_obj( media, (kind, 'count'), *((f'edge_media_{key}', 'count') for key in keys), expected_type=int_or_none) def _get_dimension(self, name, media, webpage=None): return ( traverse_obj(media, ('dimensions', name), expected_type=int_or_none) or int_or_none(self._html_search_meta( (f'og:video:{name}', f'video:{name}'), webpage or '', default=None))) def _extract_nodes(self, nodes, is_direct=False): for idx, node in enumerate(nodes, start=1): if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True: continue video_id = node.get('shortcode') if is_direct: info = { 'id': video_id or node['id'], 'url': node.get('video_url'), 'width': self._get_dimension('width', node), 
'height': self._get_dimension('height', node), 'http_headers': { 'Referer': 'https://www.instagram.com/', }, } elif not video_id: continue else: info = { '_type': 'url', 'ie_key': 'Instagram', 'id': video_id, 'url': f'https://instagram.com/p/{video_id}', } yield { **info, 'title': node.get('title') or (f'Video {idx}' if is_direct else None), 'description': traverse_obj( node, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str), 'thumbnail': traverse_obj( node, 'display_url', 'thumbnail_src', 'display_src', expected_type=url_or_none), 'duration': float_or_none(node.get('video_duration')), 'timestamp': int_or_none(node.get('taken_at_timestamp')), 'view_count': int_or_none(node.get('video_view_count')), 'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'), 'like_count': self._get_count(node, 'likes', 'preview_like'), } def _extract_product_media(self, product_media): media_id = product_media.get('code') or _pk_to_id(product_media.get('pk')) vcodec = product_media.get('video_codec') dash_manifest_raw = product_media.get('video_dash_manifest') videos_list = product_media.get('video_versions') if not (dash_manifest_raw or videos_list): return {} formats = [{ 'format_id': fmt.get('id'), 'url': fmt.get('url'), 'width': fmt.get('width'), 'height': fmt.get('height'), 'vcodec': vcodec, } for fmt in videos_list or []] if dash_manifest_raw: formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash')) thumbnails = [{ 'url': thumbnail.get('url'), 'width': thumbnail.get('width'), 'height': thumbnail.get('height'), } for thumbnail in traverse_obj(product_media, ('image_versions2', 'candidates')) or []] return { 'id': media_id, 'duration': float_or_none(product_media.get('video_duration')), 'formats': formats, 'thumbnails': thumbnails, } def _extract_product(self, product_info): if isinstance(product_info, list): product_info = product_info[0] user_info = 
product_info.get('user') or {} info_dict = { 'id': _pk_to_id(traverse_obj(product_info, 'pk', 'id', expected_type=str_or_none)[:19]), 'title': product_info.get('title') or f'Video by {user_info.get("username")}', 'description': traverse_obj(product_info, ('caption', 'text'), expected_type=str_or_none), 'timestamp': int_or_none(product_info.get('taken_at')), 'channel': user_info.get('username'), 'uploader': user_info.get('full_name'), 'uploader_id': str_or_none(user_info.get('pk')), 'view_count': int_or_none(product_info.get('view_count')), 'like_count': int_or_none(product_info.get('like_count')), 'comment_count': int_or_none(product_info.get('comment_count')), '__post_extractor': self.extract_comments(_pk_to_id(product_info.get('pk'))), 'http_headers': { 'Referer': 'https://www.instagram.com/', }, } carousel_media = product_info.get('carousel_media') if carousel_media: return { '_type': 'playlist', **info_dict, 'title': f'Post by {user_info.get("username")}', 'entries': [{ **info_dict, **self._extract_product_media(product_media), } for product_media in carousel_media], } return { **info_dict, **self._extract_product_media(product_info), } def _get_comments(self, video_id): comments_info = self._download_json( f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/comments/?can_support_threading=true&permalink_enabled=false', video_id, fatal=False, errnote='Comments extraction failed', note='Downloading comments info', headers=self._api_headers) or {} comment_data = traverse_obj(comments_info, ('edge_media_to_parent_comment', 'edges'), 'comments') for comment_dict in comment_data or []: yield { 'author': traverse_obj(comment_dict, ('node', 'owner', 'username'), ('user', 'username')), 'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id'), ('user', 'pk')), 'author_thumbnail': traverse_obj(comment_dict, ('node', 'owner', 'profile_pic_url'), ('user', 'profile_pic_url'), expected_type=url_or_none), 'id': traverse_obj(comment_dict, ('node', 'id'), 'pk'), 'text': 
traverse_obj(comment_dict, ('node', 'text'), 'text'), 'like_count': traverse_obj(comment_dict, ('node', 'edge_liked_by', 'count'), 'comment_like_count', expected_type=int_or_none), 'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), 'created_at', expected_type=int_or_none), } class InstagramIOSIE(InfoExtractor): IE_DESC = 'IOS instagram:// URL' _VALID_URL = r'instagram://media\?id=(?P<id>[\d_]+)' _TESTS = [{ 'url': 'instagram://media?id=482584233761418119', 'md5': '0d2da106a9d2631273e192b372806516', 'info_dict': { 'id': 'aye83DjauH', 'ext': 'mp4', 'title': 'Video by naomipq', 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 0, 'timestamp': 1371748545, 'upload_date': '20130620', 'uploader_id': 'naomipq', 'uploader': 'B E A U T Y F O R A S H E S', 'like_count': int, 'comment_count': int, 'comments': list, }, 'add_ie': ['Instagram'], }] def _real_extract(self, url): video_id = _pk_to_id(self._match_id(url)) return self.url_result(f'http://instagram.com/tv/{video_id}', InstagramIE, video_id) class InstagramIE(InstagramBaseIE): _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com(?:/(?!share/)[^/?#]+)?/(?:p|tv|reels?(?!/audio/))/(?P<id>[^/?#&]+))' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1'] _TESTS = [{ 'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc', 'md5': '0d2da106a9d2631273e192b372806516', 'info_dict': { 'id': 'aye83DjauH', 'ext': 'mp4', 'title': 'Video by naomipq', 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 8.747, 'timestamp': 1371748545, 'upload_date': '20130620', 'uploader_id': '2815873', 'uploader': 'B E A U T Y F O R A S H E S', 'channel': 'naomipq', 'like_count': int, 'comment_count': int, 'comments': list, }, 'expected_warnings': [ 'General metadata extraction failed', 'Main webpage is locked behind the login page', ], }, { # reel 'url': 
'https://www.instagram.com/reel/Chunk8-jurw/', 'md5': 'f6d8277f74515fa3ff9f5791426e42b1', 'info_dict': { 'id': 'Chunk8-jurw', 'ext': 'mp4', 'title': 'Video by instagram', 'description': 'md5:c9cde483606ed6f80fbe9283a6a2b290', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 5.016, 'timestamp': 1661529231, 'upload_date': '20220826', 'uploader_id': '25025320', 'uploader': 'Instagram', 'channel': 'instagram', 'like_count': int, 'comment_count': int, 'comments': list, }, 'expected_warnings': [ 'General metadata extraction failed', 'Main webpage is locked behind the login page', ], }, { # multi video post 'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/', 'playlist': [{ 'info_dict': { 'id': 'BQ0dSaohpPW', 'ext': 'mp4', 'title': 'Video 1', 'thumbnail': r're:^https?://.*\.jpg', 'view_count': int, }, }, { 'info_dict': { 'id': 'BQ0dTpOhuHT', 'ext': 'mp4', 'title': 'Video 2', 'thumbnail': r're:^https?://.*\.jpg', 'view_count': int, }, }, { 'info_dict': { 'id': 'BQ0dT7RBFeF', 'ext': 'mp4', 'title': 'Video 3', 'thumbnail': r're:^https?://.*\.jpg', 'view_count': int, }, }], 'info_dict': { 'id': 'BQ0eAlwhDrw', 'title': 'Post by instagram', 'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957', }, 'expected_warnings': [ 'General metadata extraction failed', 'Main webpage is locked behind the login page', ], }, { # IGTV 'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/', 'info_dict': { 'id': 'BkfuX9UB-eK', 'ext': 'mp4', 'title': 'Fingerboarding Tricks with @cass.fb', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 53.83, 'timestamp': 1530032919, 'upload_date': '20180626', 'uploader_id': '25025320', 'uploader': 'Instagram', 'channel': 'instagram', 'like_count': int, 'comment_count': int, 'comments': list, 'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.', }, 'expected_warnings': [ 'General metadata extraction failed', 'Main webpage is locked behind the login page', ], }, { 'url': 
'https://instagram.com/p/-Cmh1cukG2/', 'only_matching': True, }, { 'url': 'http://instagram.com/p/9o6LshA7zy/embed/', 'only_matching': True, }, { 'url': 'https://www.instagram.com/tv/aye83DjauH/', 'only_matching': True, }, { 'url': 'https://www.instagram.com/reel/CDUMkliABpa/', 'only_matching': True, }, { 'url': 'https://www.instagram.com/marvelskies.fc/reel/CWqAgUZgCku/', 'only_matching': True, }, { 'url': 'https://www.instagram.com/reels/Cop84x6u7CP/', 'only_matching': True, }] @classmethod def _extract_embed_urls(cls, url, webpage): res = tuple(super()._extract_embed_urls(url, webpage)) if res: return res mobj = re.search(r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', get_element_by_attribute('class', 'instagram-media', webpage) or '') if mobj: return [mobj.group('link')] def _real_extract(self, url): video_id, url = self._match_valid_url(url).group('id', 'url') media, webpage = {}, '' if self._get_cookies(url).get('sessionid'): info = traverse_obj(self._download_json( f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/info/', video_id, fatal=False, errnote='Video info extraction failed', note='Downloading video info', headers=self._api_headers), ('items', 0)) if info: media.update(info) return self._extract_product(media) api_check = self._download_json( f'{self._API_BASE_URL}/web/get_ruling_for_content/?content_type=MEDIA&target_id={_id_to_pk(video_id)}', video_id, headers=self._api_headers, fatal=False, note='Setting up session', errnote=False) or {} csrf_token = self._get_cookies('https://www.instagram.com').get('csrftoken') if not csrf_token: self.report_warning('No csrf token set by Instagram API', video_id) else: csrf_token = csrf_token.value if api_check.get('status') == 'ok' else None if not csrf_token: self.report_warning('Instagram API is not granting access', video_id) variables = { 'shortcode': video_id, 'child_comment_count': 3, 'fetch_comment_count': 40, 'parent_comment_count': 24, 'has_threaded_comments': True, } general_info = self._download_json( 
'https://www.instagram.com/graphql/query/', video_id, fatal=False, errnote=False, headers={ **self._api_headers, 'X-CSRFToken': csrf_token or '', 'X-Requested-With': 'XMLHttpRequest', 'Referer': url, }, query={ 'doc_id': '8845758582119845', 'variables': json.dumps(variables, separators=(',', ':')), }) if not general_info: self.report_warning('General metadata extraction failed (some metadata might be missing).', video_id) webpage, urlh = self._download_webpage_handle(url, video_id) shared_data = self._search_json( r'window\._sharedData\s*=', webpage, 'shared data', video_id, fatal=False) or {} if shared_data and self._LOGIN_URL not in urlh.url: media.update(traverse_obj( shared_data, ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'), ('entry_data', 'PostPage', 0, 'media'), expected_type=dict) or {}) else: self.report_warning('Main webpage is locked behind the login page. Retrying with embed webpage (some metadata might be missing).') webpage = self._download_webpage( f'{url}/embed/', video_id, note='Downloading embed webpage', fatal=False) or '' additional_data = self._search_json( r'window\.__additionalDataLoaded\s*\(\s*[^,]+,', webpage, 'additional data', video_id, fatal=False) if not additional_data and not media: self.raise_login_required('Requested content is not available, rate-limit reached or login required') product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict) if product_item: media.update(product_item) return self._extract_product(media) media.update(traverse_obj( additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {}) else: xdt_shortcode_media = traverse_obj(general_info, ('data', 'xdt_shortcode_media', {dict})) or {} if not xdt_shortcode_media: error = join_nonempty('title', 'description', delim=': ', from_dict=api_check) if 'Restricted Video' in error: self.raise_login_required(error) elif error: raise ExtractorError(error, expected=True) elif len(video_id) > 28: # It's a 
private post (video_id == shortcode + 28 extra characters) # Only raise after getting empty response; sometimes "long"-shortcode posts are public self.raise_login_required( 'This content is only available for registered users who follow this account') raise ExtractorError( 'Instagram sent an empty media response. Check if this post is accessible in your ' f'browser without being logged-in. If it is not, then u{self._login_hint()[1:]}. ' 'Otherwise, if the post is accessible in browser without being logged-in' f'{bug_reports_message(before=",")}', expected=True) media.update(xdt_shortcode_media) username = traverse_obj(media, ('owner', 'username')) or self._search_regex( r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'username', fatal=False) description = ( traverse_obj(media, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str) or media.get('caption')) if not description: description = self._search_regex( r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None) if description is not None: description = lowercase_escape(description) video_url = media.get('video_url') if not video_url: nodes = traverse_obj(media, ('edge_sidecar_to_children', 'edges', ..., 'node'), expected_type=dict) or [] if nodes: return self.playlist_result( self._extract_nodes(nodes, True), video_id, format_field(username, None, 'Post by %s'), description) raise ExtractorError('There is no video in this post', expected=True) formats = [{ 'url': video_url, 'width': self._get_dimension('width', media, webpage), 'height': self._get_dimension('height', media, webpage), }] dash = traverse_obj(media, ('dash_info', 'video_dash_manifest')) if dash: formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash')) comment_data = traverse_obj(media, ('edge_media_to_parent_comment', 'edges')) comments = [{ 'author': traverse_obj(comment_dict, ('node', 'owner', 'username')), 'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')), 'id': 
traverse_obj(comment_dict, ('node', 'id')), 'text': traverse_obj(comment_dict, ('node', 'text')), 'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none), } for comment_dict in comment_data] if comment_data else None display_resources = ( media.get('display_resources') or [{'src': media.get(key)} for key in ('display_src', 'display_url')] or [{'src': self._og_search_thumbnail(webpage)}]) thumbnails = [{ 'url': thumbnail['src'], 'width': thumbnail.get('config_width'), 'height': thumbnail.get('config_height'), } for thumbnail in display_resources if thumbnail.get('src')] return { 'id': video_id, 'formats': formats, 'title': media.get('title') or f'Video by {username}', 'description': description, 'duration': float_or_none(media.get('video_duration')), 'timestamp': traverse_obj(media, 'taken_at_timestamp', 'date', expected_type=int_or_none), 'uploader_id': traverse_obj(media, ('owner', 'id')), 'uploader': traverse_obj(media, ('owner', 'full_name')), 'channel': username, 'like_count': self._get_count(media, 'likes', 'preview_like') or str_to_int(self._search_regex( r'data-log-event="likeCountClick"[^>]*>[^\d]*([\d,\.]+)', webpage, 'like count', fatal=False)), 'comment_count': self._get_count(media, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'), 'comments': comments, 'thumbnails': thumbnails, 'http_headers': { 'Referer': 'https://www.instagram.com/', }, } class InstagramPlaylistBaseIE(InstagramBaseIE): _gis_tmpl = None # used to cache GIS request type def _parse_graphql(self, webpage, item_id): # Reads a webpage and returns its GraphQL data. return self._parse_json( self._search_regex( r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'), item_id) def _extract_graphql(self, data, url): # Parses GraphQL queries containing videos and generates a playlist. 
uploader_id = self._match_id(url) csrf_token = data['config']['csrf_token'] rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8' cursor = '' for page_num in itertools.count(1): variables = { 'first': 12, 'after': cursor, } variables.update(self._query_vars_for(data)) variables = json.dumps(variables) if self._gis_tmpl: gis_tmpls = [self._gis_tmpl] else: gis_tmpls = [ f'{rhx_gis}', '', f'{rhx_gis}:{csrf_token}', '{}:{}:{}'.format(rhx_gis, csrf_token, self.get_param('http_headers')['User-Agent']), ] # try all of the ways to generate a GIS query, and not only use the # first one that works, but cache it for future requests for gis_tmpl in gis_tmpls: try: json_data = self._download_json( 'https://www.instagram.com/graphql/query/', uploader_id, f'Downloading JSON page {page_num}', headers={ 'X-Requested-With': 'XMLHttpRequest', 'X-Instagram-GIS': hashlib.md5( (f'{gis_tmpl}:{variables}').encode()).hexdigest(), }, query={ 'query_hash': self._QUERY_HASH, 'variables': variables, }) media = self._parse_timeline_from(json_data) self._gis_tmpl = gis_tmpl break except ExtractorError as e: # if it's an error caused by a bad query, and there are # more GIS templates to try, ignore it and keep trying if isinstance(e.cause, HTTPError) and e.cause.status == 403: if gis_tmpl != gis_tmpls[-1]: continue raise nodes = traverse_obj(media, ('edges', ..., 'node'), expected_type=dict) or [] if not nodes: break yield from self._extract_nodes(nodes) has_next_page = traverse_obj(media, ('page_info', 'has_next_page')) cursor = traverse_obj(media, ('page_info', 'end_cursor'), expected_type=str) if not has_next_page or not cursor: break def _real_extract(self, url): user_or_tag = self._match_id(url) webpage = self._download_webpage(url, user_or_tag) data = self._parse_graphql(webpage, user_or_tag) self._set_cookie('instagram.com', 'ig_pr', '1') return self.playlist_result( self._extract_graphql(data, url), user_or_tag, user_or_tag) class InstagramUserIE(InstagramPlaylistBaseIE): 
_WORKING = False _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])' IE_DESC = 'Instagram user profile' IE_NAME = 'instagram:user' _TESTS = [{ 'url': 'https://instagram.com/porsche', 'info_dict': { 'id': 'porsche', 'title': 'porsche', }, 'playlist_count': 5, 'params': { 'extract_flat': True, 'skip_download': True, 'playlistend': 5, }, }] _QUERY_HASH = ('42323d64886122307be10013ad2dcc44',) @staticmethod def _parse_timeline_from(data): # extracts the media timeline data from a GraphQL result return data['data']['user']['edge_owner_to_timeline_media'] @staticmethod def _query_vars_for(data): # returns a dictionary of variables to add to the timeline query based # on the GraphQL of the original page return { 'id': data['entry_data']['ProfilePage'][0]['graphql']['user']['id'], } class InstagramTagIE(InstagramPlaylistBaseIE): _VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)' IE_DESC = 'Instagram hashtag search URLs' IE_NAME = 'instagram:tag' _TESTS = [{ 'url': 'https://instagram.com/explore/tags/lolcats', 'info_dict': { 'id': 'lolcats', 'title': 'lolcats', }, 'playlist_count': 50, 'params': { 'extract_flat': True, 'skip_download': True, 'playlistend': 50, }, }] _QUERY_HASH = ('f92f56d47dc7a55b606908374b43a314',) @staticmethod def _parse_timeline_from(data): # extracts the media timeline data from a GraphQL result return data['data']['hashtag']['edge_hashtag_to_media'] @staticmethod def _query_vars_for(data): # returns a dictionary of variables to add to the timeline query based # on the GraphQL of the original page return { 'tag_name': data['entry_data']['TagPage'][0]['graphql']['hashtag']['name'], } class InstagramStoryIE(InstagramBaseIE): _VALID_URL = r'https?://(?:www\.)?instagram\.com/stories/(?P<user>[^/?#]+)(?:/(?P<id>\d+))?' 
IE_NAME = 'instagram:story' _TESTS = [{ 'url': 'https://www.instagram.com/stories/highlights/18090946048123978/', 'info_dict': { 'id': '18090946048123978', 'title': 'Rare', }, 'playlist_mincount': 50, }, { 'url': 'https://www.instagram.com/stories/fruits_zipper/3570766765028588805/', 'only_matching': True, }, { 'url': 'https://www.instagram.com/stories/fruits_zipper', 'only_matching': True, }] def _real_extract(self, url): username, story_id = self._match_valid_url(url).group('user', 'id') if username == 'highlights' and not story_id: # story id is only mandatory for highlights raise ExtractorError('Input URL is missing a highlight ID', expected=True) display_id = story_id or username story_info = self._download_webpage(url, display_id) user_info = self._search_json(r'"user":', story_info, 'user info', display_id, fatal=False) if not user_info: self.raise_login_required('This content is unreachable') user_id = traverse_obj(user_info, 'pk', 'id', expected_type=str) if username == 'highlights': story_info_url = f'highlight:{story_id}' else: if not user_id: # user id is only mandatory for non-highlights raise ExtractorError('Unable to extract user id') story_info_url = user_id videos = traverse_obj(self._download_json( f'{self._API_BASE_URL}/feed/reels_media/?reel_ids={story_info_url}', display_id, errnote=False, fatal=False, headers=self._api_headers), 'reels') if not videos: self.raise_login_required('You need to log in to access this content') user_info = traverse_obj(videos, (user_id, 'user', {dict})) or {} full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (user_id, 'user', 'full_name')) story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title')) if not story_title: story_title = f'Story by {username}' highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (user_id, 'items')) info_data = [] for highlight in highlights: highlight.setdefault('user', {}).update(user_info) highlight_data = 
self._extract_product(highlight) if highlight_data.get('formats'): info_data.append({ 'uploader': full_name, 'uploader_id': user_id, **filter_dict(highlight_data), }) if username != 'highlights' and story_id and not self._yes_playlist(username, story_id): return traverse_obj(info_data, (lambda _, v: v['id'] == _pk_to_id(story_id), any)) return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/viously.py
yt_dlp/extractor/viously.py
import base64 import re from .common import InfoExtractor from ..utils import ( extract_attributes, int_or_none, parse_iso8601, ) from ..utils.traversal import traverse_obj class ViouslyIE(InfoExtractor): _VALID_URL = False _WEBPAGE_TESTS = [{ 'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html', 'md5': '37a6c3381599381ff53a7e1e0575c0bc', 'info_dict': { 'id': 'F_xQzS2jwb3', 'ext': 'mp4', 'title': 'Turbo du 07/09/2014\xa0: Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...', 'description': 'Turbo du 07/09/2014\xa0: Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...', 'age_limit': 0, 'upload_date': '20230328', 'timestamp': 1680037507, 'duration': 3716, 'categories': ['motors'], }, }] def _extract_from_webpage(self, url, webpage): viously_players = re.findall(r'<div[^>]*class="(?:[^"]*\s)?v(?:iou)?sly-player(?:\s[^"]*)?"[^>]*>', webpage) if not viously_players: return def custom_decode(text): STANDARD_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=' CUSTOM_ALPHABET = 'VIOUSLYABCDEFGHJKMNPQRTWXZviouslyabcdefghjkmnpqrtwxz9876543210+/=' data = base64.b64decode(text.translate(str.maketrans(CUSTOM_ALPHABET, STANDARD_ALPHABET))) return data.decode('utf-8').strip('\x00') for video_id in traverse_obj(viously_players, (..., {extract_attributes}, 'id')): formats = self._extract_m3u8_formats( f'https://www.viously.com/video/hls/{video_id}/index.m3u8', video_id, fatal=False) if not formats: continue data = self._download_json( f'https://www.viously.com/export/json/{video_id}', video_id, transform_source=custom_decode, fatal=False) yield { 'id': video_id, 'formats': formats, **traverse_obj(data, ('video', { 'title': ('title', {str}), 'description': ('description', {str}), 'duration': ('duration', {int_or_none}), 'timestamp': ('iso_date', {parse_iso8601}), 'categories': ('category', 'name', {str}, {lambda x: [x] if x 
else None}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/parti.py
yt_dlp/extractor/parti.py
from .common import InfoExtractor from ..utils import UserNotLive, int_or_none, parse_iso8601, url_or_none, urljoin from ..utils.traversal import traverse_obj class PartiBaseIE(InfoExtractor): def _call_api(self, path, video_id, note=None): return self._download_json( f'https://prod-api.parti.com/parti_v2/profile/{path}', video_id, note, headers={ 'Origin': 'https://parti.com', 'Referer': 'https://parti.com/', }) class PartiVideoIE(PartiBaseIE): IE_NAME = 'parti:video' _VALID_URL = r'https?://(?:www\.)?parti\.com/video/(?P<id>\d+)' _TESTS = [{ 'url': 'https://parti.com/video/66284', 'info_dict': { 'id': '66284', 'ext': 'mp4', 'title': 'NOW LIVE ', 'upload_date': '20250327', 'categories': ['Gaming'], 'thumbnail': 'https://media.parti.com/351424_eb9e5250-2821-484a-9c5f-ca99aa666c87.png', 'channel': 'ItZTMGG', 'timestamp': 1743044379, }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._call_api(f'get_livestream_channel_info/recent/{video_id}', video_id) return { 'id': video_id, 'formats': self._extract_m3u8_formats( urljoin('https://media.parti.com/', data['livestream_recording']), video_id, 'mp4'), **traverse_obj(data, { 'title': ('event_title', {str}), 'channel': ('user_name', {str}), 'thumbnail': ('event_file', {url_or_none}), 'categories': ('category_name', {str}, filter, all), 'timestamp': ('event_start_ts', {int_or_none}), }), } class PartiLivestreamIE(PartiBaseIE): IE_NAME = 'parti:livestream' _VALID_URL = r'https?://(?:www\.)?parti\.com/(?!video/)(?P<id>[\w/-]+)' _TESTS = [{ 'url': 'https://parti.com/247CryptoTracker', 'info_dict': { 'ext': 'mp4', 'id': '247CryptoTracker', 'description': 'md5:a78051f3d7e66e6a64c6b1eaf59fd364', 'title': r"re:I'm Live on Parti \d{4}-\d{2}-\d{2} \d{2}:\d{2}", 'thumbnail': r're:https://media\.parti\.com/stream-screenshots/.+\.png', 'live_status': 'is_live', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): creator_slug = self._match_id(url) 
encoded_creator_slug = creator_slug.replace('/', '%23') creator_id = self._call_api( f'user_id_from_name/{encoded_creator_slug}', creator_slug, note='Fetching user ID')['user_id'] data = self._call_api( f'get_livestream_channel_info/{creator_id}', creator_id, note='Fetching user profile feed')['channel_info'] if not traverse_obj(data, ('channel', 'is_live', {bool})): raise UserNotLive(video_id=creator_id) channel_info = data['channel'] return { 'id': creator_slug, 'formats': self._extract_m3u8_formats(channel_info['playback_url'], creator_slug, live=True), 'is_live': True, **traverse_obj(data, { 'title': ('livestream_event_info', 'event_name', {str}), 'description': ('livestream_event_info', 'event_description', {str}), 'thumbnail': ('livestream_event_info', 'livestream_preview_file', {url_or_none}), 'timestamp': ('stream', 'start_time', {parse_iso8601}), 'view_count': ('stream', 'viewer_count', {int_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/worldstarhiphop.py
yt_dlp/extractor/worldstarhiphop.py
from .common import InfoExtractor class WorldStarHipHopIE(InfoExtractor): _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?.*?\bv=(?P<id>[^&]+)' _TESTS = [{ 'url': 'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO', 'md5': '9d04de741161603bf7071bbf4e883186', 'info_dict': { 'id': 'wshh6a7q1ny0G34ZwuIO', 'ext': 'mp4', 'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!', }, }, { 'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) entries = self._parse_html5_media_entries(url, webpage, video_id) if not entries: return self.url_result(url, 'Generic') title = self._html_search_regex( [r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>', r'<span[^>]+class="tc-sp-pinned-title">(.*)</span>'], webpage, 'title') info = entries[0] info.update({ 'id': video_id, 'title': title, }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/damtomo.py
yt_dlp/extractor/damtomo.py
import re from .common import InfoExtractor from ..utils import ExtractorError, clean_html, int_or_none, try_get, unified_strdate class DamtomoBaseIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) webpage, handle = self._download_webpage_handle(self._WEBPAGE_URL_TMPL % video_id, video_id, encoding='sjis') if handle.url == 'https://www.clubdam.com/sorry/': raise ExtractorError('You are rate-limited. Try again later.', expected=True) if '<h2>予期せぬエラーが発生しました。</h2>' in webpage: raise ExtractorError('There is an error on server-side. Try again later.', expected=True) description = self._search_regex(r'(?m)<div id="public_comment">\s*<p>\s*([^<]*?)\s*</p>', webpage, 'description', default=None) uploader_id = self._search_regex(r'<a href="https://www\.clubdam\.com/app/damtomo/member/info/Profile\.do\?damtomoId=([^"]+)"', webpage, 'uploader_id', default=None) data_dict = { mobj.group('class'): re.sub(r'\s+', ' ', clean_html(mobj.group('value'))) for mobj in re.finditer(r'(?s)<(p|div)\s+class="(?P<class>[^" ]+?)">(?P<value>.+?)</\1>', webpage)} # since videos do not have title, give the name of song instead data_dict['user_name'] = re.sub(r'\s*さん\s*$', '', data_dict['user_name']) title = data_dict.get('song_title') stream_tree = self._download_xml( self._DKML_XML_URL % video_id, video_id, note='Requesting stream information', encoding='sjis', # doing this has no problem since there is no character outside ASCII, # and never likely to happen in the future transform_source=lambda x: re.sub(r'\s*encoding="[^"]+?"', '', x)) m3u8_url = try_get(stream_tree, lambda x: x.find( './/d:streamingUrl', {'d': self._DKML_XML_NS}).text.strip(), str) if not m3u8_url: raise ExtractorError('Failed to obtain m3u8 URL') formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4') return { 'id': video_id, 'title': title, 'uploader_id': uploader_id, 'description': description, 'uploader': data_dict.get('user_name'), 'upload_date': 
unified_strdate(self._search_regex(r'(\d{4}/\d{2}/\d{2})', data_dict.get('date'), 'upload_date', default=None)), 'view_count': int_or_none(self._search_regex(r'(\d+)', data_dict['audience'], 'view_count', default=None)), 'like_count': int_or_none(self._search_regex(r'(\d+)', data_dict['nice'], 'like_count', default=None)), 'track': title, 'artist': data_dict.get('song_artist'), 'formats': formats, } class DamtomoVideoIE(DamtomoBaseIE): IE_NAME = 'damtomo:video' _VALID_URL = r'https?://(?:www\.)?clubdam\.com/app/damtomo/(?:SP/)?karaokeMovie/StreamingDkm\.do\?karaokeMovieId=(?P<id>\d+)' _WEBPAGE_URL_TMPL = 'https://www.clubdam.com/app/damtomo/karaokeMovie/StreamingDkm.do?karaokeMovieId=%s' _DKML_XML_URL = 'https://www.clubdam.com/app/damtomo/karaokeMovie/GetStreamingDkmUrlXML.do?movieSelectFlg=2&karaokeMovieId=%s' _DKML_XML_NS = 'https://www.clubdam.com/app/damtomo/karaokeMovie/GetStreamingDkmUrlXML' _TESTS = [{ 'url': 'https://www.clubdam.com/app/damtomo/karaokeMovie/StreamingDkm.do?karaokeMovieId=2414316', 'info_dict': { 'id': '2414316', 'title': 'Get Wild', 'uploader': 'Kドロン', 'uploader_id': 'ODk5NTQwMzQ', 'track': 'Get Wild', 'artist': 'TM NETWORK(TMN)', 'upload_date': '20201226', }, }] class DamtomoRecordIE(DamtomoBaseIE): IE_NAME = 'damtomo:record' _VALID_URL = r'https?://(?:www\.)?clubdam\.com/app/damtomo/(?:SP/)?karaokePost/StreamingKrk\.do\?karaokeContributeId=(?P<id>\d+)' _WEBPAGE_URL_TMPL = 'https://www.clubdam.com/app/damtomo/karaokePost/StreamingKrk.do?karaokeContributeId=%s' _DKML_XML_URL = 'https://www.clubdam.com/app/damtomo/karaokePost/GetStreamingKrkUrlXML.do?karaokeContributeId=%s' _DKML_XML_NS = 'https://www.clubdam.com/app/damtomo/karaokePost/GetStreamingKrkUrlXML' _TESTS = [{ 'url': 'https://www.clubdam.com/app/damtomo/karaokePost/StreamingKrk.do?karaokeContributeId=27376862', 'info_dict': { 'id': '27376862', 'title': 'イカSUMMER [良音]', 'uploader': 'NANA', 'uploader_id': 'MzAyMDExNTY', 'upload_date': '20210721', 'view_count': 4, 'like_count': 1, 
'track': 'イカSUMMER [良音]', 'artist': 'ORANGE RANGE', }, }, { 'url': 'https://www.clubdam.com/app/damtomo/karaokePost/StreamingKrk.do?karaokeContributeId=27489418', 'info_dict': { 'id': '27489418', 'title': '心みだれて〜say it with flowers〜(生音)', 'uploader_id': 'NjI1MjI2MjU', 'description': 'やっぱりキーを下げて正解だった感じ。リベンジ成功ということで。', 'uploader': '箱の「中の人」', 'upload_date': '20210815', 'view_count': 5, 'like_count': 3, 'track': '心みだれて〜say it with flowers〜(生音)', 'artist': '小林明子', }, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rudovideo.py
yt_dlp/extractor/rudovideo.py
from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, js_to_json, traverse_obj, update_url_query, url_or_none, ) class RudoVideoIE(InfoExtractor): _VALID_URL = r'https?://rudo\.video/(?P<type>vod|podcast|live)/(?P<id>[^/?&#]+)' _EMBED_REGEX = [r'<iframe[^>]+src=[\'"](?P<url>(?:https?:)//rudo\.video/(?:vod|podcast|live)/[^\'"]+)'] _TESTS = [{ 'url': 'https://rudo.video/podcast/cz2wrUy8l0o', 'md5': '28ed82b477708dc5e12e072da2449221', 'info_dict': { 'id': 'cz2wrUy8l0o', 'title': 'Diego Cabot', 'ext': 'mp4', 'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$', }, }, { 'url': 'https://rudo.video/podcast/bQkt07', 'md5': '36b22a9863de0f47f00fc7532a32a898', 'info_dict': { 'id': 'bQkt07', 'title': 'Tubular Bells', 'ext': 'mp4', 'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$', }, }, { 'url': 'https://rudo.video/podcast/b42ZUznHX0', 'md5': 'b91c70d832938871367f8ad10c895821', 'info_dict': { 'id': 'b42ZUznHX0', 'title': 'Columna Ruperto Concha', 'ext': 'mp3', 'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$', }, }, { 'url': 'https://rudo.video/vod/bN5AaJ', 'md5': '01324a329227e2591530ecb4f555c881', 'info_dict': { 'id': 'bN5AaJ', 'title': 'Ucrania 19.03', 'creator': 'La Tercera', 'ext': 'mp4', 'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$', }, }, { 'url': 'https://rudo.video/live/bbtv', 'info_dict': { 'id': 'bbtv', 'ext': 'mp4', 'creator': 'BioBioTV', 'live_status': 'is_live', 'title': r're:^LIVE BBTV\s\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}$', 'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$', }, }, { 'url': 'https://rudo.video/live/c13', 'info_dict': { 'id': 'c13', 'title': 'CANAL13', 'ext': 'mp4', }, 'skip': 'Geo-restricted to Chile', }, { 'url': 'https://rudo.video/live/t13-13cl', 'info_dict': { 'id': 't13-13cl', 'title': 'T13', 'ext': 'mp4', }, 'skip': 'Geo-restricted to Chile', }] def _real_extract(self, url): video_id, type_ = self._match_valid_url(url).group('id', 'type') is_live = type_ == 'live' webpage = self._download_webpage(url, video_id) if 
'Streaming is not available in your area' in webpage: self.raise_geo_restricted() media_url = ( self._search_regex( r'var\s+streamURL\s*=\s*[\'"]([^?\'"]+)', webpage, 'stream url', default=None) # Source URL must be used only if streamURL is unavailable or self._search_regex( r'<source[^>]+src=[\'"]([^\'"]+)', webpage, 'source url', default=None)) if not media_url: youtube_url = self._search_regex(r'file:\s*[\'"]((?:https?:)//(?:www\.)?youtube\.com[^\'"]+)', webpage, 'youtube url', default=None) if youtube_url: return self.url_result(youtube_url, 'Youtube') raise ExtractorError('Unable to extract stream url') token_array = self._search_json( r'<script>var\s+_\$_[a-zA-Z0-9]+\s*=', webpage, 'access token array', video_id, contains_pattern=r'\[(?s:.+)\]', default=None, transform_source=js_to_json) if token_array: token_url = traverse_obj(token_array, (..., {url_or_none}), get_all=False) if not token_url: raise ExtractorError('Invalid access token array') access_token = self._download_json( token_url, video_id, note='Downloading access token')['data']['authToken'] media_url = update_url_query(media_url, {'auth-token': access_token}) ext = determine_ext(media_url) if ext == 'm3u8': formats = self._extract_m3u8_formats(media_url, video_id, live=is_live) elif ext == 'mp3': formats = [{ 'url': media_url, 'vcodec': 'none', }] else: formats = [{'url': media_url}] return { 'id': video_id, 'title': (self._search_regex(r'var\s+titleVideo\s*=\s*[\'"]([^\'"]+)', webpage, 'title', default=None) or self._og_search_title(webpage)), 'creator': self._search_regex(r'var\s+videoAuthor\s*=\s*[\'"]([^?\'"]+)', webpage, 'videoAuthor', default=None), 'thumbnail': (self._search_regex(r'var\s+posterIMG\s*=\s*[\'"]([^?\'"]+)', webpage, 'thumbnail', default=None) or self._og_search_thumbnail(webpage)), 'formats': formats, 'is_live': is_live, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/joj.py
yt_dlp/extractor/joj.py
from .common import InfoExtractor from ..utils import ( format_field, int_or_none, js_to_json, try_get, ) class JojIE(InfoExtractor): _VALID_URL = r'''(?x) (?: joj:| https?://media\.joj\.sk/embed/ ) (?P<id>[^/?#^]+) ''' _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//media\.joj\.sk/embed/(?:(?!\1).)+)\1'] _TESTS = [{ 'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932', 'info_dict': { 'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932', 'ext': 'mp4', 'title': 'NOVÉ BÝVANIE', 'duration': 3118, 'thumbnail': r're:https?://img\.joj\.sk/.+', }, }, { 'url': 'https://media.joj.sk/embed/CSM0Na0l0p1', 'info_dict': { 'id': 'CSM0Na0l0p1', 'ext': 'mp4', 'title': 'Extrémne rodiny 2 - POKRAČOVANIE (2012/04/09 21:30:00)', 'duration': 3937, 'thumbnail': r're:https?://img\.joj\.sk/.+', }, }, { 'url': 'https://media.joj.sk/embed/9i1cxv', 'only_matching': True, }, { 'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932', 'only_matching': True, }, { 'url': 'joj:9i1cxv', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # FIXME: Embed detection 'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok', 'info_dict': { 'id': '238543-slovenskom-sa-prehnala-vlna-silnych-burok', 'title': 'Slovenskom sa prehnala vlna silných búrok', }, 'playlist_mincount': 5, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'https://media.joj.sk/embed/{video_id}', video_id) title = (self._search_json(r'videoTitle\s*:', webpage, 'title', video_id, contains_pattern=r'["\'].+["\']', default=None) or self._html_extract_title(webpage, default=None) or self._og_search_title(webpage)) bitrates = self._parse_json( self._search_regex( r'(?s)(?:src|bitrates)\s*=\s*({.+?});', webpage, 'bitrates', default='{}'), video_id, transform_source=js_to_json, fatal=False) formats = [] for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []: if isinstance(format_url, str): height = self._search_regex( 
r'(\d+)[pP]|(pal)\.', format_url, 'height', default=None) if height == 'pal': height = 576 formats.append({ 'url': format_url, 'format_id': format_field(height, None, '%sp'), 'height': int_or_none(height), }) if not formats: playlist = self._download_xml( f'https://media.joj.sk/services/Video.php?clip={video_id}', video_id) for file_el in playlist.findall('./files/file'): path = file_el.get('path') if not path: continue format_id = file_el.get('id') or file_el.get('label') formats.append({ 'url': 'http://n16.joj.sk/storage/{}'.format(path.replace( 'dat/', '', 1)), 'format_id': format_id, 'height': int_or_none(self._search_regex( r'(\d+)[pP]', format_id or path, 'height', default=None)), }) thumbnail = self._og_search_thumbnail(webpage) duration = int_or_none(self._search_regex( r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False)) return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/yfanefa.py
yt_dlp/extractor/yfanefa.py
from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, join_nonempty, remove_end, url_or_none, ) from ..utils.traversal import traverse_obj class YfanefaIE(InfoExtractor): IE_NAME = 'yfanefa' _VALID_URL = r'https?://(?:www\.)?yfanefa\.com/(?P<id>[^?#]+)' _TESTS = [{ 'url': 'https://www.yfanefa.com/record/2717', 'info_dict': { 'id': 'record-2717', 'ext': 'mp4', 'title': 'THE HALLAMSHIRE RIFLES LEAVING SHEFFIELD, 1914', 'duration': 5239, 'thumbnail': r're:https://media\.yfanefa\.com/storage/v1/file/', }, }, { 'url': 'https://www.yfanefa.com/news/53', 'info_dict': { 'id': 'news-53', 'ext': 'mp4', 'title': 'Memory Bank: Bradford Launch', 'thumbnail': r're:https://media\.yfanefa\.com/storage/v1/file/', }, }, { 'url': 'https://www.yfanefa.com/evaluating_nature_matters', 'info_dict': { 'id': 'evaluating_nature_matters', 'ext': 'mp4', 'title': 'Evaluating Nature Matters', 'thumbnail': r're:https://media\.yfanefa\.com/storage/v1/file/', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) player_data = self._search_json( r'iwPlayer\.options\["[\w.]+"\]\s*=', webpage, 'player options', video_id) formats = [] video_url = join_nonempty(player_data['url'], player_data.get('signature'), delim='') if determine_ext(video_url) == 'm3u8': formats = self._extract_m3u8_formats( video_url, video_id, 'mp4', m3u8_id='hls') else: formats = [{'url': video_url, 'ext': 'mp4'}] return { 'id': video_id.strip('/').replace('/', '-'), 'title': self._og_search_title(webpage, default=None) or remove_end(self._html_extract_title(webpage), ' | Yorkshire Film Archive'), 'formats': formats, **traverse_obj(player_data, { 'thumbnail': ('preview', {url_or_none}), 'duration': ('duration', {int_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/openload.py
yt_dlp/extractor/openload.py
import collections import contextlib import json import os import subprocess import tempfile import urllib.parse from ..utils import ( ExtractorError, Popen, check_executable, format_field, get_exe_version, is_outdated_version, shell_quote, ) def cookie_to_dict(cookie): cookie_dict = { 'name': cookie.name, 'value': cookie.value, } if cookie.port_specified: cookie_dict['port'] = cookie.port if cookie.domain_specified: cookie_dict['domain'] = cookie.domain if cookie.path_specified: cookie_dict['path'] = cookie.path if cookie.expires is not None: cookie_dict['expires'] = cookie.expires if cookie.secure is not None: cookie_dict['secure'] = cookie.secure if cookie.discard is not None: cookie_dict['discard'] = cookie.discard with contextlib.suppress(TypeError): if (cookie.has_nonstandard_attr('httpOnly') or cookie.has_nonstandard_attr('httponly') or cookie.has_nonstandard_attr('HttpOnly')): cookie_dict['httponly'] = True return cookie_dict def cookie_jar_to_list(cookie_jar): return [cookie_to_dict(cookie) for cookie in cookie_jar] class PhantomJSwrapper: """PhantomJS wrapper class This class is experimental. """ INSTALL_HINT = 'Please download it from https://phantomjs.org/download.html' _BASE_JS = R''' phantom.onError = function(msg, trace) {{ var msgStack = ['PHANTOM ERROR: ' + msg]; if(trace && trace.length) {{ msgStack.push('TRACE:'); trace.forEach(function(t) {{ msgStack.push(' -> ' + (t.file || t.sourceURL) + ': ' + t.line + (t.function ? 
' (in function ' + t.function +')' : '')); }}); }} console.error(msgStack.join('\n')); phantom.exit(1); }}; ''' _TEMPLATE = R''' var page = require('webpage').create(); var fs = require('fs'); var read = {{ mode: 'r', charset: 'utf-8' }}; var write = {{ mode: 'w', charset: 'utf-8' }}; JSON.parse(fs.read("{cookies}", read)).forEach(function(x) {{ phantom.addCookie(x); }}); page.settings.resourceTimeout = {timeout}; page.settings.userAgent = "{ua}"; page.onLoadStarted = function() {{ page.evaluate(function() {{ delete window._phantom; delete window.callPhantom; }}); }}; var saveAndExit = function() {{ fs.write("{html}", page.content, write); fs.write("{cookies}", JSON.stringify(phantom.cookies), write); phantom.exit(); }}; page.onLoadFinished = function(status) {{ if(page.url === "") {{ page.setContent(fs.read("{html}", read), "{url}"); }} else {{ {jscode} }} }}; page.open(""); ''' _TMP_FILE_NAMES = ['script', 'html', 'cookies'] @staticmethod def _version(): return get_exe_version('phantomjs', version_re=r'([0-9.]+)') def __init__(self, extractor, required_version=None, timeout=10000): self._TMP_FILES = {} self.exe = check_executable('phantomjs', ['-v']) if not self.exe: raise ExtractorError(f'PhantomJS not found, {self.INSTALL_HINT}', expected=True) self.extractor = extractor if required_version: version = self._version() if is_outdated_version(version, required_version): self.extractor._downloader.report_warning( 'Your copy of PhantomJS is outdated, update it to version ' f'{required_version} or newer if you encounter any errors.') for name in self._TMP_FILE_NAMES: tmp = tempfile.NamedTemporaryFile(delete=False) tmp.close() self._TMP_FILES[name] = tmp self.options = collections.ChainMap({ 'timeout': timeout, }, { x: self._TMP_FILES[x].name.replace('\\', '\\\\').replace('"', '\\"') for x in self._TMP_FILE_NAMES }) def __del__(self): for name in self._TMP_FILE_NAMES: with contextlib.suppress(OSError, KeyError): os.remove(self._TMP_FILES[name].name) def 
_save_cookies(self, url): cookies = cookie_jar_to_list(self.extractor.cookiejar) for cookie in cookies: if 'path' not in cookie: cookie['path'] = '/' if 'domain' not in cookie: cookie['domain'] = urllib.parse.urlparse(url).netloc with open(self._TMP_FILES['cookies'].name, 'wb') as f: f.write(json.dumps(cookies).encode()) def _load_cookies(self): with open(self._TMP_FILES['cookies'].name, 'rb') as f: cookies = json.loads(f.read().decode('utf-8')) for cookie in cookies: if cookie['httponly'] is True: cookie['rest'] = {'httpOnly': None} if 'expiry' in cookie: cookie['expire_time'] = cookie['expiry'] self.extractor._set_cookie(**cookie) def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage', headers={}, jscode='saveAndExit();'): """ Downloads webpage (if needed) and executes JS Params: url: website url html: optional, html code of website video_id: video id note: optional, displayed when downloading webpage note2: optional, displayed when executing JS headers: custom http headers jscode: code to be executed when page is loaded Returns tuple with: * downloaded website (after JS execution) * anything you print with `console.log` (but not inside `page.execute`!) In most cases you don't need to add any `jscode`. It is executed in `page.onLoadFinished`. `saveAndExit();` is mandatory, use it instead of `phantom.exit()` It is possible to wait for some element on the webpage, e.g. 
var check = function() { var elementFound = page.evaluate(function() { return document.querySelector('#b.done') !== null; }); if(elementFound) saveAndExit(); else window.setTimeout(check, 500); } page.evaluate(function(){ document.querySelector('#a').click(); }); check(); """ if 'saveAndExit();' not in jscode: raise ExtractorError('`saveAndExit();` not found in `jscode`') if not html: html = self.extractor._download_webpage(url, video_id, note=note, headers=headers) with open(self._TMP_FILES['html'].name, 'wb') as f: f.write(html.encode()) self._save_cookies(url) user_agent = headers.get('User-Agent') or self.extractor.get_param('http_headers')['User-Agent'] jscode = self._TEMPLATE.format_map(self.options.new_child({ 'url': url, 'ua': user_agent.replace('"', '\\"'), 'jscode': jscode, })) stdout = self.execute(jscode, video_id, note=note2) with open(self._TMP_FILES['html'].name, 'rb') as f: html = f.read().decode('utf-8') self._load_cookies() return html, stdout def execute(self, jscode, video_id=None, *, note='Executing JS'): """Execute JS and return stdout""" if 'phantom.exit();' not in jscode: jscode += ';\nphantom.exit();' jscode = self._BASE_JS + jscode with open(self._TMP_FILES['script'].name, 'w', encoding='utf-8') as f: f.write(jscode) self.extractor.to_screen(f'{format_field(video_id, None, "%s: ")}{note}') cmd = [self.exe, '--ssl-protocol=any', self._TMP_FILES['script'].name] self.extractor.write_debug(f'PhantomJS command line: {shell_quote(cmd)}') try: stdout, stderr, returncode = Popen.run(cmd, timeout=self.options['timeout'] / 1000, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except Exception as e: raise ExtractorError(f'{note} failed: Unable to run PhantomJS binary', cause=e) if returncode: raise ExtractorError(f'{note} failed with returncode {returncode}:\n{stderr.strip()}') return stdout
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cam4.py
yt_dlp/extractor/cam4.py
from .common import InfoExtractor class CAM4IE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?cam4\.com/(?P<id>[a-z0-9_]+)' _TEST = { 'url': 'https://www.cam4.com/foxynesss', 'info_dict': { 'id': 'foxynesss', 'ext': 'mp4', 'title': 're:^foxynesss [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'age_limit': 18, 'live_status': 'is_live', 'thumbnail': 'https://snapshots.xcdnpro.com/thumbnails/foxynesss', }, } def _real_extract(self, url): channel_id = self._match_id(url) m3u8_playlist = self._download_json(f'https://www.cam4.com/rest/v1.0/profile/{channel_id}/streamInfo', channel_id).get('cdnURL') formats = self._extract_m3u8_formats(m3u8_playlist, channel_id, 'mp4', m3u8_id='hls', live=True) return { 'id': channel_id, 'title': channel_id, 'is_live': True, 'age_limit': 18, 'formats': formats, 'thumbnail': f'https://snapshots.xcdnpro.com/thumbnails/{channel_id}', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pornhub.py
yt_dlp/extractor/pornhub.py
import functools import itertools import math import operator import re from .common import InfoExtractor from .openload import PhantomJSwrapper from ..networking import Request from ..networking.exceptions import HTTPError from ..utils import ( NO_DEFAULT, ExtractorError, clean_html, determine_ext, format_field, int_or_none, merge_dicts, orderedSet, remove_quotes, remove_start, str_to_int, update_url_query, url_or_none, urlencode_postdata, ) from ..utils.traversal import find_elements, traverse_obj class PornHubBaseIE(InfoExtractor): _NETRC_MACHINE = 'pornhub' _PORNHUB_HOST_RE = r'(?:(?P<host>pornhub(?:premium)?\.(?:com|net|org))|pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd\.onion)' def _download_webpage_handle(self, *args, **kwargs): def dl(*args, **kwargs): return super(PornHubBaseIE, self)._download_webpage_handle(*args, **kwargs) ret = dl(*args, **kwargs) if not ret: return ret webpage, urlh = ret if any(re.search(p, webpage) for p in ( r'<body\b[^>]+\bonload=["\']go\(\)', r'document\.cookie\s*=\s*["\']RNKEY=', r'document\.location\.reload\(true\)')): url_or_request = args[0] url = (url_or_request.url if isinstance(url_or_request, Request) else url_or_request) phantom = PhantomJSwrapper(self, required_version='2.0') phantom.get(url, html=webpage) webpage, urlh = dl(*args, **kwargs) return webpage, urlh def _real_initialize(self): self._logged_in = False def _set_age_cookies(self, host): self._set_cookie(host, 'age_verified', '1') self._set_cookie(host, 'accessAgeDisclaimerPH', '1') self._set_cookie(host, 'accessAgeDisclaimerUK', '1') self._set_cookie(host, 'accessPH', '1') def _login(self, host): if self._logged_in: return site = host.split('.')[0] # Both sites pornhub and pornhubpremium have separate accounts # so there should be an option to provide credentials for both. # At the same time some videos are available under the same video id # on both sites so that we have to identify them as the same video. 
# For that purpose we have to keep both in the same extractor # but under different netrc machines. username, password = self._get_login_info(netrc_machine=site) if username is None: return login_url = 'https://www.{}/{}login'.format(host, 'premium/' if 'premium' in host else '') login_page = self._download_webpage( login_url, None, f'Downloading {site} login page') def is_logged(webpage): return any(re.search(p, webpage) for p in ( r'id="profileMenuDropdown"', r'class="ph-icon-logout"')) if is_logged(login_page): self._logged_in = True return login_form = self._hidden_inputs(login_page) login_form.update({ 'email': username, 'password': password, }) response = self._download_json( f'https://www.{host}/front/authenticate', None, f'Logging in to {site}', data=urlencode_postdata(login_form), headers={ 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Referer': login_url, 'X-Requested-With': 'XMLHttpRequest', }) if response.get('success') == '1': self._logged_in = True return message = response.get('message') if message is not None: raise ExtractorError( f'Unable to login: {message}', expected=True) raise ExtractorError('Unable to log in') class PornHubIE(PornHubBaseIE): IE_DESC = 'PornHub and Thumbzilla' _VALID_URL = rf'''(?x) https?:// (?: (?:[^/]+\.)? 
{PornHubBaseIE._PORNHUB_HOST_RE} /(?:(?:view_video\.php|video/show)\?viewkey=|embed/)| (?:www\.)?thumbzilla\.com/video/ ) (?P<id>[\da-z]+) ''' _EMBED_REGEX = [r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub(?:premium)?\.(?:com|net|org)/embed/[\da-z]+)'] _TESTS = [{ 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015', 'md5': '4d4a4e9178b655776f86cf89ecaf0edf', 'info_dict': { 'id': '648719015', 'ext': 'mp4', 'title': 'Seductive Indian beauty strips down and fingers her pink pussy', 'uploader': 'BABES-COM', 'uploader_id': '/users/babes-com', 'upload_date': '20130628', 'timestamp': 1372447216, 'duration': 361, 'view_count': int, 'like_count': int, 'comment_count': int, 'age_limit': 18, 'tags': list, 'categories': list, 'cast': list, 'thumbnail': r're:https?://.+', }, }, { # non-ASCII title 'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002', 'info_dict': { 'id': '1331683002', 'ext': 'mp4', 'title': '重庆婷婷女王足交', 'upload_date': '20150213', 'timestamp': 1423804862, 'duration': 1753, 'view_count': int, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'age_limit': 18, 'tags': list, 'categories': list, }, 'params': { 'skip_download': True, }, 'skip': 'Video has been flagged for verification in accordance with our trust and safety policy', }, { # subtitles 'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7', 'info_dict': { 'id': 'ph5af5fef7c2aa7', 'ext': 'mp4', 'title': 'BFFS - Cute Teen Girls Share Cock On the Floor', 'uploader': 'BFFs', 'duration': 622, 'view_count': int, 'like_count': int, 'dislike_count': int, 'comment_count': int, 'age_limit': 18, 'tags': list, 'categories': list, 'subtitles': { 'en': [{ 'ext': 'srt', }], }, }, 'params': { 'skip_download': True, }, 'skip': 'This video has been disabled', }, { 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph601dc30bae19a', 'info_dict': { 'id': 'ph601dc30bae19a', 'uploader': 'Projekt Melody', 'uploader_id': 'projekt-melody', 'upload_date': 
'20210205', 'title': '"Welcome to My Pussy Mansion" - CB Stream (02/03/21)', 'thumbnail': r're:https?://.+', }, }, { 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d', 'only_matching': True, }, { # removed at the request of cam4.com 'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862', 'only_matching': True, }, { # removed at the request of the copyright owner 'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859', 'only_matching': True, }, { # removed by uploader 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111', 'only_matching': True, }, { # private video 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7', 'only_matching': True, }, { 'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex', 'only_matching': True, }, { 'url': 'http://www.pornhub.com/video/show?viewkey=648719015', 'only_matching': True, }, { 'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933', 'only_matching': True, }, { 'url': 'https://www.pornhub.org/view_video.php?viewkey=203640933', 'only_matching': True, }, { 'url': 'https://www.pornhubpremium.com/view_video.php?viewkey=ph5e4acdae54a82', 'only_matching': True, }, { # Some videos are available with the same id on both premium # and non-premium sites (e.g. 
this and the following test) 'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5f75b0f4b18e3', 'only_matching': True, }, { 'url': 'https://www.pornhubpremium.com/view_video.php?viewkey=ph5f75b0f4b18e3', 'only_matching': True, }, { # geo restricted 'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5a9813bfa7156', 'only_matching': True, }, { 'url': 'http://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/view_video.php?viewkey=ph5a9813bfa7156', 'only_matching': True, }] def _extract_count(self, pattern, webpage, name): return str_to_int(self._search_regex(pattern, webpage, f'{name} count', default=None)) def _real_extract(self, url): mobj = self._match_valid_url(url) host = mobj.group('host') or 'pornhub.com' video_id = mobj.group('id') self._login(host) self._set_age_cookies(host) def dl_webpage(platform): self._set_cookie(host, 'platform', platform) return self._download_webpage( f'https://www.{host}/view_video.php?viewkey={video_id}', video_id, f'Downloading {platform} webpage') webpage = dl_webpage('pc') error_msg = self._html_search_regex( (r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>', r'(?s)<section[^>]+class=["\']noVideo["\'][^>]*>(?P<error>.+?)</section>'), webpage, 'error message', default=None, group='error') if error_msg: error_msg = re.sub(r'\s+', ' ', error_msg) raise ExtractorError( f'PornHub said: {error_msg}', expected=True, video_id=video_id) if any(re.search(p, webpage) for p in ( r'class=["\']geoBlocked["\']', r'>\s*This content is unavailable in your country')): self.raise_geo_restricted() # video_title from flashvars contains whitespace instead of non-ASCII (see # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying # on that anymore. 
title = self._html_search_meta( 'twitter:title', webpage, default=None) or self._html_search_regex( (r'(?s)<h1[^>]+class=["\']title["\'][^>]*>(?P<title>.+?)</h1>', r'<div[^>]+data-video-title=(["\'])(?P<title>(?:(?!\1).)+)\1', r'shareTitle["\']\s*[=:]\s*(["\'])(?P<title>(?:(?!\1).)+)\1'), webpage, 'title', group='title') video_urls = [] video_urls_set = set() subtitles = {} flashvars = self._parse_json( self._search_regex( r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'), video_id) if flashvars: subtitle_url = url_or_none(flashvars.get('closedCaptionsFile')) if subtitle_url: subtitles.setdefault('en', []).append({ 'url': subtitle_url, 'ext': 'srt', }) thumbnail = flashvars.get('image_url') duration = int_or_none(flashvars.get('video_duration')) media_definitions = flashvars.get('mediaDefinitions') if isinstance(media_definitions, list): for definition in media_definitions: if not isinstance(definition, dict): continue video_url = definition.get('videoUrl') if not video_url or not isinstance(video_url, str): continue if video_url in video_urls_set: continue video_urls_set.add(video_url) video_urls.append( (video_url, int_or_none(definition.get('quality')))) else: thumbnail, duration = [None] * 2 def extract_js_vars(webpage, pattern, default=NO_DEFAULT): assignments = self._search_regex( pattern, webpage, 'encoded url', default=default) if not assignments: return {} assignments = assignments.split(';') js_vars = {} def parse_js_value(inp): inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp) if '+' in inp: inps = inp.split('+') return functools.reduce( operator.concat, map(parse_js_value, inps)) inp = inp.strip() if inp in js_vars: return js_vars[inp] return remove_quotes(inp) for assn in assignments: assn = assn.strip() if not assn: continue assn = re.sub(r'var\s+', '', assn) vname, value = assn.split('=', 1) js_vars[vname] = parse_js_value(value) return js_vars def add_video_url(video_url): v_url = url_or_none(video_url) if not v_url: return if 
v_url in video_urls_set: return video_urls.append((v_url, None)) video_urls_set.add(v_url) def parse_quality_items(quality_items): q_items = self._parse_json(quality_items, video_id, fatal=False) if not isinstance(q_items, list): return for item in q_items: if isinstance(item, dict): add_video_url(item.get('url')) if not video_urls: FORMAT_PREFIXES = ('media', 'quality', 'qualityItems') js_vars = extract_js_vars( webpage, r'(var\s+(?:{})_.+)'.format('|'.join(FORMAT_PREFIXES)), default=None) if js_vars: for key, format_url in js_vars.items(): if key.startswith(FORMAT_PREFIXES[-1]): parse_quality_items(format_url) elif any(key.startswith(p) for p in FORMAT_PREFIXES[:2]): add_video_url(format_url) if not video_urls and re.search( r'<[^>]+\bid=["\']lockedPlayer', webpage): raise ExtractorError( f'Video {video_id} is locked', expected=True) if not video_urls: js_vars = extract_js_vars( dl_webpage('tv'), r'(var.+?mediastring.+?)</script>') add_video_url(js_vars['mediastring']) for mobj in re.finditer( r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1', webpage): video_url = mobj.group('url') if video_url not in video_urls_set: video_urls.append((video_url, None)) video_urls_set.add(video_url) upload_date = None formats = [] def add_format(format_url, height=None): ext = determine_ext(format_url) if ext == 'mpd': formats.extend(self._extract_mpd_formats( format_url, video_id, mpd_id='dash', fatal=False)) return if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) return if not height: height = int_or_none(self._search_regex( r'(?P<height>\d+)[pP]?_\d+[kK]', format_url, 'height', default=None)) formats.append({ 'url': format_url, 'format_id': format_field(height, None, '%dp'), 'height': height, }) for video_url, height in video_urls: if not upload_date: upload_date = self._search_regex( r'/(\d{6}/\d{2})/', video_url, 'upload data', default=None) if 
upload_date: upload_date = upload_date.replace('/', '') if '/video/get_media' in video_url: medias = self._download_json(video_url, video_id, fatal=False) if isinstance(medias, list): for media in medias: if not isinstance(media, dict): continue video_url = url_or_none(media.get('videoUrl')) if not video_url: continue height = int_or_none(media.get('quality')) add_format(video_url, height) continue add_format(video_url) model_profile = self._search_json( r'var\s+MODEL_PROFILE\s*=', webpage, 'model profile', video_id, fatal=False) video_uploader = self._html_search_regex( r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<', webpage, 'uploader', default=None) or model_profile.get('username') def extract_vote_count(kind, name): return self._extract_count( (rf'<span[^>]+\bclass="votes{kind}"[^>]*>([\d,\.]+)</span>', rf'<span[^>]+\bclass=["\']votes{kind}["\'][^>]*\bdata-rating=["\'](\d+)'), webpage, name) view_count = self._extract_count( r'<span class="count">([\d,\.]+)</span> [Vv]iews', webpage, 'view') like_count = extract_vote_count('Up', 'like') dislike_count = extract_vote_count('Down', 'dislike') comment_count = self._extract_count( r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment') info = self._search_json_ld(webpage, video_id, default={}) # description provided in JSON-LD is irrelevant info['description'] = None return merge_dicts({ 'id': video_id, 'uploader': video_uploader, 'uploader_id': remove_start(model_profile.get('modelProfileLink'), '/model/'), 'upload_date': upload_date, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, 'like_count': like_count, 'dislike_count': dislike_count, 'comment_count': comment_count, 'formats': formats, 'age_limit': 18, **traverse_obj(webpage, { 'tags': ({find_elements(attr='data-label', value='tag')}, ..., {clean_html}), 'categories': ({find_elements(attr='data-label', value='category')}, ..., 
{clean_html}), 'cast': ({find_elements(attr='data-label', value='pornstar')}, ..., {clean_html}), }), 'subtitles': subtitles, }, info) class PornHubPlaylistBaseIE(PornHubBaseIE): def _extract_page(self, url): return int_or_none(self._search_regex( r'\bpage=(\d+)', url, 'page', default=None)) def _extract_entries(self, webpage, host): # Only process container div with main playlist content skipping # drop-down menu that uses similar pattern for videos (see # https://github.com/ytdl-org/youtube-dl/issues/11594). container = self._search_regex( r'(?s)(<div[^>]+class=["\']container.+)', webpage, 'container', default=webpage) return [ self.url_result( f'http://www.{host}/{video_url}', PornHubIE.ie_key(), video_title=title) for video_url, title in orderedSet(re.findall( r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"', container)) ] class PornHubUserIE(PornHubPlaylistBaseIE): _VALID_URL = rf'(?P<url>https?://(?:[^/]+\.)?{PornHubBaseIE._PORNHUB_HOST_RE}/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/?#&]+))(?:[?#&]|/(?!videos)|$)' _TESTS = [{ 'url': 'https://www.pornhub.com/model/zoe_ph', 'playlist_mincount': 118, }, { 'url': 'https://www.pornhub.com/pornstar/liz-vicious', 'info_dict': { 'id': 'liz-vicious', }, 'playlist_mincount': 118, }, { 'url': 'https://www.pornhub.com/users/russianveet69', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/channels/povd', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/model/zoe_ph?abc=1', 'only_matching': True, }, { # Unavailable via /videos page, but available with direct pagination # on pornstar page (see [1]), requires premium # 1. 
https://github.com/ytdl-org/youtube-dl/issues/27853 'url': 'https://www.pornhubpremium.com/pornstar/sienna-west', 'only_matching': True, }, { # Same as before, multi page 'url': 'https://www.pornhubpremium.com/pornstar/lily-labeau', 'only_matching': True, }, { 'url': 'https://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/model/zoe_ph', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) user_id = mobj.group('id') videos_url = '{}/videos'.format(mobj.group('url')) self._set_age_cookies(mobj.group('host')) page = self._extract_page(url) if page: videos_url = update_url_query(videos_url, {'page': page}) return self.url_result( videos_url, ie=PornHubPagedVideoListIE.ie_key(), video_id=user_id) class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE): @staticmethod def _has_more(webpage): return re.search( r'''(?x) <li[^>]+\bclass=["\']page_next| <link[^>]+\brel=["\']next| <button[^>]+\bid=["\']moreDataBtn ''', webpage) is not None def _entries(self, url, host, item_id): page = self._extract_page(url) VIDEOS = '/videos' def download_page(base_url, num, fallback=False): note = 'Downloading page {}{}'.format(num, ' (switch to fallback)' if fallback else '') return self._download_webpage( base_url, item_id, note, query={'page': num}) def is_404(e): return isinstance(e.cause, HTTPError) and e.cause.status == 404 base_url = url has_page = page is not None first_page = page if has_page else 1 for page_num in (first_page, ) if has_page else itertools.count(first_page): try: try: webpage = download_page(base_url, page_num) except ExtractorError as e: # Some sources may not be available via /videos page, # trying to fallback to main page pagination (see [1]) # 1. 
https://github.com/ytdl-org/youtube-dl/issues/27853 if is_404(e) and page_num == first_page and VIDEOS in base_url: base_url = base_url.replace(VIDEOS, '') webpage = download_page(base_url, page_num, fallback=True) else: raise except ExtractorError as e: if is_404(e) and page_num != first_page: break raise page_entries = self._extract_entries(webpage, host) if not page_entries: break yield from page_entries if not self._has_more(webpage): break def _real_extract(self, url): mobj = self._match_valid_url(url) host = mobj.group('host') item_id = mobj.group('id') self._login(host) self._set_age_cookies(host) return self.playlist_result(self._entries(url, host, item_id), item_id) class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE): _VALID_URL = rf'https?://(?:[^/]+\.)?{PornHubBaseIE._PORNHUB_HOST_RE}/(?!playlist/)(?P<id>(?:[^/]+/)*[^/?#&]+)' _TESTS = [{ 'url': 'https://www.pornhub.com/model/zoe_ph/videos', 'only_matching': True, }, { 'url': 'http://www.pornhub.com/users/rushandlia/videos', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos', 'info_dict': { 'id': 'pornstar/jenny-blighe/videos', }, 'playlist_mincount': 149, }, { 'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos?page=3', 'info_dict': { 'id': 'pornstar/jenny-blighe/videos', }, 'playlist_mincount': 40, }, { # default sorting as Top Rated Videos 'url': 'https://www.pornhub.com/channels/povd/videos', 'info_dict': { 'id': 'channels/povd/videos', }, 'playlist_mincount': 293, }, { # Top Rated Videos 'url': 'https://www.pornhub.com/channels/povd/videos?o=ra', 'only_matching': True, }, { # Most Recent Videos 'url': 'https://www.pornhub.com/channels/povd/videos?o=da', 'only_matching': True, }, { # Most Viewed Videos 'url': 'https://www.pornhub.com/channels/povd/videos?o=vi', 'only_matching': True, }, { 'url': 'http://www.pornhub.com/users/zoe_ph/videos/public', 'only_matching': True, }, { # Most Viewed Videos 'url': 
'https://www.pornhub.com/pornstar/liz-vicious/videos?o=mv', 'only_matching': True, }, { # Top Rated Videos 'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=tr', 'only_matching': True, }, { # Longest Videos 'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=lg', 'only_matching': True, }, { # Newest Videos 'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=cm', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos/paid', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos/fanonly', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/video', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/video?page=3', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/video/search?search=123', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/categories/teen', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/categories/teen?page=3', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/hd', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/hd?page=3', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/described-video', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/described-video?page=2', 'only_matching': True, }, { 'url': 'https://www.pornhub.com/video/incategories/60fps-1/hd-porn', 'only_matching': True, }, { 'url': 'https://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/model/zoe_ph/videos', 'only_matching': True, }] @classmethod def suitable(cls, url): return (False if PornHubIE.suitable(url) or PornHubUserIE.suitable(url) or PornHubUserVideosUploadIE.suitable(url) else super().suitable(url)) class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE): _VALID_URL = rf'(?P<url>https?://(?:[^/]+\.)?{PornHubBaseIE._PORNHUB_HOST_RE}/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos/upload)' _TESTS = [{ 'url': 
'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload', 'info_dict': { 'id': 'jenny-blighe', }, 'playlist_mincount': 129, }, { 'url': 'https://www.pornhub.com/model/zoe_ph/videos/upload', 'only_matching': True, }, { 'url': 'http://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/pornstar/jenny-blighe/videos/upload', 'only_matching': True, }] class PornHubPlaylistIE(PornHubPlaylistBaseIE): _VALID_URL = rf'(?P<url>https?://(?:[^/]+\.)?{PornHubBaseIE._PORNHUB_HOST_RE}/playlist/(?P<id>[^/?#&]+))' _TESTS = [{ 'url': 'https://www.pornhub.com/playlist/44121572', 'info_dict': { 'id': '44121572', }, 'playlist_count': 77, }, { 'url': 'https://www.pornhub.com/playlist/4667351', 'only_matching': True, }, { 'url': 'https://de.pornhub.com/playlist/4667351', 'only_matching': True, }, { 'url': 'https://de.pornhub.com/playlist/4667351?page=2', 'only_matching': True, }] def _entries(self, url, host, item_id): webpage = self._download_webpage(url, item_id, 'Downloading page 1') playlist_id = self._search_regex(r'var\s+playlistId\s*=\s*"([^"]+)"', webpage, 'playlist_id') video_count = int_or_none( self._search_regex(r'var\s+itemsCount\s*=\s*([0-9]+)\s*\|\|', webpage, 'video_count')) token = self._search_regex(r'var\s+token\s*=\s*"([^"]+)"', webpage, 'token') page_count = math.ceil((video_count - 36) / 40.) 
+ 1 page_entries = self._extract_entries(webpage, host) def download_page(page_num): note = f'Downloading page {page_num}' page_url = f'https://www.{host}/playlist/viewChunked' return self._download_webpage(page_url, item_id, note, query={ 'id': playlist_id, 'page': page_num, 'token': token, }) for page_num in range(1, page_count + 1): if page_num > 1: webpage = download_page(page_num) page_entries = self._extract_entries(webpage, host) if not page_entries: break yield from page_entries def _real_extract(self, url): mobj = self._match_valid_url(url) host = mobj.group('host') item_id = mobj.group('id') self._login(host) self._set_age_cookies(host) return self.playlist_result(self._entries(mobj.group('url'), host, item_id), item_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cinemax.py
yt_dlp/extractor/cinemax.py
from .hbo import HBOBaseIE class CinemaxIE(HBOBaseIE): _WORKING = False _VALID_URL = r'https?://(?:www\.)?cinemax\.com/(?P<path>[^/]+/video/[0-9a-z-]+-(?P<id>\d+))' _TESTS = [{ 'url': 'https://www.cinemax.com/warrior/video/s1-ep-1-recap-20126903', 'md5': '82e0734bba8aa7ef526c9dd00cf35a05', 'info_dict': { 'id': '20126903', 'ext': 'mp4', 'title': 'S1 Ep 1: Recap', }, 'expected_warnings': ['Unknown MIME type application/mp4 in DASH manifest'], }, { 'url': 'https://www.cinemax.com/warrior/video/s1-ep-1-recap-20126903.embed', 'only_matching': True, }] def _real_extract(self, url): path, video_id = self._match_valid_url(url).groups() info = self._extract_info(f'https://www.cinemax.com/{path}.xml', video_id) info['id'] = video_id return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gab.py
yt_dlp/extractor/gab.py
import re from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, parse_codecs, parse_duration, str_to_int, unified_timestamp, ) class GabTVIE(InfoExtractor): _VALID_URL = r'https?://tv\.gab\.com/channel/[^/]+/view/(?P<id>[a-z0-9-]+)' _TESTS = [{ 'url': 'https://tv.gab.com/channel/wurzelroot/view/why-was-america-in-afghanistan-61217eacea5665de450d0488', 'info_dict': { 'id': '61217eacea5665de450d0488', 'ext': 'mp4', 'title': 'WHY WAS AMERICA IN AFGHANISTAN - AMERICA FIRST AGAINST AMERICAN OLIGARCHY', 'uploader': 'Wurzelroot', 'uploader_id': '608fb0a85738fd1974984f7d', 'thumbnail': 'https://tv.gab.com/image/61217eacea5665de450d0488', }, }] def _real_extract(self, url): video_id = self._match_id(url).split('-')[-1] webpage = self._download_webpage(url, video_id) channel_id = self._search_regex(r'data-channel-id=\"(?P<channel_id>[^\"]+)', webpage, 'channel_id') channel_name = self._search_regex(r'data-channel-name=\"(?P<channel_id>[^\"]+)', webpage, 'channel_name') title = self._search_regex(r'data-episode-title=\"(?P<channel_id>[^\"]+)', webpage, 'title') view_key = self._search_regex(r'data-view-key=\"(?P<channel_id>[^\"]+)', webpage, 'view_key') description = clean_html( self._html_search_regex(self._meta_regex('description'), webpage, 'description', group='content')) or None available_resolutions = re.findall( rf'<a\ data-episode-id=\"{video_id}\"\ data-resolution=\"(?P<resolution>[^\"]+)', webpage) formats = [] for resolution in available_resolutions: frmt = { 'url': f'https://tv.gab.com/media/{video_id}?viewKey={view_key}&r={resolution}', 'format_id': resolution, 'vcodec': 'h264', 'acodec': 'aac', 'ext': 'mp4', } if 'audio-' in resolution: frmt['abr'] = str_to_int(resolution.replace('audio-', '')) frmt['height'] = 144 frmt['quality'] = -10 else: frmt['height'] = str_to_int(resolution.replace('p', '')) formats.append(frmt) return { 'id': video_id, 'title': title, 'formats': formats, 'description': description, 'uploader': channel_name, 
'uploader_id': channel_id, 'thumbnail': f'https://tv.gab.com/image/{video_id}', } class GabIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gab\.com/[^/]+/posts/(?P<id>\d+)' _TESTS = [{ 'url': 'https://gab.com/SomeBitchIKnow/posts/107163961867310434', 'md5': '8ca34fb00f1e1033b5c5988d79ec531d', 'info_dict': { 'id': '107163961867310434-0', 'ext': 'mp4', 'title': 'L on Gab', 'uploader_id': '946600', 'uploader': 'SomeBitchIKnow', 'description': 'md5:204055fafd5e1a519f5d6db953567ca3', 'timestamp': 1635192289, 'upload_date': '20211025', }, }, { 'url': 'https://gab.com/TheLonelyProud/posts/107045884469287653', 'md5': 'f9cefcfdff6418e392611a828d47839d', 'info_dict': { 'id': '107045884469287653-0', 'ext': 'mp4', 'title': 'Jody Sadowski on Gab', 'uploader_id': '1390705', 'timestamp': 1633390571, 'upload_date': '20211004', 'uploader': 'TheLonelyProud', }, }] def _real_extract(self, url): post_id = self._match_id(url) json_data = self._download_json(f'https://gab.com/api/v1/statuses/{post_id}', post_id) entries = [] for idx, media in enumerate(json_data['media_attachments']): if media.get('type') not in ('video', 'gifv'): continue metadata = media['meta'] format_metadata = { 'acodec': parse_codecs(metadata.get('audio_encode')).get('acodec'), 'asr': int_or_none((metadata.get('audio_bitrate') or '').split(' ')[0]), 'fps': metadata.get('fps'), } formats = [{ 'url': url, 'width': f.get('width'), 'height': f.get('height'), 'tbr': int_or_none(f.get('bitrate'), scale=1000), **format_metadata, } for url, f in ((media.get('url'), metadata.get('original') or {}), (media.get('source_mp4'), metadata.get('playable') or {})) if url] author = json_data.get('account') or {} entries.append({ 'id': f'{post_id}-{idx}', 'title': f'{json_data["account"]["display_name"]} on Gab', 'timestamp': unified_timestamp(json_data.get('created_at')), 'formats': formats, 'description': clean_html(json_data.get('content')), 'duration': metadata.get('duration') or parse_duration(metadata.get('length')), 
'like_count': json_data.get('favourites_count'), 'comment_count': json_data.get('replies_count'), 'repost_count': json_data.get('reblogs_count'), 'uploader': author.get('username'), 'uploader_id': author.get('id'), 'uploader_url': author.get('url'), }) if len(entries) > 1: return self.playlist_result(entries, post_id) return entries[0]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/playerfm.py
yt_dlp/extractor/playerfm.py
from .common import InfoExtractor from ..utils import clean_html, clean_podcast_url, int_or_none, str_or_none, url_or_none from ..utils.traversal import traverse_obj class PlayerFmIE(InfoExtractor): _VALID_URL = r'(?P<url>https?://(?:www\.)?player\.fm/(?:series/)?[\w-]+/(?P<id>[\w-]+))' _TESTS = [{ 'url': 'https://player.fm/series/chapo-trap-house/movie-mindset-33-casino-feat-felix', 'info_dict': { 'ext': 'mp3', 'id': '478606546', 'display_id': 'movie-mindset-33-casino-feat-felix', 'thumbnail': r're:^https://.*\.(jpg|png)', 'title': 'Movie Mindset 33 - Casino feat. Felix', 'creators': ['Chapo Trap House'], 'description': r're:The first episode of this season of Movie Mindset is free .+ we feel about it\.', 'duration': 6830, 'timestamp': 1745406000, 'upload_date': '20250423', }, }, { 'url': 'https://player.fm/series/nbc-nightly-news-with-tom-llamas/thursday-april-17-2025', 'info_dict': { 'ext': 'mp3', 'id': '477635490', 'display_id': 'thursday-april-17-2025', 'title': 'Thursday, April 17, 2025', 'thumbnail': r're:^https://.*\.(jpg|png)', 'duration': 1143, 'description': 'md5:4890b8cf9a55a787561cd5d59dfcda82', 'creators': ['NBC News'], 'timestamp': 1744941374, 'upload_date': '20250418', }, }, { 'url': 'https://player.fm/series/soccer-101/ep-109-its-kicking-off-how-have-the-rules-for-kickoff-changed-what-are-the-best-approaches-to-getting-the-game-underway-and-how-could-we-improve-on-the-present-system-ack3NzL3yibvs4pf', 'info_dict': { 'ext': 'mp3', 'id': '481418710', 'thumbnail': r're:^https://.*\.(jpg|png)', 'title': r're:#109 It\'s kicking off! 
How have the rules for kickoff changed, .+ the present system\?', 'creators': ['TSS'], 'duration': 1510, 'display_id': 'md5:b52ecacaefab891b59db69721bfd9b13', 'description': 'md5:52a39e36d08d8919527454f152ad3c25', 'timestamp': 1659102055, 'upload_date': '20220729', }, }] def _real_extract(self, url): display_id, url = self._match_valid_url(url).group('id', 'url') data = self._download_json(f'{url}.json', display_id) return { 'display_id': display_id, 'vcodec': 'none', **traverse_obj(data, { 'id': ('id', {int}, {str_or_none}), 'url': ('url', {clean_podcast_url}), 'title': ('title', {str}), 'description': ('description', {clean_html}), 'duration': ('duration', {int_or_none}), 'thumbnail': (('image', ('series', 'image')), 'url', {url_or_none}, any), 'filesize': ('size', {int_or_none}), 'timestamp': ('publishedAt', {int_or_none}), 'creators': ('series', 'author', {str}, filter, all, filter), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/netzkino.py
yt_dlp/extractor/netzkino.py
from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, url_or_none, urljoin, ) from ..utils.traversal import traverse_obj class NetzkinoIE(InfoExtractor): _GEO_COUNTRIES = ['DE'] _VALID_URL = r'https?://(?:www\.)?netzkino\.de/details/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.netzkino.de/details/snow-beast', 'md5': '1a4c90fe40d3ccabce163287e45e56dd', 'info_dict': { 'id': 'snow-beast', 'ext': 'mp4', 'title': 'Snow Beast', 'age_limit': 12, 'alt_title': 'Snow Beast', 'cast': 'count:3', 'categories': 'count:7', 'creators': 'count:2', 'description': 'md5:e604a954a7f827a80e96a3a97d48b269', 'location': 'US', 'release_year': 2011, 'thumbnail': r're:https?://.+\.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) next_js_data = self._search_nextjs_data(webpage, video_id) query = traverse_obj(next_js_data, ( 'props', '__dehydratedState', 'queries', ..., 'state', 'data', 'data', lambda _, v: v['__typename'] == 'CmsMovie', any)) if 'DRM' in traverse_obj(query, ('licenses', 'nodes', ..., 'properties', {str})): self.report_drm(video_id) return { 'id': video_id, **traverse_obj(query, { 'title': ('originalTitle', {clean_html}), 'age_limit': ('fskRating', {int_or_none}), 'alt_title': ('originalTitle', {clean_html}, filter), 'cast': ('cast', 'nodes', ..., 'person', 'name', {clean_html}, filter), 'creators': (('directors', 'writers'), 'nodes', ..., 'person', 'name', {clean_html}, filter), 'categories': ('categories', 'nodes', ..., 'category', 'title', {clean_html}, filter), 'description': ('longSynopsis', {clean_html}, filter), 'duration': ('runtimeInSeconds', {int_or_none}), 'location': ('productionCountry', {clean_html}, filter), 'release_year': ('productionYear', {int_or_none}), 'thumbnail': ('coverImage', 'masterUrl', {url_or_none}), 'url': ('videoSource', 'pmdUrl', {urljoin('https://pmd.netzkino-seite.netzkino.de/')}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/digitalconcerthall.py
yt_dlp/extractor/digitalconcerthall.py
import time from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, determine_ext, jwt_decode_hs256, parse_codecs, try_get, url_or_none, urlencode_postdata, ) from ..utils.traversal import traverse_obj class DigitalConcertHallIE(InfoExtractor): IE_DESC = 'DigitalConcertHall extractor' _VALID_URL = r'https?://(?:www\.)?digitalconcerthall\.com/(?P<language>[a-z]+)/(?P<type>film|concert|work)/(?P<id>[0-9]+)-?(?P<part>[0-9]+)?' _NETRC_MACHINE = 'digitalconcerthall' _TESTS = [{ 'note': 'Playlist with only one video', 'url': 'https://www.digitalconcerthall.com/en/concert/53201', 'info_dict': { 'id': '53201-1', 'ext': 'mp4', 'composer': 'Kurt Weill', 'title': '[Magic Night]', 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$', 'upload_date': '20210624', 'timestamp': 1624548600, 'duration': 2798, 'album_artists': ['Members of the Berliner Philharmoniker', 'Simon Rössler'], 'composers': ['Kurt Weill'], }, 'params': {'skip_download': 'm3u8'}, }, { 'note': 'Concert with several works and an interview', 'url': 'https://www.digitalconcerthall.com/en/concert/53785', 'info_dict': { 'id': '53785', 'album_artists': ['Berliner Philharmoniker', 'Kirill Petrenko'], 'title': 'Kirill Petrenko conducts Mendelssohn and Shostakovich', 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$', }, 'params': {'skip_download': 'm3u8'}, 'playlist_count': 3, }, { 'url': 'https://www.digitalconcerthall.com/en/film/388', 'info_dict': { 'id': '388', 'ext': 'mp4', 'title': 'The Berliner Philharmoniker and Frank Peter Zimmermann', 'description': 'md5:cfe25a7044fa4be13743e5089b5b5eb2', 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$', 'upload_date': '20220714', 'timestamp': 1657785600, 'album_artists': ['Frank Peter Zimmermann', 'Benedikt von Bernstorff', 'Jakob von Bernstorff'], }, 'params': {'skip_download': 'm3u8'}, }, { 'note': 'Concert with several 
works and an interview', 'url': 'https://www.digitalconcerthall.com/en/work/53785-1', 'info_dict': { 'id': '53785', 'album_artists': ['Berliner Philharmoniker', 'Kirill Petrenko'], 'title': 'Kirill Petrenko conducts Mendelssohn and Shostakovich', 'thumbnail': r're:^https?://images.digitalconcerthall.com/cms/thumbnails.*\.jpg$', }, 'params': {'skip_download': 'm3u8'}, 'playlist_count': 1, }] _LOGIN_HINT = ('Use --username token --password ACCESS_TOKEN where ACCESS_TOKEN ' 'is the "access_token_production" from your browser local storage') _REFRESH_HINT = 'or else use a "refresh_token" with --username refresh --password REFRESH_TOKEN' _OAUTH_URL = 'https://api.digitalconcerthall.com/v2/oauth2/token' _CLIENT_ID = 'dch.webapp' _CLIENT_SECRET = '2ySLN+2Fwb' _USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.5 Safari/605.1.15' _OAUTH_HEADERS = { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', 'Origin': 'https://www.digitalconcerthall.com', 'Referer': 'https://www.digitalconcerthall.com/', 'User-Agent': _USER_AGENT, } _access_token = None _access_token_expiry = 0 _refresh_token = None @property def _access_token_is_expired(self): return self._access_token_expiry - 30 <= int(time.time()) def _set_access_token(self, value): self._access_token = value self._access_token_expiry = traverse_obj(value, ({jwt_decode_hs256}, 'exp', {int})) or 0 def _cache_tokens(self, /): self.cache.store(self._NETRC_MACHINE, 'tokens', { 'access_token': self._access_token, 'refresh_token': self._refresh_token, }) def _fetch_new_tokens(self, invalidate=False): if invalidate: self.report_warning('Access token has been invalidated') self._set_access_token(None) if not self._access_token_is_expired: return if not self._refresh_token: self._set_access_token(None) self._cache_tokens() raise ExtractorError( 'Access token has expired or been invalidated. 
' 'Get a new "access_token_production" value from your browser ' f'and try again, {self._REFRESH_HINT}', expected=True) # If we only have a refresh token, we need a temporary "initial token" for the refresh flow bearer_token = self._access_token or self._download_json( self._OAUTH_URL, None, 'Obtaining initial token', 'Unable to obtain initial token', data=urlencode_postdata({ 'affiliate': 'none', 'grant_type': 'device', 'device_vendor': 'unknown', # device_model 'Safari' gets split streams of 4K/HEVC video and lossless/FLAC audio, # but this is no longer effective since actual login is not possible anymore 'device_model': 'unknown', 'app_id': self._CLIENT_ID, 'app_distributor': 'berlinphil', 'app_version': '1.95.0', 'client_secret': self._CLIENT_SECRET, }), headers=self._OAUTH_HEADERS)['access_token'] try: response = self._download_json( self._OAUTH_URL, None, 'Refreshing token', 'Unable to refresh token', data=urlencode_postdata({ 'grant_type': 'refresh_token', 'refresh_token': self._refresh_token, 'client_id': self._CLIENT_ID, 'client_secret': self._CLIENT_SECRET, }), headers={ **self._OAUTH_HEADERS, 'Authorization': f'Bearer {bearer_token}', }) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 401: self._set_access_token(None) self._refresh_token = None self._cache_tokens() raise ExtractorError('Your tokens have been invalidated', expected=True) raise self._set_access_token(response['access_token']) if refresh_token := traverse_obj(response, ('refresh_token', {str})): self.write_debug('New refresh token granted') self._refresh_token = refresh_token self._cache_tokens() def _perform_login(self, username, password): self.report_login() if username == 'refresh': self._refresh_token = password self._fetch_new_tokens() if username == 'token': if not traverse_obj(password, {jwt_decode_hs256}): raise ExtractorError( f'The access token passed to yt-dlp is not valid. 
{self._LOGIN_HINT}', expected=True) self._set_access_token(password) self._cache_tokens() if username in ('refresh', 'token'): if self.get_param('cachedir') is not False: token_type = 'access' if username == 'token' else 'refresh' self.to_screen(f'Your {token_type} token has been cached to disk. To use the cached ' 'token next time, pass --username cache along with any password') return if username != 'cache': raise ExtractorError( 'Login with username and password is no longer supported ' f'for this site. {self._LOGIN_HINT}, {self._REFRESH_HINT}', expected=True) # Try cached access_token cached_tokens = self.cache.load(self._NETRC_MACHINE, 'tokens', default={}) self._set_access_token(cached_tokens.get('access_token')) self._refresh_token = cached_tokens.get('refresh_token') if not self._access_token_is_expired: return # Try cached refresh_token self._fetch_new_tokens(invalidate=True) def _real_initialize(self): if not self._access_token: self.raise_login_required( 'All content on this site is only available for registered users. 
' f'{self._LOGIN_HINT}, {self._REFRESH_HINT}', method=None) def _entries(self, items, language, type_, **kwargs): for item in items: video_id = item['id'] for should_retry in (True, False): self._fetch_new_tokens(invalidate=not should_retry) try: stream_info = self._download_json( self._proto_relative_url(item['_links']['streams']['href']), video_id, headers={ 'Accept': 'application/json', 'Authorization': f'Bearer {self._access_token}', 'Accept-Language': language, 'User-Agent': self._USER_AGENT, }) break except ExtractorError as error: if should_retry and isinstance(error.cause, HTTPError) and error.cause.status == 401: continue raise formats = [] for fmt_url in traverse_obj(stream_info, ('channel', ..., 'stream', ..., 'url', {url_or_none})): ext = determine_ext(fmt_url) if ext == 'm3u8': fmts = self._extract_m3u8_formats(fmt_url, video_id, 'mp4', m3u8_id='hls', fatal=False) for fmt in fmts: if fmt.get('format_note') and fmt.get('vcodec') == 'none': fmt.update(parse_codecs(fmt['format_note'])) formats.extend(fmts) elif ext == 'mpd': formats.extend(self._extract_mpd_formats(fmt_url, video_id, mpd_id='dash', fatal=False)) else: self.report_warning(f'Skipping unsupported format extension "{ext}"') yield { 'id': video_id, 'title': item.get('title'), 'composer': item.get('name_composer'), 'formats': formats, 'duration': item.get('duration_total'), 'timestamp': traverse_obj(item, ('date', 'published')), 'description': item.get('short_description') or stream_info.get('short_description'), **kwargs, 'chapters': [{ 'start_time': chapter.get('time'), 'end_time': try_get(chapter, lambda x: x['time'] + x['duration']), 'title': chapter.get('text'), } for chapter in item['cuepoints']] if item.get('cuepoints') and type_ == 'concert' else None, } def _real_extract(self, url): language, type_, video_id, part = self._match_valid_url(url).group('language', 'type', 'id', 'part') if not language: language = 'en' api_type = 'concert' if type_ == 'work' else type_ vid_info = 
self._download_json( f'https://api.digitalconcerthall.com/v2/{api_type}/{video_id}', video_id, headers={ 'Accept': 'application/json', 'Accept-Language': language, 'User-Agent': self._USER_AGENT, }) videos = [vid_info] if type_ == 'film' else traverse_obj(vid_info, ('_embedded', ..., ...)) if type_ == 'work': videos = [videos[int(part) - 1]] album_artists = traverse_obj(vid_info, ('_links', 'artist', ..., 'name', {str})) thumbnail = traverse_obj(vid_info, ( 'image', ..., {self._proto_relative_url}, {url_or_none}, {lambda x: x.format(width=0, height=0)}, any)) # NB: 0x0 is the original size return { '_type': 'playlist', 'id': video_id, 'title': vid_info.get('title'), 'entries': self._entries( videos, language, type_, thumbnail=thumbnail, album_artists=album_artists), 'thumbnail': thumbnail, 'album_artists': album_artists, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/fivetv.py
yt_dlp/extractor/fivetv.py
from .common import InfoExtractor from ..utils import int_or_none class FiveTVIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:www\.)?5-tv\.ru/ (?: (?:[^/]+/)+(?P<id>\d+)| (?P<path>[^/?#]+)(?:[/?#])? ) ''' _TESTS = [{ 'url': 'http://5-tv.ru/news/96814/', 'md5': 'bbff554ad415ecf5416a2f48c22d9283', 'info_dict': { 'id': '96814', 'ext': 'mp4', 'title': 'Россияне выбрали имя для общенациональной платежной системы', 'description': 'md5:a8aa13e2b7ad36789e9f77a74b6de660', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 180, }, }, { 'url': 'http://5-tv.ru/video/1021729/', 'info_dict': { 'id': '1021729', 'ext': 'mp4', 'title': '3D принтер', 'description': 'md5:d76c736d29ef7ec5c0cf7d7c65ffcb41', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 180, }, }, { # redirect to https://www.5-tv.ru/projects/1000095/izvestia-glavnoe/ 'url': 'http://www.5-tv.ru/glavnoe/#itemDetails', 'info_dict': { 'id': 'glavnoe', 'ext': 'mp4', 'title': r're:^Итоги недели с \d+ по \d+ \w+ \d{4} года$', 'thumbnail': r're:^https?://.*\.jpg$', }, 'skip': 'redirect to «Известия. 
Главное» project page', }, { 'url': 'http://www.5-tv.ru/glavnoe/broadcasts/508645/', 'only_matching': True, }, { 'url': 'http://5-tv.ru/films/1507502/', 'only_matching': True, }, { 'url': 'http://5-tv.ru/programs/broadcast/508713/', 'only_matching': True, }, { 'url': 'http://5-tv.ru/angel/', 'only_matching': True, }, { 'url': 'http://www.5-tv.ru/schedule/?iframe=true&width=900&height=450', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') or mobj.group('path') webpage = self._download_webpage(url, video_id) video_url = self._search_regex( [r'<div[^>]+?class="(?:flow)?player[^>]+?data-href="([^"]+)"', r'<a[^>]+?href="([^"]+)"[^>]+?class="videoplayer"'], webpage, 'video url') title = self._generic_title('', webpage) duration = int_or_none(self._og_search_property( 'video:duration', webpage, 'duration', default=None)) return { 'id': video_id, 'url': video_url, 'title': title, 'description': self._og_search_description(webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'duration': duration, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ruutu.py
yt_dlp/extractor/ruutu.py
import json import re import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, find_xpath_attr, int_or_none, traverse_obj, try_call, unified_strdate, url_or_none, xpath_attr, xpath_text, ) class RuutuIE(InfoExtractor): _WORKING = False _VALID_URL = r'''(?x) https?:// (?: (?:www\.)?(?:ruutu|supla)\.fi/(?:video|supla|audio)/| static\.nelonenmedia\.fi/player/misc/embed_player\.html\?.*?\bnid= ) (?P<id>\d+) ''' _TESTS = [{ 'url': 'http://www.ruutu.fi/video/2058907', 'md5': 'ab2093f39be1ca8581963451b3c0234f', 'info_dict': { 'id': '2058907', 'ext': 'mp4', 'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!', 'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 114, 'age_limit': 0, 'upload_date': '20150508', }, }, { 'url': 'http://www.ruutu.fi/video/2057306', 'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9', 'info_dict': { 'id': '2057306', 'ext': 'mp4', 'title': 'Superpesis: katso koko kausi Ruudussa', 'description': 'md5:bfb7336df2a12dc21d18fa696c9f8f23', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 40, 'age_limit': 0, 'upload_date': '20150507', 'series': 'Superpesis', 'categories': ['Urheilu'], }, }, { 'url': 'http://www.supla.fi/supla/2231370', 'md5': 'df14e782d49a2c0df03d3be2a54ef949', 'info_dict': { 'id': '2231370', 'ext': 'mp4', 'title': 'Osa 1: Mikael Jungner', 'description': 'md5:7d90f358c47542e3072ff65d7b1bcffe', 'thumbnail': r're:^https?://.*\.jpg$', 'age_limit': 0, 'upload_date': '20151012', 'series': 'Läpivalaisu', }, }, { # Episode where <SourceFile> is "NOT-USED", but has other # downloadable sources available. 
'url': 'http://www.ruutu.fi/video/3193728', 'only_matching': True, }, { # audio podcast 'url': 'https://www.supla.fi/supla/3382410', 'md5': 'b9d7155fed37b2ebf6021d74c4b8e908', 'info_dict': { 'id': '3382410', 'ext': 'mp3', 'title': 'Mikä ihmeen poltergeist?', 'description': 'md5:bbb6963df17dfd0ecd9eb9a61bf14b52', 'thumbnail': r're:^https?://.*\.jpg$', 'age_limit': 0, 'upload_date': '20190320', 'series': 'Mysteeritarinat', 'duration': 1324, }, 'expected_warnings': [ 'HTTP Error 502: Bad Gateway', 'Failed to download m3u8 information', ], }, { 'url': 'http://www.supla.fi/audio/2231370', 'only_matching': True, }, { 'url': 'https://static.nelonenmedia.fi/player/misc/embed_player.html?nid=3618790', 'only_matching': True, }, { # episode 'url': 'https://www.ruutu.fi/video/3401964', 'info_dict': { 'id': '3401964', 'ext': 'mp4', 'title': 'Temptation Island Suomi - Kausi 5 - Jakso 17', 'description': 'md5:87cf01d5e1e88adf0c8a2937d2bd42ba', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 2582, 'age_limit': 12, 'upload_date': '20190508', 'series': 'Temptation Island Suomi', 'season_number': 5, 'episode_number': 17, 'categories': ['Reality ja tositapahtumat', 'Kotimaiset suosikit', 'Romantiikka ja parisuhde'], }, 'params': { 'skip_download': True, }, }, { # premium 'url': 'https://www.ruutu.fi/video/3618715', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # FIXME: Broken IE 'url': 'https://www.hs.fi/maailma/art-2000011353059.html', 'info_dict': { 'id': '4746675', 'ext': 'mp4', 'title': 'Yhdysvaltojen Texasin osavaltiota ovat koetelleet tuhoisat tulvat', }, }] _API_BASE = 'https://gatling.nelonenmedia.fi' @classmethod def _extract_embed_urls(cls, url, webpage): # nelonen.fi settings = try_call( lambda: json.loads(re.search( r'jQuery\.extend\(Drupal\.settings, ({.+?})\);', webpage).group(1), strict=False)) if settings: video_id = traverse_obj(settings, ( 'mediaCrossbowSettings', 'file', 'field_crossbow_video_id', 'und', 0, 'value')) if video_id: return 
[f'http://www.ruutu.fi/video/{video_id}'] # hs.fi and is.fi settings = try_call( lambda: json.loads(re.search( '(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>', webpage).group(1), strict=False)) if settings: video_ids = set(traverse_obj(settings, ( 'props', 'pageProps', 'page', 'assetData', 'splitBody', ..., 'video', 'sourceId')) or []) if video_ids: return [f'http://www.ruutu.fi/video/{v}' for v in video_ids] video_id = traverse_obj(settings, ( 'props', 'pageProps', 'page', 'assetData', 'mainVideo', 'sourceId')) if video_id: return [f'http://www.ruutu.fi/video/{video_id}'] def _real_extract(self, url): video_id = self._match_id(url) video_xml = self._download_xml( f'{self._API_BASE}/media-xml-cache', video_id, query={'id': video_id}) formats = [] processed_urls = [] def extract_formats(node): for child in node: if child.tag.endswith('Files'): extract_formats(child) elif child.tag.endswith('File'): video_url = child.text if (not video_url or video_url in processed_urls or any(p in video_url for p in ('NOT_USED', 'NOT-USED'))): continue processed_urls.append(video_url) ext = determine_ext(video_url) auth_video_url = url_or_none(self._download_webpage( f'{self._API_BASE}/auth/access/v2', video_id, note=f'Downloading authenticated {ext} stream URL', fatal=False, query={'stream': video_url})) if auth_video_url: processed_urls.append(auth_video_url) video_url = auth_video_url if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id='hds', fatal=False)) elif ext == 'mpd': # video-only and audio-only streams are of different # duration resulting in out of sync issue continue formats.extend(self._extract_mpd_formats( video_url, video_id, mpd_id='dash', fatal=False)) elif ext == 'mp3' or child.tag == 'AudioMediaFile': formats.append({ 'format_id': 'audio', 'url': video_url, 
'vcodec': 'none', }) else: proto = urllib.parse.urlparse(video_url).scheme if not child.tag.startswith('HTTP') and proto != 'rtmp': continue preference = -1 if proto == 'rtmp' else 1 label = child.get('label') tbr = int_or_none(child.get('bitrate')) format_id = f'{proto}-{label if label else tbr}' if label or tbr else proto if not self._is_valid_url(video_url, video_id, format_id): continue width, height = (int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]) formats.append({ 'format_id': format_id, 'url': video_url, 'width': width, 'height': height, 'tbr': tbr, 'preference': preference, }) extract_formats(video_xml.find('./Clip')) def pv(name): value = try_call(lambda: find_xpath_attr( video_xml, './Clip/PassthroughVariables/variable', 'name', name).get('value')) if value != 'NA': return value or None if not formats: if (not self.get_param('allow_unplayable_formats') and xpath_text(video_xml, './Clip/DRM', default=None)): self.report_drm(video_id) ns_st_cds = pv('ns_st_cds') if ns_st_cds != 'free': raise ExtractorError(f'This video is {ns_st_cds}.', expected=True) themes = pv('themes') return { 'id': video_id, 'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True), 'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'), 'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'), 'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')) or int_or_none(pv('runtime')), 'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')), 'upload_date': unified_strdate(pv('date_start')), 'series': pv('series_name'), 'season_number': int_or_none(pv('season_number')), 'episode_number': int_or_none(pv('episode_number')), 'categories': themes.split(',') if themes else None, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telemundo.py
yt_dlp/extractor/telemundo.py
from .common import InfoExtractor from ..networking import HEADRequest from ..utils import try_get, unified_timestamp class TelemundoIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?:\/\/(?:www\.)?telemundo\.com\/.+?video\/[^\/]+(?P<id>tmvo\d{7})' _TESTS = [{ 'url': 'https://www.telemundo.com/noticias/noticias-telemundo-en-la-noche/empleo/video/esta-aplicacion-gratuita-esta-ayudando-los-latinos-encontrar-trabajo-en-estados-unidos-tmvo9829325', 'info_dict': { 'id': 'tmvo9829325', 'timestamp': 1621396800, 'title': 'Esta aplicación gratuita está ayudando a los latinos a encontrar trabajo en Estados Unidos', 'uploader': 'Telemundo', 'uploader_id': 'NBCU_Telemundo', 'ext': 'mp4', 'upload_date': '20210519', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.telemundo.com/shows/al-rojo-vivo/empleo/video/personajes-de-times-square-piden-que-la-ciudad-de-nueva-york-los-deje-volver-trabajar-tmvo9816272', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) metadata = self._search_nextjs_data(webpage, video_id) redirect_url = try_get( metadata, lambda x: x['props']['initialState']['video']['associatedPlaylists'][0]['videos'][0]['videoAssets'][0]['publicUrl']) m3u8_url = self._request_webpage(HEADRequest( redirect_url + '?format=redirect&manifest=m3u&format=redirect&Tracking=true&Embedded=true&formats=MPEG4'), video_id, 'Processing m3u8').url formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4') date = unified_timestamp(try_get( metadata, lambda x: x['props']['initialState']['video']['associatedPlaylists'][0]['videos'][0]['datePublished'].split(' ', 1)[1])) return { 'url': url, 'id': video_id, 'title': self._search_regex(r'<h1[^>]+>([^<]+)', webpage, 'title', fatal=False), 'formats': formats, 'timestamp': date, 'uploader': 'Telemundo', 'uploader_id': self._search_regex(r'https?:\/\/(?:[^/]+\/){3}video\/(?P<id>[^\/]+)', m3u8_url, 'Akamai account', fatal=False), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/srgssr.py
yt_dlp/extractor/srgssr.py
from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, join_nonempty, parse_iso8601, qualities, try_get, ) class SRGSSRIE(InfoExtractor): _VALID_URL = r'''(?x) (?: https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn| srgssr ): (?P<bu> srf|rts|rsi|rtr|swi ):(?:[^:]+:)? (?P<type> video|audio ): (?P<id> [0-9a-f\-]{36}|\d+ ) ''' _GEO_BYPASS = False _GEO_COUNTRIES = ['CH'] _ERRORS = { 'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.', 'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.', # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.', 'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.', 'LEGAL': 'The video cannot be transmitted for legal reasons.', 'STARTDATE': 'This video is not yet available. Please try again later.', } _DEFAULT_LANGUAGE_CODES = { 'srf': 'de', 'rts': 'fr', 'rsi': 'it', 'rtr': 'rm', 'swi': 'en', } def _get_tokenized_src(self, url, video_id, format_id): token = self._download_json( 'http://tp.srgssr.ch/akahd/token?acl=*', video_id, f'Downloading {format_id} token', fatal=False) or {} auth_params = try_get(token, lambda x: x['token']['authparams']) if auth_params: url += ('?' if '?' 
not in url else '&') + auth_params return url def _get_media_data(self, bu, media_type, media_id): query = {'onlyChapters': True} if media_type == 'video' else {} full_media_data = self._download_json( f'https://il.srgssr.ch/integrationlayer/2.0/{bu}/mediaComposition/{media_type}/{media_id}.json', media_id, query=query)['chapterList'] try: media_data = next( x for x in full_media_data if x.get('id') == media_id) except StopIteration: raise ExtractorError('No media information found') block_reason = media_data.get('blockReason') if block_reason and block_reason in self._ERRORS: message = self._ERRORS[block_reason] if block_reason == 'GEOBLOCK': self.raise_geo_restricted( msg=message, countries=self._GEO_COUNTRIES) raise ExtractorError( f'{self.IE_NAME} said: {message}', expected=True) return media_data def _real_extract(self, url): bu, media_type, media_id = self._match_valid_url(url).groups() media_data = self._get_media_data(bu, media_type, media_id) title = media_data['title'] formats = [] subtitles = {} q = qualities(['SD', 'HD']) for source in (media_data.get('resourceList') or []): format_url = source.get('url') if not format_url: continue protocol = source.get('protocol') quality = source.get('quality') format_id = join_nonempty(protocol, source.get('encoding'), quality) if protocol in ('HDS', 'HLS'): if source.get('tokenType') == 'AKAMAI': format_url = self._get_tokenized_src( format_url, media_id, format_id) fmts, subs = self._extract_akamai_formats_and_subtitles( format_url, media_id) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) elif protocol == 'HLS': m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles( format_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False) formats.extend(m3u8_fmts) subtitles = self._merge_subtitles(subtitles, m3u8_subs) elif protocol in ('HTTP', 'HTTPS'): formats.append({ 'format_id': format_id, 'url': format_url, 'quality': q(quality), }) # This is needed because for audio 
medias the podcast url is usually # always included, even if is only an audio segment and not the # whole episode. if int_or_none(media_data.get('position')) == 0: for p in ('S', 'H'): podcast_url = media_data.get(f'podcast{p}dUrl') if not podcast_url: continue quality = p + 'D' formats.append({ 'format_id': 'PODCAST-' + quality, 'url': podcast_url, 'quality': q(quality), }) if media_type == 'video': for sub in (media_data.get('subtitleList') or []): sub_url = sub.get('url') if not sub_url: continue lang = sub.get('locale') or self._DEFAULT_LANGUAGE_CODES[bu] subtitles.setdefault(lang, []).append({ 'url': sub_url, }) return { 'id': media_id, 'title': title, 'description': media_data.get('description'), 'timestamp': parse_iso8601(media_data.get('date')), 'thumbnail': media_data.get('imageUrl'), 'duration': float_or_none(media_data.get('duration'), 1000), 'subtitles': subtitles, 'formats': formats, } class SRGSSRPlayIE(InfoExtractor): IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites' _VALID_URL = r'''(?x) https?:// (?:(?:www|play)\.)? 
(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/ (?: [^/]+/(?P<type>video|audio)/[^?]+| popup(?P<type_2>video|audio)player ) \?.*?\b(?:id=|urn=urn:[^:]+:video:)(?P<id>[0-9a-f\-]{36}|\d+) ''' _TESTS = [{ 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'md5': '6db2226ba97f62ad42ce09783680046c', 'info_dict': { 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'ext': 'mp4', 'upload_date': '20130701', 'title': 'Snowden beantragt Asyl in Russland', 'timestamp': 1372708215, 'duration': 113.827, 'thumbnail': r're:^https?://.*1383719781\.png$', }, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc', 'info_dict': { 'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc', 'ext': 'mp3', 'upload_date': '20151013', 'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem', 'timestamp': 1444709160, 'duration': 336.816, }, 'params': { # rtmp download 'skip_download': True, }, }, { 'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260', 'md5': '67a2a9ae4e8e62a68d0e9820cc9782df', 'info_dict': { 'id': '6348260', 'display_id': '6348260', 'ext': 'mp4', 'duration': 1796.76, 'title': 'Le 19h30', 'upload_date': '20141201', 'timestamp': 1417458600, 'thumbnail': r're:^https?://.*\.image', }, 'params': { # m3u8 download 'skip_download': True, }, }, { 'url': 'http://play.swissinfo.ch/play/tv/business/video/why-people-were-against-tax-reforms?id=42960270', 'info_dict': { 'id': '42960270', 'ext': 'mp4', 'title': 'Why people were against tax reforms', 'description': 'md5:7ac442c558e9630e947427469c4b824d', 'duration': 94.0, 'upload_date': '20170215', 'timestamp': 1487173560, 'thumbnail': r're:https?://www\.swissinfo\.ch/srgscalableimage/42961964', 'subtitles': 'count:9', }, 'params': { 'skip_download': True, }, }, { 'url': 
'https://www.srf.ch/play/tv/popupvideoplayer?id=c4dba0ca-e75b-43b2-a34f-f708a4932e01', 'only_matching': True, }, { 'url': 'https://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?urn=urn:srf:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5', 'only_matching': True, }, { 'url': 'https://www.rts.ch/play/tv/19h30/video/le-19h30?urn=urn:rts:video:6348260', 'only_matching': True, }, { # audio segment, has podcastSdUrl of the full episode 'url': 'https://www.srf.ch/play/radio/popupaudioplayer?id=50b20dc8-f05b-4972-bf03-e438ff2833eb', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) bu = mobj.group('bu') media_type = mobj.group('type') or mobj.group('type_2') media_id = mobj.group('id') return self.url_result(f'srgssr:{bu[:3]}:{media_type}:{media_id}', 'SRGSSR')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/imgur.py
yt_dlp/extractor/imgur.py
import functools import re from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, float_or_none, int_or_none, js_to_json, mimetype2ext, parse_iso8601, str_or_none, strip_or_none, traverse_obj, url_or_none, ) class ImgurBaseIE(InfoExtractor): _CLIENT_ID = '546c25a59c58ad7' @classmethod def _imgur_result(cls, item_id): return cls.url_result(f'https://imgur.com/{item_id}', ImgurIE, item_id) def _call_api(self, endpoint, video_id, **kwargs): return self._download_json( f'https://api.imgur.com/post/v1/{endpoint}/{video_id}?client_id={self._CLIENT_ID}&include=media,account', video_id, **kwargs) @staticmethod def get_description(s): if 'Discover the magic of the internet at Imgur' in s: return None return s or None class ImgurIE(ImgurBaseIE): _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|t|topic|r)/)(?:[^/?#]+-)?(?P<id>[a-zA-Z0-9]+)' _TESTS = [{ 'url': 'https://imgur.com/A61SaA1', 'info_dict': { 'id': 'A61SaA1', 'ext': 'mp4', 'title': 'MRW gifv is up and running without any bugs', 'timestamp': 1416446068, 'upload_date': '20141120', 'dislike_count': int, 'comment_count': int, 'release_timestamp': 1416446068, 'release_date': '20141120', 'like_count': int, 'thumbnail': 'https://i.imgur.com/A61SaA1h.jpg', }, }, { # Test with URL slug 'url': 'https://imgur.com/mrw-gifv-is-up-running-without-any-bugs-A61SaA1', 'info_dict': { 'id': 'A61SaA1', 'ext': 'mp4', 'title': 'MRW gifv is up and running without any bugs', 'timestamp': 1416446068, 'upload_date': '20141120', 'dislike_count': int, 'comment_count': int, 'release_timestamp': 1416446068, 'release_date': '20141120', 'like_count': int, 'thumbnail': 'https://i.imgur.com/A61SaA1h.jpg', }, }, { 'url': 'https://i.imgur.com/A61SaA1.gifv', 'only_matching': True, }, { 'url': 'https://i.imgur.com/crGpqCV.mp4', 'only_matching': True, }, { 'url': 'https://i.imgur.com/jxBXAMC.gifv', 'info_dict': { 'id': 'jxBXAMC', 'ext': 'mp4', 'title': 'Fahaka puffer feeding', 'timestamp': 1533835503, 
'upload_date': '20180809', 'release_date': '20180809', 'like_count': int, 'duration': 30.0, 'comment_count': int, 'release_timestamp': 1533835503, 'thumbnail': 'https://i.imgur.com/jxBXAMCh.jpg', 'dislike_count': int, }, }, { # needs Accept header, ref: https://github.com/yt-dlp/yt-dlp/issues/9458 'url': 'https://imgur.com/zV03bd5', 'md5': '59df97884e8ba76143ff6b640a0e2904', 'info_dict': { 'id': 'zV03bd5', 'ext': 'mp4', 'title': 'Ive - Liz', 'timestamp': 1710491255, 'upload_date': '20240315', 'like_count': int, 'dislike_count': int, 'duration': 56.92, 'comment_count': int, 'release_timestamp': 1710491255, 'release_date': '20240315', 'thumbnail': 'https://i.imgur.com/zV03bd5h.jpg', }, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._call_api('media', video_id) if not traverse_obj(data, ('media', 0, ( ('type', {lambda t: t == 'video' or None}), ('metadata', 'is_animated'))), get_all=False): raise ExtractorError(f'{video_id} is not a video or animated image', expected=True) webpage = self._download_webpage( f'https://i.imgur.com/{video_id}.gifv', video_id, fatal=False) or '' formats = [] media_fmt = traverse_obj(data, ('media', 0, { 'url': ('url', {url_or_none}), 'ext': ('ext', {str}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'filesize': ('size', {int_or_none}), 'acodec': ('metadata', 'has_sound', {lambda b: None if b else 'none'}), })) media_url = media_fmt.get('url') if media_url: if not media_fmt.get('ext'): media_fmt['ext'] = mimetype2ext(traverse_obj( data, ('media', 0, 'mime_type'))) or determine_ext(media_url) if traverse_obj(data, ('media', 0, 'type')) == 'image': media_fmt['acodec'] = 'none' media_fmt.setdefault('preference', -10) formats.append(media_fmt) video_elements = self._search_regex( r'(?s)<div class="video-elements">(.*?)</div>', webpage, 'video elements', default=None) if video_elements: def og_get_size(media_type): return { p: int_or_none(self._og_search_property(f'{media_type}:{p}', 
webpage, default=None)) for p in ('width', 'height') } size = og_get_size('video') if not any(size.values()): size = og_get_size('image') formats = traverse_obj( re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements), (..., { 'format_id': ('type', {lambda s: s.partition('/')[2]}), 'url': ('src', {self._proto_relative_url}), 'ext': ('type', {mimetype2ext}), })) for f in formats: f.update(size) # We can get the original gif format from the webpage as well gif_json = traverse_obj(self._search_json( r'var\s+videoItem\s*=', webpage, 'GIF info', video_id, transform_source=js_to_json, fatal=False), { 'url': ('gifUrl', {self._proto_relative_url}), 'filesize': ('size', {int_or_none}), }) if gif_json: gif_json.update(size) gif_json.update({ 'format_id': 'gif', 'preference': -10, # gifs < videos 'ext': 'gif', 'acodec': 'none', 'vcodec': 'gif', 'container': 'gif', }) formats.append(gif_json) search = functools.partial(self._html_search_meta, html=webpage, default=None) twitter_fmt = { 'format_id': 'twitter', 'url': url_or_none(search('twitter:player:stream')), 'ext': mimetype2ext(search('twitter:player:stream:content_type')), 'width': int_or_none(search('twitter:width')), 'height': int_or_none(search('twitter:height')), } if twitter_fmt['url']: formats.append(twitter_fmt) if not formats: self.raise_no_formats( f'No sources found for video {video_id}. 
Maybe a plain image?', expected=True) self._remove_duplicate_formats(formats) return { 'title': self._og_search_title(webpage, default=None), 'description': self.get_description(self._og_search_description(webpage, default='')), **traverse_obj(data, { 'uploader_id': ('account_id', {lambda a: str(a) if int_or_none(a) else None}), 'uploader': ('account', 'username', {lambda x: strip_or_none(x) or None}), 'uploader_url': ('account', 'avatar_url', {url_or_none}), 'like_count': ('upvote_count', {int_or_none}), 'dislike_count': ('downvote_count', {int_or_none}), 'comment_count': ('comment_count', {int_or_none}), 'age_limit': ('is_mature', {lambda x: 18 if x else None}), 'timestamp': (('updated_at', 'created_at'), {parse_iso8601}), 'release_timestamp': ('created_at', {parse_iso8601}), }, get_all=False), **traverse_obj(data, ('media', 0, 'metadata', { 'title': ('title', {lambda x: strip_or_none(x) or None}), 'description': ('description', {self.get_description}), 'duration': ('duration', {float_or_none}), 'timestamp': (('updated_at', 'created_at'), {parse_iso8601}), 'release_timestamp': ('created_at', {parse_iso8601}), }), get_all=False), 'id': video_id, 'formats': formats, 'thumbnails': [{ 'url': thumbnail_url, 'http_headers': {'Accept': '*/*'}, }] if (thumbnail_url := search(['thumbnailUrl', 'twitter:image', 'og:image'])) else None, 'http_headers': {'Accept': '*/*'}, } class ImgurGalleryBaseIE(ImgurBaseIE): _GALLERY = True def _real_extract(self, url): gallery_id = self._match_id(url) data = self._call_api('albums', gallery_id, fatal=False, expected_status=404) info = traverse_obj(data, { 'title': ('title', {lambda x: strip_or_none(x) or None}), 'description': ('description', {self.get_description}), }) if traverse_obj(data, 'is_album'): items = traverse_obj(data, ( 'media', lambda _, v: v.get('type') == 'video' or v['metadata']['is_animated'], 'id', {lambda x: str_or_none(x) or None})) # if a gallery with exactly one video, apply album metadata to video media_id = None 
if self._GALLERY and len(items) == 1: media_id = items[0] if not media_id: result = self.playlist_result( map(self._imgur_result, items), gallery_id) result.update(info) return result gallery_id = media_id result = self._imgur_result(gallery_id) info['_type'] = 'url_transparent' result.update(info) return result class ImgurGalleryIE(ImgurGalleryBaseIE): IE_NAME = 'imgur:gallery' _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/?#]+)/(?:[^/?#]+-)?(?P<id>[a-zA-Z0-9]+)' _TESTS = [{ # TODO: static images - replace with animated/video gallery 'url': 'http://imgur.com/topic/Aww/ll5Vk', 'only_matching': True, }, { 'url': 'https://imgur.com/gallery/YcAQlkx', 'add_ies': ['Imgur'], 'info_dict': { 'id': 'YcAQlkx', 'ext': 'mp4', 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....', 'timestamp': 1358554297, 'upload_date': '20130119', 'uploader_id': '1648642', 'uploader': 'wittyusernamehere', 'release_timestamp': 1358554297, 'thumbnail': 'https://i.imgur.com/YcAQlkxh.jpg', 'release_date': '20130119', 'uploader_url': 'https://i.imgur.com/N5Flb2v_d.png?maxwidth=290&fidelity=grand', 'comment_count': int, 'dislike_count': int, 'like_count': int, }, }, { # Test with slug 'url': 'https://imgur.com/gallery/classic-steve-carell-gif-cracks-me-up-everytime-repost-downvotes-YcAQlkx', 'add_ies': ['Imgur'], 'info_dict': { 'id': 'YcAQlkx', 'ext': 'mp4', 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....', 'timestamp': 1358554297, 'upload_date': '20130119', 'uploader_id': '1648642', 'uploader': 'wittyusernamehere', 'release_timestamp': 1358554297, 'release_date': '20130119', 'thumbnail': 'https://i.imgur.com/YcAQlkxh.jpg', 'uploader_url': 'https://i.imgur.com/N5Flb2v_d.png?maxwidth=290&fidelity=grand', 'comment_count': int, 'dislike_count': int, 'like_count': int, }, }, { # TODO: static image - replace with animated/video gallery 'url': 'http://imgur.com/topic/Funny/N8rOudd', 
'only_matching': True, }, { 'url': 'http://imgur.com/r/aww/VQcQPhM', 'add_ies': ['Imgur'], 'info_dict': { 'id': 'VQcQPhM', 'ext': 'mp4', 'title': 'The boss is here', 'timestamp': 1476494751, 'upload_date': '20161015', 'uploader_id': '19138530', 'uploader': 'thematrixcam', 'comment_count': int, 'dislike_count': int, 'uploader_url': 'https://i.imgur.com/qCjr5Pi_d.png?maxwidth=290&fidelity=grand', 'release_timestamp': 1476494751, 'like_count': int, 'release_date': '20161015', 'thumbnail': 'https://i.imgur.com/VQcQPhMh.jpg', }, }, # from https://github.com/ytdl-org/youtube-dl/pull/16674 { 'url': 'https://imgur.com/t/unmuted/6lAn9VQ', 'info_dict': { 'id': '6lAn9VQ', 'title': 'Penguins !', }, 'playlist_count': 3, }, { 'url': 'https://imgur.com/t/unmuted/penguins-penguins-6lAn9VQ', 'info_dict': { 'id': '6lAn9VQ', 'title': 'Penguins !', }, 'playlist_count': 3, }, { 'url': 'https://imgur.com/t/unmuted/kx2uD3C', 'add_ies': ['Imgur'], 'info_dict': { 'id': 'ZVMv45i', 'ext': 'mp4', 'title': 'Intruder', 'timestamp': 1528129683, 'upload_date': '20180604', 'release_timestamp': 1528129683, 'release_date': '20180604', 'like_count': int, 'dislike_count': int, 'comment_count': int, 'duration': 30.03, 'thumbnail': 'https://i.imgur.com/ZVMv45ih.jpg', }, }, { 'url': 'https://imgur.com/t/unmuted/wXSK0YH', 'add_ies': ['Imgur'], 'info_dict': { 'id': 'JCAP4io', 'ext': 'mp4', 'title': 're:I got the blues$', 'description': 'Luka’s vocal stylings.\n\nFP edit: don’t encourage me. 
I’ll never stop posting Luka and friends.', 'timestamp': 1527809525, 'upload_date': '20180531', 'like_count': int, 'dislike_count': int, 'duration': 30.03, 'comment_count': int, 'release_timestamp': 1527809525, 'thumbnail': 'https://i.imgur.com/JCAP4ioh.jpg', 'release_date': '20180531', }, }] class ImgurAlbumIE(ImgurGalleryBaseIE): IE_NAME = 'imgur:album' _VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?:[^/?#]+-)?(?P<id>[a-zA-Z0-9]+)' _GALLERY = False _TESTS = [{ # TODO: only static images - replace with animated/video gallery 'url': 'http://imgur.com/a/j6Orj', 'only_matching': True, }, # from https://github.com/ytdl-org/youtube-dl/pull/21693 { 'url': 'https://imgur.com/a/iX265HX', 'info_dict': { 'id': 'iX265HX', 'title': 'enen-no-shouboutai', }, 'playlist_count': 2, }, { # Test with URL slug 'url': 'https://imgur.com/a/enen-no-shouboutai-iX265HX', 'info_dict': { 'id': 'iX265HX', 'title': 'enen-no-shouboutai', }, 'playlist_count': 2, }, { 'url': 'https://imgur.com/a/8pih2Ed', 'info_dict': { 'id': '8pih2Ed', }, 'playlist_mincount': 1, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/substack.py
yt_dlp/extractor/substack.py
import re import urllib.parse from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( determine_ext, js_to_json, str_or_none, ) from ..utils.traversal import traverse_obj class SubstackIE(InfoExtractor): _VALID_URL = r'https?://[\w-]+\.substack\.com/p/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r', 'md5': 'f27e4fc6252001d48d479f45e65cdfd5', 'info_dict': { 'id': '47660949', 'ext': 'mp4', 'title': 'I MADE A VLOG', 'description': 'md5:9248af9a759321e1027226f988f54d96', 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18', 'uploader': 'Maybe Baby', 'uploader_id': '33628', }, }, { 'url': 'https://haleynahman.substack.com/p/-dear-danny-i-found-my-boyfriends?s=r', 'md5': '0a63eacec877a1171a62cfa69710fcea', 'info_dict': { 'id': '51045592', 'ext': 'mpga', 'title': "🎧 Dear Danny: I found my boyfriend's secret Twitter account", 'description': 'md5:a57f2439319e56e0af92dd0c95d75797', 'thumbnail': 'md5:daa40b6b79249417c14ff8103db29639', 'uploader': 'Maybe Baby', 'uploader_id': '33628', }, }, { 'url': 'https://andrewzimmern.substack.com/p/mussels-with-black-bean-sauce-recipe', 'md5': 'fd3c07077b02444ff0130715b5f632bb', 'info_dict': { 'id': '47368578', 'ext': 'mp4', 'title': 'Mussels with Black Bean Sauce: Recipe of the Week #7', 'description': 'md5:b96234a2906c7d854d5229818d889515', 'thumbnail': 'md5:e30bfaa9da40e82aa62354263a9dd232', 'uploader': "Andrew Zimmern's Spilled Milk ", 'uploader_id': '577659', }, }, { # Podcast that needs its file extension resolved to mp3 'url': 'https://persuasion1.substack.com/p/summers', 'md5': '1456a755d46084744facdfac9edf900f', 'info_dict': { 'id': '141970405', 'ext': 'mp3', 'title': 'Larry Summers on What Went Wrong on Campus', 'description': 'Yascha Mounk and Larry Summers also discuss the promise and perils of artificial intelligence.', 'thumbnail': r're:https://substackcdn\.com/image/.+\.jpeg', 'uploader': 'Persuasion', 'uploader_id': '61579', }, }] _WEBPAGE_TESTS 
= [{ 'url': 'https://www.mollymovieclub.com/p/interstellar', 'info_dict': { 'id': '53602801', 'ext': 'mpga', 'title': 'Interstellar', 'description': 'Listen now | Episode One', 'thumbnail': r're:https?://.+\.jpeg', 'uploader': 'Molly Movie Club', 'uploader_id': '839621', }, }, { 'url': 'https://www.blockedandreported.org/p/episode-117-lets-talk-about-depp', 'info_dict': { 'id': '57962052', 'ext': 'mpga', 'title': 'md5:855b2756f0ee10f6723fa00b16266f8d', 'description': 'The takes the takes the takes', 'thumbnail': r're:https?://.+\.jpeg', 'uploader': 'Blocked and Reported', 'uploader_id': '500230', }, }] @classmethod def _extract_embed_urls(cls, url, webpage): if not re.search(r'<script[^>]+src=["\']https://substackcdn.com/[^"\']+\.js', webpage): return mobj = re.search(r'{[^}]*\\?["\']subdomain\\?["\']\s*:\s*\\?["\'](?P<subdomain>[^\\"\']+)', webpage) if mobj: parsed = urllib.parse.urlparse(url) yield parsed._replace(netloc=f'{mobj.group("subdomain")}.substack.com').geturl() raise cls.StopExtraction def _extract_video_formats(self, video_id, url): formats, subtitles = [], {} for video_format in ('hls', 'mp4'): video_url = urllib.parse.urljoin(url, f'/api/v1/video/upload/{video_id}/src?type={video_format}') if video_format == 'hls': fmts, subs = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append({ 'url': video_url, 'ext': video_format, }) return formats, subtitles def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) webpage_info = self._parse_json(self._search_json( r'window\._preloads\s*=\s*JSON\.parse\(', webpage, 'json string', display_id, transform_source=js_to_json, contains_pattern=r'"{(?s:.+)}"'), display_id) canonical_url = url domain = traverse_obj(webpage_info, ('domainInfo', 'customDomain', {str})) if domain: canonical_url = 
urllib.parse.urlparse(url)._replace(netloc=domain).geturl() post_type = webpage_info['post']['type'] formats, subtitles = [], {} if post_type == 'podcast': fmt = {'url': webpage_info['post']['podcast_url']} if not determine_ext(fmt['url'], default_ext=None): # The redirected format URL expires but the original URL doesn't, # so we only want to extract the extension from this request fmt['ext'] = determine_ext(self._request_webpage( HEADRequest(fmt['url']), display_id, 'Resolving podcast file extension', 'Podcast URL is invalid').url) formats.append(fmt) elif post_type == 'video': formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], canonical_url) else: self.raise_no_formats(f'Page type "{post_type}" is not supported') return { 'id': str(webpage_info['post']['id']), 'formats': formats, 'subtitles': subtitles, 'title': traverse_obj(webpage_info, ('post', 'title')), 'description': traverse_obj(webpage_info, ('post', 'description')), 'thumbnail': traverse_obj(webpage_info, ('post', 'cover_image')), 'uploader': traverse_obj(webpage_info, ('pub', 'name')), 'uploader_id': str_or_none(traverse_obj(webpage_info, ('post', 'publication_id'))), 'webpage_url': canonical_url, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/francetv.py
yt_dlp/extractor/francetv.py
import re import urllib.parse from .common import InfoExtractor from .dailymotion import DailymotionIE from ..networking import HEADRequest from ..utils import ( ExtractorError, clean_html, determine_ext, extract_attributes, filter_dict, format_field, int_or_none, join_nonempty, parse_iso8601, smuggle_url, unsmuggle_url, url_or_none, ) from ..utils.traversal import ( find_element, get_first, traverse_obj, ) class FranceTVBaseInfoExtractor(InfoExtractor): def _make_url_result(self, video_id, url=None): video_id = video_id.split('@')[0] # for compat with old @catalog IDs full_id = f'francetv:{video_id}' if url: full_id = smuggle_url(full_id, {'hostname': urllib.parse.urlparse(url).hostname}) return self.url_result(full_id, FranceTVIE, video_id) class FranceTVIE(InfoExtractor): IE_NAME = 'francetv' _VALID_URL = r'francetv:(?P<id>[^@#]+)' _GEO_COUNTRIES = ['FR'] _GEO_BYPASS = False _TESTS = [{ # tokenized url is in dinfo['video']['token'] 'url': 'francetv:ec217ecc-0733-48cf-ac06-af1347b849d1', 'info_dict': { 'id': 'ec217ecc-0733-48cf-ac06-af1347b849d1', 'ext': 'mp4', 'title': '13h15, le dimanche... - Les mystères de Jésus', 'timestamp': 1502623500, 'duration': 2580, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170813', }, 'params': {'skip_download': 'm3u8'}, }, { # tokenized url is in dinfo['video']['token']['akamai'] 'url': 'francetv:c5bda21d-2c6f-4470-8849-3d8327adb2ba', 'info_dict': { 'id': 'c5bda21d-2c6f-4470-8849-3d8327adb2ba', 'ext': 'mp4', 'title': '13h15, le dimanche... 
- Les mystères de Jésus', 'timestamp': 1514118300, 'duration': 2880, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20171224', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'francetv:162311093', 'only_matching': True, }, { 'url': 'francetv:NI_1004933@Zouzous', 'only_matching': True, }, { 'url': 'francetv:NI_983319@Info-web', 'only_matching': True, }, { 'url': 'francetv:NI_983319', 'only_matching': True, }, { 'url': 'francetv:NI_657393@Regions', 'only_matching': True, }, { # france-3 live 'url': 'francetv:SIM_France3', 'only_matching': True, }] def _extract_video(self, video_id, hostname=None): is_live = None videos = [] drm_formats = False title = None subtitle = None episode_number = None season_number = None image = None duration = None timestamp = None spritesheets = None # desktop+chrome returns dash; mobile+safari returns hls for device_type, browser in [('desktop', 'chrome'), ('mobile', 'safari')]: dinfo = self._download_json( f'https://k7.ftven.fr/videos/{video_id}', video_id, f'Downloading {device_type} {browser} video JSON', query=filter_dict({ 'device_type': device_type, 'browser': browser, 'domain': hostname, }), fatal=False, expected_status=422) # 422 json gives detailed error code/message if not dinfo: continue if video := traverse_obj(dinfo, ('video', {dict})): videos.append(video) if duration is None: duration = video.get('duration') if is_live is None: is_live = video.get('is_live') if spritesheets is None: spritesheets = video.get('spritesheets') elif code := traverse_obj(dinfo, ('code', {int})): if code == 2009: self.raise_geo_restricted(countries=self._GEO_COUNTRIES) elif code in (2015, 2017, 2019): # 2015: L'accès à cette vidéo est impossible. (DRM-only) # 2017: Cette vidéo n'est pas disponible depuis le site web mobile (b/c DRM) # 2019: L'accès à cette vidéo est incompatible avec votre configuration. 
(DRM-only) drm_formats = True continue self.report_warning( f'{self.IE_NAME} said: {code} "{clean_html(dinfo.get("message"))}"') continue if meta := traverse_obj(dinfo, ('meta', {dict})): if title is None: title = meta.get('title') # meta['pre_title'] contains season and episode number for series in format "S<ID> E<ID>" season_number, episode_number = self._search_regex( r'S(\d+)\s*E(\d+)', meta.get('pre_title'), 'episode info', group=(1, 2), default=(None, None)) if subtitle is None: subtitle = meta.get('additional_title') if image is None: image = meta.get('image_url') if timestamp is None: timestamp = parse_iso8601(meta.get('broadcasted_at')) if not videos and drm_formats: self.report_drm(video_id) formats, subtitles, video_url = [], {}, None for video in traverse_obj(videos, lambda _, v: url_or_none(v['url'])): video_url = video['url'] format_id = video.get('format') if token_url := traverse_obj(video, ('token', (None, 'akamai'), {url_or_none}, any)): tokenized_url = traverse_obj(self._download_json( token_url, video_id, f'Downloading signed {format_id} manifest URL', fatal=False, query={ 'format': 'json', 'url': video_url, }), ('url', {url_or_none})) if tokenized_url: video_url = tokenized_url ext = determine_ext(video_url) if ext == 'f4m': formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id=format_id or ext, fatal=False)) elif ext == 'm3u8': format_id = format_id or 'hls' fmts, subs = self._extract_m3u8_formats_and_subtitles( video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False) for f in traverse_obj(fmts, lambda _, v: v['vcodec'] == 'none' and v.get('tbr') is None): if mobj := re.match(rf'{format_id}-[Aa]udio-\w+-(?P<bitrate>\d+)', f['format_id']): f.update({ 'tbr': int_or_none(mobj.group('bitrate')), 'acodec': 'mp4a', }) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif ext == 'mpd': fmts, subs = self._extract_mpd_formats_and_subtitles( video_url, video_id, mpd_id=format_id or 'dash', fatal=False) 
formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif video_url.startswith('rtmp'): formats.append({ 'url': video_url, 'format_id': join_nonempty('rtmp', format_id), 'ext': 'flv', }) else: if self._is_valid_url(video_url, video_id, format_id): formats.append({ 'url': video_url, 'format_id': format_id, }) # XXX: what is video['captions']? if not formats and video_url: urlh = self._request_webpage( HEADRequest(video_url), video_id, 'Checking for geo-restriction', fatal=False, expected_status=403) if urlh and urlh.headers.get('x-errortype') == 'geo': self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True) for f in formats: if f.get('acodec') != 'none' and f.get('language') in ('qtz', 'qad'): f['language_preference'] = -10 f['format_note'] = 'audio description{}'.format(format_field(f, 'format_note', ', %s')) if spritesheets: formats.append({ 'format_id': 'spritesheets', 'format_note': 'storyboard', 'acodec': 'none', 'vcodec': 'none', 'ext': 'mhtml', 'protocol': 'mhtml', 'url': 'about:invalid', 'fragments': [{ 'url': sheet, # XXX: not entirely accurate; each spritesheet seems to be # a 10x10 grid of thumbnails corresponding to approximately # 2 seconds of the video; the last spritesheet may be shorter 'duration': 200, } for sheet in traverse_obj(spritesheets, (..., {url_or_none}))], }) return { 'id': video_id, 'title': join_nonempty(title, subtitle, delim=' - ').strip(), 'thumbnail': image, 'duration': duration, 'timestamp': timestamp, 'is_live': is_live, 'formats': formats, 'subtitles': subtitles, 'episode': subtitle if episode_number else None, 'series': title if episode_number else None, 'episode_number': int_or_none(episode_number), 'season_number': int_or_none(season_number), '_format_sort_fields': ('res', 'tbr', 'proto'), # prioritize m3u8 over dash } def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) hostname = smuggled_data.get('hostname') or 'www.france.tv' return 
self._extract_video(video_id, hostname=hostname) class FranceTVSiteIE(FranceTVBaseInfoExtractor): IE_NAME = 'francetv:site' _VALID_URL = r'https?://(?:(?:www\.)?france\.tv|mobile\.france\.tv)/(?:[^/]+/)*(?P<id>[^/]+)\.html' _TESTS = [{ 'url': 'https://www.france.tv/france-2/13h15-le-dimanche/140921-les-mysteres-de-jesus.html', 'info_dict': { 'id': 'b2cf9fd8-e971-4757-8651-848f2772df61', # old: ec217ecc-0733-48cf-ac06-af1347b849d1 'ext': 'mp4', 'title': '13h15, le dimanche... - Les mystères de Jésus', 'timestamp': 1502623500, 'duration': 2580, 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20170813', }, 'params': { 'skip_download': True, }, 'skip': 'Unfortunately, this video is no longer available', }, { # geo-restricted 'url': 'https://www.france.tv/enfants/six-huit-ans/foot2rue/saison-1/3066387-duel-au-vieux-port.html', 'info_dict': { 'id': 'a9050959-eedd-4b4a-9b0d-de6eeaa73e44', 'ext': 'mp4', 'title': 'Foot2Rue - Duel au vieux port', 'episode': 'Duel au vieux port', 'series': 'Foot2Rue', 'episode_number': 1, 'season_number': 1, 'timestamp': 1642761360, 'upload_date': '20220121', 'season': 'Season 1', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 1441, }, 'skip': 'Unfortunately, this video is no longer available', }, { # geo-restricted livestream (workflow == 'token-akamai') 'url': 'https://www.france.tv/france-4/direct.html', 'info_dict': { 'id': '9a6a7670-dde9-4264-adbc-55b89558594b', 'ext': 'mp4', 'title': r're:France 4 en direct .+', 'live_status': 'is_live', }, 'skip': 'geo-restricted livestream', }, { # livestream (workflow == 'dai') 'url': 'https://www.france.tv/france-2/direct.html', 'info_dict': { 'id': '006194ea-117d-4bcf-94a9-153d999c59ae', 'ext': 'mp4', 'title': r're:France 2 en direct .+', 'live_status': 'is_live', }, 'params': {'skip_download': 'livestream'}, }, { # Not geo-restricted 'url': 'https://www.france.tv/france-2/la-maison-des-maternelles/5574051-nous-sommes-amis-et-nous-avons-fait-un-enfant-ensemble.html', 'info_dict': { 'id': 
'b448bfe4-9fe7-11ee-97d8-2ba3426fa3df', 'ext': 'mp4', 'title': 'Nous sommes amis et nous avons fait un enfant ensemble - Émission du jeudi 21 décembre 2023', 'duration': 1065, 'thumbnail': r're:https?://.+/.+\.jpg', 'timestamp': 1703147921, 'upload_date': '20231221', }, 'params': {'skip_download': 'm3u8'}, }, { # france3 'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html', 'only_matching': True, }, { # france4 'url': 'https://www.france.tv/france-4/hero-corp/saison-1/134151-apres-le-calme.html', 'only_matching': True, }, { # france5 'url': 'https://www.france.tv/france-5/c-a-dire/saison-10/137013-c-a-dire.html', 'only_matching': True, }, { # franceo 'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html', 'only_matching': True, }, { 'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html', 'only_matching': True, }, { 'url': 'https://www.france.tv/jeux-et-divertissements/divertissements/133965-le-web-contre-attaque.html', 'only_matching': True, }, { 'url': 'https://mobile.france.tv/france-5/c-dans-l-air/137347-emission-du-vendredi-12-mai-2017.html', 'only_matching': True, }, { 'url': 'https://www.france.tv/142749-rouge-sang.html', 'only_matching': True, }, { # france-3 live 'url': 'https://www.france.tv/france-3/direct.html', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) nextjs_data = self._search_nextjs_v13_data(webpage, display_id) video_id = get_first(nextjs_data, ('options', 'id', {str})) if not video_id: raise ExtractorError('Unable to extract video ID') return self._make_url_result(video_id, url=url) class FranceTVInfoIE(FranceTVBaseInfoExtractor): IE_NAME = 'francetvinfo.fr' _VALID_URL = r'https?://(?:www|mobile|france3-regions)\.francetvinfo\.fr/(?:[^/]+/)*(?P<id>[^/?#&.]+)' _TESTS = [{ 'url': 
'https://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-jeudi-22-aout-2019_3561461.html', 'info_dict': { 'id': 'd12458ee-5062-48fe-bfdd-a30d6a01b793', 'ext': 'mp4', 'title': 'Soir 3', 'upload_date': '20190822', 'timestamp': 1566510730, 'thumbnail': r're:^https?://.*\.jpe?g$', 'duration': 1637, 'subtitles': { 'fr': 'mincount:2', }, }, 'params': { 'skip_download': True, }, 'add_ie': [FranceTVIE.ie_key()], }, { 'note': 'Only an image exists in initial webpage instead of the video', 'url': 'https://www.francetvinfo.fr/sante/maladie/coronavirus/covid-19-en-inde-une-situation-catastrophique-a-new-dehli_4381095.html', 'info_dict': { 'id': '7d204c9e-a2d3-11eb-9e4c-000d3a23d482', 'ext': 'mp4', 'title': 'Covid-19 : une situation catastrophique à New Dehli - Édition du mercredi 21 avril 2021', 'thumbnail': r're:^https?://.*\.jpe?g$', 'duration': 76, 'timestamp': 1619028518, 'upload_date': '20210421', }, 'params': { 'skip_download': True, }, 'add_ie': [FranceTVIE.ie_key()], }, { 'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html', 'only_matching': True, }, { 'url': 'http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html', 'only_matching': True, }, { 'url': 'http://france3-regions.francetvinfo.fr/bretagne/cotes-d-armor/thalassa-echappee-breizh-ce-venredi-dans-les-cotes-d-armor-954961.html', 'only_matching': True, }, { # Dailymotion embed 'url': 'http://www.francetvinfo.fr/politique/notre-dame-des-landes/video-sur-france-inter-cecile-duflot-denonce-le-regard-meprisant-de-patrick-cohen_1520091.html', 'md5': 'ee7f1828f25a648addc90cb2687b1f12', 'info_dict': { 'id': 'x4iiko0', 'ext': 'mp4', 'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen', 'description': 'md5:fdcb582c370756293a65cdfbc6ecd90e', 'timestamp': 1467011958, 'uploader': 'France Inter', 'uploader_id': 'x2q2ez', 
'upload_date': '20160627', 'view_count': int, 'tags': ['Politique', 'France Inter', '27 juin 2016', 'Linvité de 8h20', 'Cécile Duflot', 'Patrick Cohen'], 'age_limit': 0, 'duration': 640, 'like_count': int, 'thumbnail': r're:https://[^/?#]+/v/[^/?#]+/x1080', }, 'add_ie': ['Dailymotion'], }, { 'url': 'http://france3-regions.francetvinfo.fr/limousin/emissions/jt-1213-limousin', 'only_matching': True, }, { # "<figure id=" pattern (#28792) 'url': 'https://www.francetvinfo.fr/culture/patrimoine/incendie-de-notre-dame-de-paris/notre-dame-de-paris-de-l-incendie-de-la-cathedrale-a-sa-reconstruction_4372291.html', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) dailymotion_urls = tuple(DailymotionIE._extract_embed_urls(url, webpage)) if dailymotion_urls: return self.playlist_result([ self.url_result(dailymotion_url, DailymotionIE.ie_key()) for dailymotion_url in dailymotion_urls]) video_id = ( traverse_obj(webpage, ( {find_element(tag='button', attr='data-cy', value='francetv-player-wrapper', html=True)}, {extract_attributes}, 'id')) or self._search_regex( (r'player\.load[^;]+src:\s*["\']([^"\']+)', r'id-video=([^@]+@[^"]+)', r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"', r'(?:data-id|<figure[^<]+\bid)=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'), webpage, 'video id') ) return self._make_url_result(video_id, url=url)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/theweatherchannel.py
yt_dlp/extractor/theweatherchannel.py
import json from .theplatform import ThePlatformIE from ..utils import ( determine_ext, parse_duration, parse_iso8601, ) class TheWeatherChannelIE(ThePlatformIE): # XXX: Do not subclass from concrete IE _VALID_URL = r'https?://(?:www\.)?weather\.com(?P<asset_name>(?:/(?P<locale>[a-z]{2}-[A-Z]{2}))?/(?:[^/]+/)*video/(?P<id>[^/?#]+))' _TESTS = [{ 'url': 'https://weather.com/storms/hurricane/video/invest-95l-in-atlantic-has-a-medium-chance-of-development', 'md5': '68f0cf616435683f27ce36bd9c927394', 'info_dict': { 'id': '81acef2d-ee8c-4545-ba83-bff3cc80db97', 'ext': 'mp4', 'title': 'Invest 95L In Atlantic Has A Medium Chance Of Development', 'description': 'md5:0de720fd5f0d0e32207bd4c270fff824', 'uploader': 'TWC - Digital', 'uploader_id': 'b5a999e0-9e04-11e1-9ee2-001d092f5a10', 'upload_date': '20230721', 'timestamp': 1689967343, 'display_id': 'invest-95l-in-atlantic-has-a-medium-chance-of-development', 'duration': 34.0, }, }, { 'url': 'https://weather.com/en-CA/international/videos/video/unidentified-object-falls-from-sky-in-india', 'only_matching': True, }] def _real_extract(self, url): asset_name, locale, display_id = self._match_valid_url(url).groups() if not locale: locale = 'en-US' video_data = next(iter(self._download_json( 'https://weather.com/api/v1/p/redux-dal', display_id, data=json.dumps([{ 'name': 'getCMSAssetsUrlConfig', 'params': { 'language': locale.replace('-', '_'), 'query': { 'assetName': { '$in': asset_name, }, }, }, }]).encode(), headers={ 'Content-Type': 'application/json', })['dal']['getCMSAssetsUrlConfig'].values()))['data'][0] video_id = video_data['id'] seo_meta = video_data.get('seometa', {}) title = video_data.get('title') or seo_meta['title'] urls = [] thumbnails = [] formats = [] for variant_id, variant_url in video_data.get('variants', []).items(): variant_url = variant_url.strip() if not variant_url or variant_url in urls: continue urls.append(variant_url) ext = determine_ext(variant_url) if ext == 'jpg': thumbnails.append({ 'url': 
variant_url, 'id': variant_id, }) elif ThePlatformIE.suitable(variant_url): tp_formats, _ = self._extract_theplatform_smil(variant_url, video_id) formats.extend(tp_formats) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( variant_url, video_id, 'mp4', 'm3u8_native', m3u8_id=variant_id, fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( variant_url, video_id, f4m_id=variant_id, fatal=False)) else: formats.append({ 'url': variant_url, 'format_id': variant_id, }) cc_url = video_data.get('cc_url') return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': video_data.get('description') or seo_meta.get('description') or seo_meta.get('og:description'), 'duration': parse_duration(video_data.get('duration')), 'uploader': video_data.get('providername'), 'uploader_id': video_data.get('providerid'), 'timestamp': parse_iso8601(video_data.get('publishdate')), 'subtitles': {locale[:2]: [{'url': cc_url}]} if cc_url else None, 'thumbnails': thumbnails, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/elementorembed.py
yt_dlp/extractor/elementorembed.py
import re from .common import InfoExtractor from .vimeo import VimeoIE from .youtube import YoutubeIE from ..utils import unescapeHTML, url_or_none from ..utils.traversal import traverse_obj class ElementorEmbedIE(InfoExtractor): _VALID_URL = False _WEBPAGE_TESTS = [{ 'url': 'https://capitaltv.cy/2023/12/14/υγεια-και-ζωη-14-12-2023-δρ-ξενια-κωσταντινιδο/', 'info_dict': { 'id': 'KgzuxwuQwM4', 'ext': 'mp4', 'title': 'ΥΓΕΙΑ ΚΑΙ ΖΩΗ 14 12 2023 ΔΡ ΞΕΝΙΑ ΚΩΣΤΑΝΤΙΝΙΔΟΥ', 'thumbnail': 'https://i.ytimg.com/vi/KgzuxwuQwM4/maxresdefault.jpg', 'playable_in_embed': True, 'tags': 'count:16', 'like_count': int, 'channel': 'Capital TV Cyprus', 'channel_id': 'UCR8LwVKTLGEXt4ZAErpCMrg', 'availability': 'public', 'description': 'md5:7a3308a22881aea4612358c4ba121f77', 'duration': 2891, 'upload_date': '20231214', 'uploader_id': '@capitaltvcyprus6389', 'live_status': 'not_live', 'channel_url': 'https://www.youtube.com/channel/UCR8LwVKTLGEXt4ZAErpCMrg', 'uploader_url': 'https://www.youtube.com/@capitaltvcyprus6389', 'uploader': 'Capital TV Cyprus', 'age_limit': 0, 'categories': ['News & Politics'], 'view_count': int, 'channel_follower_count': int, }, }, { 'url': 'https://elementor.com/academy/theme-builder-collection/?playlist=76011151&video=9e59909', 'info_dict': { 'id': '?playlist=76011151&video=9e59909', 'title': 'Theme Builder Collection - Academy', 'age_limit': 0, 'timestamp': 1702196984.0, 'upload_date': '20231210', 'description': 'md5:7f52c52715ee9e54fd7f82210511673d', 'thumbnail': 'https://elementor.com/academy/wp-content/uploads/2021/07/Theme-Builder-1.png', }, 'playlist_count': 11, 'params': { 'skip_download': True, }, }] _WIDGET_REGEX = r'<div[^>]+class="[^"]*elementor-widget-video(?:-playlist)?[^"]*"[^>]*data-settings="([^"]*)"' def _extract_from_webpage(self, url, webpage): for data_settings in re.findall(self._WIDGET_REGEX, webpage): data = self._parse_json(data_settings, None, fatal=False, transform_source=unescapeHTML) if youtube_url := traverse_obj(data, ('youtube_url', 
{url_or_none})): yield self.url_result(youtube_url, ie=YoutubeIE) for video in traverse_obj(data, ('tabs', lambda _, v: v['_id'], {dict})): if youtube_url := traverse_obj(video, ('youtube_url', {url_or_none})): yield self.url_result(youtube_url, ie=YoutubeIE) if vimeo_url := traverse_obj(video, ('vimeo_url', {url_or_none})): yield self.url_result(vimeo_url, ie=VimeoIE) for direct_url in traverse_obj(video, (('hosted_url', 'external_url'), 'url', {url_or_none})): yield { 'id': video['_id'], 'url': direct_url, 'title': video.get('title'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/startrek.py
yt_dlp/extractor/startrek.py
from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import ( clean_html, parse_iso8601, update_url, url_or_none, ) from ..utils.traversal import subs_list_to_dict, traverse_obj class StarTrekIE(InfoExtractor): IE_NAME = 'startrek' IE_DESC = 'STAR TREK' _VALID_URL = r'https?://(?:www\.)?startrek\.com(?:/en-(?:ca|un))?/videos/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.startrek.com/en-un/videos/official-trailer-star-trek-lower-decks-season-4', 'info_dict': { 'id': 'official-trailer-star-trek-lower-decks-season-4', 'ext': 'mp4', 'title': 'Official Trailer | Star Trek: Lower Decks - Season 4', 'alt_title': 'md5:dd7e3191aaaf9e95db16fc3abd5ef68b', 'categories': ['TRAILERS'], 'description': 'md5:563d7856ddab99bee7a5e50f45531757', 'release_date': '20230722', 'release_timestamp': 1690033200, 'series': 'Star Trek: Lower Decks', 'series_id': 'star-trek-lower-decks', 'thumbnail': r're:https?://.+\.(?:jpg|png)', }, }, { 'url': 'https://www.startrek.com/en-ca/videos/my-first-contact-senator-cory-booker', 'info_dict': { 'id': 'my-first-contact-senator-cory-booker', 'ext': 'mp4', 'title': 'My First Contact: Senator Cory Booker', 'alt_title': 'md5:fe74a8bdb0afab421c6e159a7680db4d', 'categories': ['MY FIRST CONTACT'], 'description': 'md5:a3992ab3b3e0395925d71156bbc018ce', 'release_date': '20250401', 'release_timestamp': 1743512400, 'series': 'Star Trek: The Original Series', 'series_id': 'star-trek-the-original-series', 'thumbnail': r're:https?://.+\.(?:jpg|png)', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) page_props = self._search_nextjs_data(webpage, video_id)['props']['pageProps'] video_data = page_props['video']['data'] if youtube_id := video_data.get('youtube_video_id'): return self.url_result(youtube_id, YoutubeIE) series_id = traverse_obj(video_data, ( 'series_and_movies', ..., 'series_or_movie', 'slug', {str}, any)) return { 'id': video_id, 'series': 
traverse_obj(page_props, ( 'queried', 'header', 'tab3', 'slices', ..., 'items', lambda _, v: v['link']['slug'] == series_id, 'link_copy', {str}, any)), 'series_id': series_id, **traverse_obj(video_data, { 'title': ('title', ..., 'text', {clean_html}, any), 'alt_title': ('subhead', ..., 'text', {clean_html}, any), 'categories': ('category', 'data', 'category_name', {str.upper}, filter, all), 'description': ('slices', ..., 'primary', 'content', ..., 'text', {clean_html}, any), 'release_timestamp': ('published', {parse_iso8601}), 'subtitles': ({'url': 'legacy_subtitle_file'}, all, {subs_list_to_dict(lang='en')}), 'thumbnail': ('poster_frame', 'url', {url_or_none}, {update_url(query=None)}), 'url': ('legacy_video_url', {url_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/seznamzpravy.py
yt_dlp/extractor/seznamzpravy.py
import urllib.parse from .common import InfoExtractor from ..utils import ( int_or_none, parse_codecs, parse_qs, try_get, urljoin, ) def _raw_id(src_url): return urllib.parse.urlparse(src_url).path.split('/')[-1] class SeznamZpravyIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?seznamzpravy\.cz/iframe/player\?.*\bsrc=' _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?seznamzpravy\.cz/iframe/player\?.*?)\1'] _TESTS = [{ 'url': 'https://www.seznamzpravy.cz/iframe/player?duration=241&serviceSlug=zpravy&src=https%3A%2F%2Fv39-a.sdn.szn.cz%2Fv_39%2Fvmd%2F5999c902ea707c67d8e267a9%3Ffl%3Dmdk%2C432f65a0%7C&itemType=video&autoPlay=false&title=Sv%C4%9Bt%20bez%20obalu%3A%20%C4%8Ce%C5%A1t%C3%AD%20voj%C3%A1ci%20na%20mis%C3%ADch%20(kr%C3%A1tk%C3%A1%20verze)&series=Sv%C4%9Bt%20bez%20obalu&serviceName=Seznam%20Zpr%C3%A1vy&poster=%2F%2Fd39-a.sdn.szn.cz%2Fd_39%2Fc_img_F_I%2FR5puJ.jpeg%3Ffl%3Dcro%2C0%2C0%2C1920%2C1080%7Cres%2C1200%2C%2C1%7Cjpg%2C80%2C%2C1&width=1920&height=1080&cutFrom=0&cutTo=0&splVersion=VOD&contentId=170889&contextId=35990&showAdvert=true&collocation=&autoplayPossible=true&embed=&isVideoTooShortForPreroll=false&isVideoTooLongForPostroll=true&videoCommentOpKey=&videoCommentId=&version=4.0.76&dotService=zpravy&gemiusPrismIdentifier=bVc1ZIb_Qax4W2v5xOPGpMeCP31kFfrTzj0SqPTLh_b.Z7&zoneIdPreroll=seznam.pack.videospot&skipOffsetPreroll=5&sectionPrefixPreroll=%2Fzpravy', 'info_dict': { 'id': '170889', 'ext': 'mp4', 'title': 'Svět bez obalu: Čeští vojáci na misích (krátká verze)', 'thumbnail': r're:^https?://.*\.jpe?g', 'duration': 241, 'series': 'Svět bez obalu', }, 'params': { 'skip_download': True, }, }, { # with Location key 'url': 
'https://www.seznamzpravy.cz/iframe/player?duration=null&serviceSlug=zpravy&src=https%3A%2F%2Flive-a.sdn.szn.cz%2Fv_39%2F59e468fe454f8472a96af9fa%3Ffl%3Dmdk%2C5c1e2840%7C&itemType=livevod&autoPlay=false&title=P%C5%99edseda%20KDU-%C4%8CSL%20Pavel%20B%C4%9Blobr%C3%A1dek%20ve%20volebn%C3%AD%20V%C3%BDzv%C4%9B%20Seznamu&series=V%C3%BDzva&serviceName=Seznam%20Zpr%C3%A1vy&poster=%2F%2Fd39-a.sdn.szn.cz%2Fd_39%2Fc_img_G_J%2FjTBCs.jpeg%3Ffl%3Dcro%2C0%2C0%2C1280%2C720%7Cres%2C1200%2C%2C1%7Cjpg%2C80%2C%2C1&width=16&height=9&cutFrom=0&cutTo=0&splVersion=VOD&contentId=185688&contextId=38489&showAdvert=true&collocation=&hideFullScreen=false&hideSubtitles=false&embed=&isVideoTooShortForPreroll=false&isVideoTooShortForPreroll2=false&isVideoTooLongForPostroll=false&fakePostrollZoneID=seznam.clanky.zpravy.preroll&fakePrerollZoneID=seznam.clanky.zpravy.preroll&videoCommentId=&trim=default_16x9&noPrerollVideoLength=30&noPreroll2VideoLength=undefined&noMidrollVideoLength=0&noPostrollVideoLength=999999&autoplayPossible=true&version=5.0.41&dotService=zpravy&gemiusPrismIdentifier=zD3g7byfW5ekpXmxTVLaq5Srjw5i4hsYo0HY1aBwIe..27&zoneIdPreroll=seznam.pack.videospot&skipOffsetPreroll=5&sectionPrefixPreroll=%2Fzpravy%2Fvyzva&zoneIdPostroll=seznam.pack.videospot&skipOffsetPostroll=5&sectionPrefixPostroll=%2Fzpravy%2Fvyzva&regression=false', 'info_dict': { 'id': '185688', 'ext': 'mp4', 'title': 'Předseda KDU-ČSL Pavel Bělobrádek ve volební Výzvě Seznamu', 'thumbnail': r're:^https?://.*\.jpe?g', 'series': 'Výzva', }, 'params': { 'skip_download': True, }, }] def _extract_sdn_formats(self, sdn_url, video_id): sdn_data = self._download_json(sdn_url, video_id) if sdn_data.get('Location'): sdn_url = sdn_data['Location'] sdn_data = self._download_json(sdn_url, video_id) formats = [] mp4_formats = try_get(sdn_data, lambda x: x['data']['mp4'], dict) or {} for format_id, format_data in mp4_formats.items(): relative_url = format_data.get('url') if not relative_url: continue try: width, height = 
format_data.get('resolution') except (TypeError, ValueError): width, height = None, None f = { 'url': urljoin(sdn_url, relative_url), 'format_id': f'http-{format_id}', 'tbr': int_or_none(format_data.get('bandwidth'), scale=1000), 'width': int_or_none(width), 'height': int_or_none(height), } f.update(parse_codecs(format_data.get('codec'))) formats.append(f) pls = sdn_data.get('pls', {}) def get_url(format_id): return try_get(pls, lambda x: x[format_id]['url'], str) dash_rel_url = get_url('dash') if dash_rel_url: formats.extend(self._extract_mpd_formats( urljoin(sdn_url, dash_rel_url), video_id, mpd_id='dash', fatal=False)) hls_rel_url = get_url('hls') if hls_rel_url: formats.extend(self._extract_m3u8_formats( urljoin(sdn_url, hls_rel_url), video_id, ext='mp4', m3u8_id='hls', fatal=False)) return formats def _real_extract(self, url): params = parse_qs(url) src = params['src'][0] title = params['title'][0] video_id = params.get('contentId', [_raw_id(src)])[0] formats = self._extract_sdn_formats(src + 'spl2,2,VOD', video_id) duration = int_or_none(params.get('duration', [None])[0]) series = params.get('series', [None])[0] thumbnail = params.get('poster', [None])[0] return { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'series': series, 'formats': formats, } class SeznamZpravyArticleIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:seznam\.cz/zpravy|seznamzpravy\.cz)/clanek/(?:[^/?#&]+)-(?P<id>\d+)' _API_URL = 'https://apizpravy.seznam.cz/' _TESTS = [{ # two videos on one page, with SDN URL 'url': 'https://www.seznamzpravy.cz/clanek/jejich-svet-na-nas-utoci-je-lepsi-branit-se-na-jejich-pisecku-rika-reziser-a-major-v-zaloze-marhoul-35990', 'info_dict': { 'id': '35990', 'title': 'md5:6011c877a36905f28f271fcd8dcdb0f2', 'description': 'md5:933f7b06fa337a814ba199d3596d27ba', }, 'playlist_count': 2, }, { # video with live stream URL 'url': 
'https://www.seznam.cz/zpravy/clanek/znovu-do-vlady-s-ano-pavel-belobradek-ve-volebnim-specialu-seznamu-38489', 'info_dict': { 'id': '38489', 'title': 'md5:8fa1afdc36fd378cf0eba2b74c5aca60', 'description': 'md5:428e7926a1a81986ec7eb23078004fb4', }, 'playlist_count': 1, }] def _real_extract(self, url): article_id = self._match_id(url) webpage = self._download_webpage(url, article_id) info = self._search_json_ld(webpage, article_id, default={}) title = info.get('title') or self._og_search_title(webpage, fatal=False) description = info.get('description') or self._og_search_description(webpage) return self.playlist_result([ self.url_result(entry_url, ie=SeznamZpravyIE.ie_key()) for entry_url in SeznamZpravyIE._extract_embed_urls(url, webpage)], article_id, title, description)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/crowdbunker.py
yt_dlp/extractor/crowdbunker.py
import itertools from .common import InfoExtractor from ..utils import ( int_or_none, try_get, unified_strdate, url_or_none, ) from ..utils.traversal import traverse_obj class CrowdBunkerIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?crowdbunker\.com/v/(?P<id>[^/?#$&]+)' _TESTS = [{ 'url': 'https://crowdbunker.com/v/0z4Kms8pi8I', 'info_dict': { 'id': '0z4Kms8pi8I', 'ext': 'mp4', 'title': '117) Pass vax et solutions', 'description': 'md5:86bcb422c29475dbd2b5dcfa6ec3749c', 'view_count': int, 'duration': 5386, 'uploader': 'Jérémie Mercier', 'uploader_id': 'UCeN_qQV829NYf0pvPJhW5dQ', 'like_count': int, 'upload_date': '20211218', 'thumbnail': 'https://scw.divulg.org/cb-medias4/images/0z4Kms8pi8I/maxres.jpg', }, 'params': {'skip_download': True}, }] def _real_extract(self, url): video_id = self._match_id(url) data_json = self._download_json( f'https://api.divulg.org/post/{video_id}/details', video_id, headers={'accept': 'application/json, text/plain, */*'}) video_json = data_json['video'] formats, subtitles = [], {} for sub in video_json.get('captions') or []: sub_url = try_get(sub, lambda x: x['file']['url']) if not sub_url: continue subtitles.setdefault(sub.get('languageCode', 'fr'), []).append({ 'url': sub_url, }) if mpd_url := traverse_obj(video_json, ('dashManifest', 'url', {url_or_none})): fmts, subs = self._extract_mpd_formats_and_subtitles(mpd_url, video_id, mpd_id='dash', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if m3u8_url := traverse_obj(video_json, ('hlsManifest', 'url', {url_or_none})): fmts, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) thumbnails = [{ 'url': image['url'], 'height': int_or_none(image.get('height')), 'width': int_or_none(image.get('width')), } for image in video_json.get('thumbnails') or [] if image.get('url')] return { 'id': video_id, 'title': video_json.get('title'), 
'description': video_json.get('description'), 'view_count': video_json.get('viewCount'), 'duration': video_json.get('duration'), 'uploader': try_get(data_json, lambda x: x['channel']['name']), 'uploader_id': try_get(data_json, lambda x: x['channel']['id']), 'like_count': data_json.get('likesCount'), 'upload_date': unified_strdate(video_json.get('publishedAt') or video_json.get('createdAt')), 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, } class CrowdBunkerChannelIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?crowdbunker\.com/@(?P<id>[^/?#$&]+)' _TESTS = [{ 'url': 'https://crowdbunker.com/@Milan_UHRIN', 'playlist_mincount': 14, 'info_dict': { 'id': 'Milan_UHRIN', }, }] def _entries(self, playlist_id): last = None for page in itertools.count(): channel_json = self._download_json( f'https://api.divulg.org/organization/{playlist_id}/posts', playlist_id, headers={'accept': 'application/json, text/plain, */*'}, query={'after': last} if last else {}, note=f'Downloading Page {page}') for item in channel_json.get('items') or []: v_id = item.get('uid') if not v_id: continue yield self.url_result( f'https://crowdbunker.com/v/{v_id}', ie=CrowdBunkerIE.ie_key(), video_id=v_id) last = channel_json.get('last') if not last: break def _real_extract(self, url): playlist_id = self._match_id(url) return self.playlist_result(self._entries(playlist_id), playlist_id=playlist_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sunporno.py
yt_dlp/extractor/sunporno.py
import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_duration, qualities, ) class SunPornoIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www\.)?sunporno\.com/videos|embeds\.sunporno\.com/embed)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.sunporno.com/videos/807778/', 'md5': '507887e29033502f29dba69affeebfc9', 'info_dict': { 'id': '807778', 'ext': 'mp4', 'title': 'md5:0a400058e8105d39e35c35e7c5184164', 'description': 'md5:a31241990e1bd3a64e72ae99afb325fb', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 302, 'age_limit': 18, }, }, { 'url': 'http://embeds.sunporno.com/embed/807778', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'http://www.sunporno.com/videos/{video_id}', video_id) title = self._html_extract_title(webpage) description = self._html_search_meta( 'description', webpage, 'description') thumbnail = self._html_search_regex( r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False) duration = parse_duration(self._search_regex( (r'itemprop="duration"[^>]*>\s*(\d+:\d+)\s*<', r'>Duration:\s*<span[^>]+>\s*(\d+:\d+)\s*<'), webpage, 'duration', fatal=False)) view_count = int_or_none(self._html_search_regex( r'class="views">(?:<noscript>)?\s*(\d+)\s*<', webpage, 'view count', fatal=False)) comment_count = int_or_none(self._html_search_regex( r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False, default=None)) formats = [] quality = qualities(['mp4', 'flv']) for video_url in re.findall(r'<(?:source|video) src="([^"]+)"', webpage): video_ext = determine_ext(video_url) formats.append({ 'url': video_url, 'format_id': video_ext, 'quality': quality(video_ext), }) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'view_count': view_count, 'comment_count': comment_count, 'formats': formats, 'age_limit': 18, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/goshgay.py
yt_dlp/extractor/goshgay.py
import urllib.parse from .common import InfoExtractor from ..utils import ( parse_duration, ) class GoshgayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?goshgay\.com/video(?P<id>\d+?)($|/)' _TEST = { 'url': 'http://www.goshgay.com/video299069/diesel_sfw_xxx_video', 'md5': '4b6db9a0a333142eb9f15913142b0ed1', 'info_dict': { 'id': '299069', 'ext': 'flv', 'title': 'DIESEL SFW XXX Video', 'thumbnail': r're:^http://.*\.jpg$', 'duration': 80, 'age_limit': 18, }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h2>(.*?)<', webpage, 'title') duration = parse_duration(self._html_search_regex( r'<span class="duration">\s*-?\s*(.*?)</span>', webpage, 'duration', fatal=False)) flashvars = urllib.parse.parse_qs(self._html_search_regex( r'<embed.+?id="flash-player-embed".+?flashvars="([^"]+)"', webpage, 'flashvars')) thumbnail = flashvars.get('url_bigthumb', [None])[0] video_url = flashvars['flv_url'][0] return { 'id': video_id, 'url': video_url, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'age_limit': 18, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/businessinsider.py
yt_dlp/extractor/businessinsider.py
from .common import InfoExtractor from .jwplatform import JWPlatformIE class BusinessInsiderIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?businessinsider\.(?:com|nl)/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://uk.businessinsider.com/how-much-radiation-youre-exposed-to-in-everyday-life-2016-6', 'md5': 'ffed3e1e12a6f950aa2f7d83851b497a', 'info_dict': { 'id': 'cjGDb0X9', 'ext': 'mp4', 'title': 'Bananas give you more radiation exposure than living next to a nuclear power plant', 'description': 'md5:0175a3baf200dd8fa658f94cade841b3', 'upload_date': '20160611', 'timestamp': 1465675620, }, }, { 'url': 'https://www.businessinsider.nl/5-scientifically-proven-things-make-you-less-attractive-2017-7/', 'md5': '43f438dbc6da0b89f5ac42f68529d84a', 'info_dict': { 'id': '5zJwd4FK', 'ext': 'mp4', 'title': 'Deze dingen zorgen ervoor dat je minder snel een date scoort', 'description': 'md5:2af8975825d38a4fed24717bbe51db49', 'upload_date': '20170705', 'timestamp': 1499270528, }, }, { 'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) jwplatform_id = self._search_regex( (r'data-media-id=["\']([a-zA-Z0-9]{8})', r'id=["\']jwplayer_([a-zA-Z0-9]{8})', r'id["\']?\s*:\s*["\']?([a-zA-Z0-9]{8})', r'(?:jwplatform\.com/players/|jwplayer_)([a-zA-Z0-9]{8})'), webpage, 'jwplatform id') return self.url_result( f'jwplatform:{jwplatform_id}', ie=JWPlatformIE.ie_key(), video_id=video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/telecaribe.py
yt_dlp/extractor/telecaribe.py
import re from .common import InfoExtractor from ..utils import traverse_obj class TelecaribePlayIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?play\.telecaribe\.co/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.play.telecaribe.co/breicok', 'info_dict': { 'id': 'breicok', 'title': 'Breicok', }, 'playlist_count': 7, }, { 'url': 'https://www.play.telecaribe.co/si-fue-gol-de-yepes', 'info_dict': { 'id': 'si-fue-gol-de-yepes', 'title': 'Sí Fue Gol de Yepes', }, 'playlist_count': 6, }, { 'url': 'https://www.play.telecaribe.co/ciudad-futura', 'info_dict': { 'id': 'ciudad-futura', 'title': 'Ciudad Futura', }, 'playlist_count': 10, }, { 'url': 'https://www.play.telecaribe.co/live', 'info_dict': { 'id': 'live', 'title': r're:^Señal en vivo', 'live_status': 'is_live', 'ext': 'mp4', }, 'params': { 'skip_download': 'Livestream', }, }, { 'url': 'https://www.play.telecaribe.co/liveplus', 'info_dict': { 'id': 'liveplus', 'title': r're:^Señal en vivo Plus', 'live_status': 'is_live', 'ext': 'mp4', }, 'params': { 'skip_download': 'Livestream', }, 'skip': 'Geo-restricted to Colombia', }] def _download_player_webpage(self, webpage, display_id): page_id = self._search_regex( (r'window\.firstPageId\s*=\s*["\']([^"\']+)', r'<div[^>]+id\s*=\s*"pageBackground_([^"]+)'), webpage, 'page_id') props = self._download_json(self._search_regex( rf'<link[^>]+href\s*=\s*"([^"]+)"[^>]+id\s*=\s*"features_{page_id}"', webpage, 'json_props_url'), display_id)['props']['render']['compProps'] return self._download_webpage(traverse_obj(props, (..., 'url'))[-1], display_id) def _get_clean_title(self, title): return re.sub(r'\s*\|\s*Telecaribe\s*VOD', '', title or '').strip() or None def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) player = self._download_player_webpage(webpage, display_id) livestream_url = self._search_regex( r'(?:let|const|var)\s+source\s*=\s*["\']([^"\']+)', player, 'm3u8 url', default=None) if not livestream_url: 
return self.playlist_from_matches( re.findall(r'<a[^>]+href\s*=\s*"([^"]+\.mp4)', player), display_id, self._get_clean_title(self._og_search_title(webpage))) formats, subtitles = self._extract_m3u8_formats_and_subtitles( livestream_url, display_id, 'mp4', live=True) return { 'id': display_id, 'title': self._get_clean_title(self._og_search_title(webpage)), 'formats': formats, 'subtitles': subtitles, 'is_live': True, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/thisamericanlife.py
yt_dlp/extractor/thisamericanlife.py
from .common import InfoExtractor class ThisAmericanLifeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisamericanlife\.org/(?:radio-archives/episode/|play_full\.php\?play=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.thisamericanlife.org/radio-archives/episode/487/harper-high-school-part-one', 'md5': '8f7d2da8926298fdfca2ee37764c11ce', 'info_dict': { 'id': '487', 'ext': 'm4a', 'title': '487: Harper High School, Part One', 'description': 'md5:ee40bdf3fb96174a9027f76dbecea655', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://www.thisamericanlife.org/play_full.php?play=487', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'http://www.thisamericanlife.org/radio-archives/episode/{video_id}', video_id) return { 'id': video_id, 'url': f'http://stream.thisamericanlife.org/{video_id}/stream/{video_id}_64k.m3u8', 'protocol': 'm3u8_native', 'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none', 'abr': 64, 'title': self._html_search_meta(r'twitter:title', webpage, 'title', fatal=True), 'description': self._html_search_meta(r'description', webpage, 'description'), 'thumbnail': self._og_search_thumbnail(webpage), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/thehighwire.py
yt_dlp/extractor/thehighwire.py
from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, url_or_none, ) from ..utils.traversal import ( find_element, require, traverse_obj, ) class TheHighWireIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thehighwire\.com/ark-videos/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://thehighwire.com/ark-videos/the-deposition-of-stanley-plotkin/', 'info_dict': { 'id': 'the-deposition-of-stanley-plotkin', 'ext': 'mp4', 'title': 'THE DEPOSITION OF STANLEY PLOTKIN', 'description': 'md5:6d0be4f1181daaa10430fd8b945a5e54', 'thumbnail': r're:https?://static\.arkengine\.com/video/.+\.jpg', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) embed_url = traverse_obj(webpage, ( {find_element(cls='ark-video-embed', html=True)}, {extract_attributes}, 'src', {url_or_none}, {require('embed URL')})) embed_page = self._download_webpage(embed_url, display_id) return { 'id': display_id, **traverse_obj(webpage, { 'title': ({find_element(cls='section-header')}, {clean_html}), 'description': ({find_element(cls='episode-description__copy')}, {clean_html}), }), **self._parse_html5_media_entries(embed_url, embed_page, display_id, m3u8_id='hls')[0], }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/twentymin.py
yt_dlp/extractor/twentymin.py
from .common import InfoExtractor from ..utils import ( int_or_none, try_get, ) class TwentyMinutenIE(InfoExtractor): _WORKING = False IE_NAME = '20min' _VALID_URL = r'''(?x) https?:// (?:www\.)?20min\.ch/ (?: videotv/*\?.*?\bvid=| videoplayer/videoplayer\.html\?.*?\bvideoId@ ) (?P<id>\d+) ''' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:(?:https?:)?//)?(?:www\.)?20min\.ch/videoplayer/videoplayer.html\?.*?\bvideoId@\d+.*?)\1'] _TESTS = [{ 'url': 'http://www.20min.ch/videotv/?vid=469148&cid=2', 'md5': 'e7264320db31eed8c38364150c12496e', 'info_dict': { 'id': '469148', 'ext': 'mp4', 'title': '85 000 Franken für 15 perfekte Minuten', 'thumbnail': r're:https?://.+\.jpg', }, }, { 'url': 'http://www.20min.ch/videoplayer/videoplayer.html?params=client@twentyDE|videoId@523629', 'info_dict': { 'id': '523629', 'ext': 'mp4', 'title': 'So kommen Sie bei Eis und Schnee sicher an', 'description': 'md5:117c212f64b25e3d95747e5276863f7d', 'thumbnail': r're:https?://.+\.jpg', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.20min.ch/videotv/?cid=44&vid=468738', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # FIXME: Update _VALID_URL 'url': 'https://www.20min.ch/story/so-kommen-sie-bei-eis-und-schnee-sicher-an-557858045456', 'info_dict': { 'id': '523629', 'ext': 'mp4', 'title': 'So kommen Sie bei Eis und Schnee sicher an', 'description': 'md5:117c212f64b25e3d95747e5276863f7d', }, }] def _real_extract(self, url): video_id = self._match_id(url) video = self._download_json( f'http://api.20min.ch/video/{video_id}/show', video_id)['content'] title = video['title'] formats = [{ 'format_id': format_id, 'url': f'http://podcast.20min-tv.ch/podcast/20min/{video_id}{p}.mp4', 'quality': quality, } for quality, (format_id, p) in enumerate([('sd', ''), ('hd', 'h')])] description = video.get('lead') thumbnail = video.get('thumbnail') def extract_count(kind): return try_get( video, lambda x: int_or_none(x['communityobject'][f'thumbs_{kind}'])) like_count = extract_count('up') 
dislike_count = extract_count('down') return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'like_count': like_count, 'dislike_count': dislike_count, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mlb.py
yt_dlp/extractor/mlb.py
import json import re import time import uuid from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, determine_ext, int_or_none, join_nonempty, jwt_decode_hs256, parse_duration, parse_iso8601, try_get, url_or_none, urlencode_postdata, ) from ..utils.traversal import traverse_obj class MLBBaseIE(InfoExtractor): def _real_extract(self, url): display_id = self._match_id(url) video = self._download_video_data(display_id) video_id = video['id'] title = video['title'] feed = self._get_feed(video) formats = [] for playback in (feed.get('playbacks') or []): playback_url = playback.get('url') if not playback_url: continue name = playback.get('name') ext = determine_ext(playback_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( playback_url, video_id, 'mp4', 'm3u8_native', m3u8_id=name, fatal=False)) else: f = { 'format_id': name, 'url': playback_url, } mobj = re.search(r'_(\d+)K_(\d+)X(\d+)', name) if mobj: f.update({ 'height': int(mobj.group(3)), 'tbr': int(mobj.group(1)), 'width': int(mobj.group(2)), }) mobj = re.search(r'_(\d+)x(\d+)_(\d+)_(\d+)K\.mp4', playback_url) if mobj: f.update({ 'fps': int(mobj.group(3)), 'height': int(mobj.group(2)), 'tbr': int(mobj.group(4)), 'width': int(mobj.group(1)), }) formats.append(f) thumbnails = [] for cut in (try_get(feed, lambda x: x['image']['cuts'], list) or []): src = cut.get('src') if not src: continue thumbnails.append({ 'height': int_or_none(cut.get('height')), 'url': src, 'width': int_or_none(cut.get('width')), }) language = (video.get('language') or 'EN').lower() return { 'id': video_id, 'title': title, 'formats': formats, 'description': video.get('description'), 'duration': parse_duration(feed.get('duration')), 'thumbnails': thumbnails, 'timestamp': parse_iso8601(video.get(self._TIMESTAMP_KEY)), 'subtitles': self._extract_mlb_subtitles(feed, language), } class MLBIE(MLBBaseIE): _VALID_URL = r'''(?x) https?:// (?:[\da-z_-]+\.)*mlb\.com/ (?: (?: 
(?:[^/]+/)*video/[^/]+/c-| (?: shared/video/embed/(?:embed|m-internal-embed)\.html| (?:[^/]+/)+(?:play|index)\.jsp| )\?.*?\bcontent_id= ) (?P<id>\d+) ) ''' _EMBED_REGEX = [ r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1', r'data-video-link=["\'](?P<url>http://m\.mlb\.com/video/[^"\']+)', ] _TESTS = [{ 'url': 'https://www.mlb.com/mariners/video/ackleys-spectacular-catch/c-34698933', 'info_dict': { 'id': '34698933', 'ext': 'mp4', 'title': 'Ackley\'s spectacular catch', 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0', 'duration': 66, 'timestamp': 1405995000, 'upload_date': '20140722', 'thumbnail': r're:https?://.+\.jpg', }, }, { 'url': 'https://www.mlb.com/video/stanton-prepares-for-derby/c-34496663', 'info_dict': { 'id': '34496663', 'ext': 'mp4', 'title': 'Stanton prepares for Derby', 'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57', 'duration': 46, 'timestamp': 1405120200, 'upload_date': '20140711', 'thumbnail': r're:https?://.+\.jpg', }, }, { 'url': 'https://www.mlb.com/video/cespedes-repeats-as-derby-champ/c-34578115', 'info_dict': { 'id': '34578115', 'ext': 'mp4', 'title': 'Cespedes repeats as Derby champ', 'description': 'md5:08df253ce265d4cf6fb09f581fafad07', 'duration': 488, 'timestamp': 1405414336, 'upload_date': '20140715', 'thumbnail': r're:https?://.+\.jpg', }, }, { 'url': 'https://www.mlb.com/video/bautista-on-home-run-derby/c-34577915', 'info_dict': { 'id': '34577915', 'ext': 'mp4', 'title': 'Bautista on Home Run Derby', 'description': 'md5:b80b34031143d0986dddc64a8839f0fb', 'duration': 52, 'timestamp': 1405405122, 'upload_date': '20140715', 'thumbnail': r're:https?://.+\.jpg', }, }, { 'url': 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694', 'only_matching': True, }, { 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb', 'only_matching': True, }, { 'url': 
'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553', 'only_matching': True, }, { 'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553', 'only_matching': True, }, { 'url': 'https://www.mlb.com/cardinals/video/piscottys-great-sliding-catch/c-51175783', 'only_matching': True, }, { # From http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer 'url': 'http://mlb.mlb.com/shared/video/embed/m-internal-embed.html?content_id=75609783&property=mlb&autoplay=true&hashmode=false&siteSection=mlb/multimedia/article_118550098/article_embed&club=mlb', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.mlbdailydish.com/2013/2/25/4028804/mlb-classic-video-vault-open-watch-embed-share', 'info_dict': { 'id': 'mlb-classic-video-vault-open-watch-embed-share', 'title': 'MLB Classic vault is open! Don\'t avert your eyes!', 'age_limit': 0, 'description': 'All the video needed to hold you over until real baseball starts next month.', 'thumbnail': r're:https?://cdn\.vox-cdn\.com/thumbor/.+\.jpg', }, 'playlist_count': 3, }] _TIMESTAMP_KEY = 'date' @staticmethod def _get_feed(video): return video @staticmethod def _extract_mlb_subtitles(feed, language): subtitles = {} for keyword in (feed.get('keywordsAll') or []): keyword_type = keyword.get('type') if keyword_type and keyword_type.startswith('closed_captions_location_'): cc_location = keyword.get('value') if cc_location: subtitles.setdefault(language, []).append({ 'url': cc_location, }) return subtitles def _download_video_data(self, display_id): return self._download_json( f'http://content.mlb.com/mlb/item/id/v1/{display_id}/details/web-v1.json', display_id) class MLBVideoIE(MLBBaseIE): _VALID_URL = r'https?://(?:www\.)?mlb\.com/(?:[^/]+/)*video/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.mlb.com/mariners/video/ackley-s-spectacular-catch-c34698933', 'info_dict': { 'id': 'c04a8863-f569-42e6-9f87-992393657614', 'ext': 'mp4', 
'title': 'Ackley\'s spectacular catch', 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0', 'duration': 66, 'timestamp': 1405995000, 'upload_date': '20140722', 'thumbnail': r're:https?://.+', }, }] _TIMESTAMP_KEY = 'timestamp' @classmethod def suitable(cls, url): return False if MLBIE.suitable(url) else super().suitable(url) @staticmethod def _get_feed(video): return video['feeds'][0] @staticmethod def _extract_mlb_subtitles(feed, language): subtitles = {} for cc_location in (feed.get('closedCaptions') or []): subtitles.setdefault(language, []).append({ 'url': cc_location, }) def _download_video_data(self, display_id): # https://www.mlb.com/data-service/en/videos/[SLUG] return self._download_json( 'https://fastball-gateway.mlb.com/graphql', display_id, query={ 'query': '''{ mediaPlayback(ids: "%s") { description feeds(types: CMS) { closedCaptions duration image { cuts { width height src } } playbacks { name url } } id timestamp title } }''' % display_id, # noqa: UP031 })['data']['mediaPlayback'][0] class MLBTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mlb\.com/tv/g(?P<id>\d{6})' _NETRC_MACHINE = 'mlb' _TESTS = [{ 'url': 'https://www.mlb.com/tv/g661581/vee2eff5f-a7df-4c20-bdb4-7b926fa12638', 'info_dict': { 'id': '661581', 'ext': 'mp4', 'title': '2022-07-02 - St. 
Louis Cardinals @ Philadelphia Phillies', 'release_date': '20220702', 'release_timestamp': 1656792300, }, 'params': {'skip_download': 'm3u8'}, }, { # makeup game: has multiple dates, need to avoid games with 'rescheduleDate' 'url': 'https://www.mlb.com/tv/g747039/vd22541c4-5a29-45f7-822b-635ec041cf5e', 'info_dict': { 'id': '747039', 'ext': 'mp4', 'title': '2024-07-29 - Toronto Blue Jays @ Baltimore Orioles', 'release_date': '20240729', 'release_timestamp': 1722280200, }, 'params': {'skip_download': 'm3u8'}, }] _GRAPHQL_INIT_QUERY = '''\ mutation initSession($device: InitSessionInput!, $clientType: ClientType!, $experience: ExperienceTypeInput) { initSession(device: $device, clientType: $clientType, experience: $experience) { deviceId sessionId entitlements { code } location { countryCode regionName zipCode latitude longitude } clientExperience features } }''' _GRAPHQL_PLAYBACK_QUERY = '''\ mutation initPlaybackSession( $adCapabilities: [AdExperienceType] $mediaId: String! $deviceId: String! $sessionId: String! 
$quality: PlaybackQuality ) { initPlaybackSession( adCapabilities: $adCapabilities mediaId: $mediaId deviceId: $deviceId sessionId: $sessionId quality: $quality ) { playbackSessionId playback { url token expiration cdn } } }''' _APP_VERSION = '7.8.2' _device_id = None _session_id = None _access_token = None _token_expiry = 0 @property def _api_headers(self): if (self._token_expiry - 120) <= time.time(): self.write_debug('Access token has expired; re-logging in') self._perform_login(*self._get_login_info()) return {'Authorization': f'Bearer {self._access_token}'} def _real_initialize(self): if not self._access_token: self.raise_login_required( 'All videos are only available to registered users', method='password') def _set_device_id(self, username): if self._device_id: return device_id_cache = self.cache.load(self._NETRC_MACHINE, 'device_ids', default={}) self._device_id = device_id_cache.get(username) if self._device_id: return self._device_id = str(uuid.uuid4()) device_id_cache[username] = self._device_id self.cache.store(self._NETRC_MACHINE, 'device_ids', device_id_cache) def _perform_login(self, username, password): try: self._access_token = self._download_json( 'https://ids.mlb.com/oauth2/aus1m088yK07noBfh356/v1/token', None, 'Logging in', 'Unable to log in', headers={ 'User-Agent': 'okhttp/3.12.1', 'Content-Type': 'application/x-www-form-urlencoded', }, data=urlencode_postdata({ 'grant_type': 'password', 'username': username, 'password': password, 'scope': 'openid offline_access', 'client_id': '0oa3e1nutA1HLzAKG356', }))['access_token'] except ExtractorError as error: if isinstance(error.cause, HTTPError) and error.cause.status == 400: raise ExtractorError('Invalid username or password', expected=True) raise self._token_expiry = traverse_obj(self._access_token, ({jwt_decode_hs256}, 'exp', {int})) or 0 self._set_device_id(username) self._session_id = self._call_api({ 'operationName': 'initSession', 'query': self._GRAPHQL_INIT_QUERY, 'variables': { 'device': { 
'appVersion': self._APP_VERSION, 'deviceFamily': 'desktop', 'knownDeviceId': self._device_id, 'languagePreference': 'ENGLISH', 'manufacturer': '', 'model': '', 'os': '', 'osVersion': '', }, 'clientType': 'WEB', }, }, None, 'session ID')['data']['initSession']['sessionId'] def _call_api(self, data, video_id, description='GraphQL JSON', fatal=True): return self._download_json( 'https://media-gateway.mlb.com/graphql', video_id, f'Downloading {description}', f'Unable to download {description}', fatal=fatal, headers={ **self._api_headers, 'Accept': 'application/json', 'Content-Type': 'application/json', 'x-client-name': 'WEB', 'x-client-version': self._APP_VERSION, }, data=json.dumps(data, separators=(',', ':')).encode()) def _extract_formats_and_subtitles(self, broadcast, video_id): feed = traverse_obj(broadcast, ('homeAway', {str.title})) medium = traverse_obj(broadcast, ('type', {str})) language = traverse_obj(broadcast, ('language', {str.lower})) format_id = join_nonempty(feed, medium, language) response = self._call_api({ 'operationName': 'initPlaybackSession', 'query': self._GRAPHQL_PLAYBACK_QUERY, 'variables': { 'adCapabilities': ['GOOGLE_STANDALONE_AD_PODS'], 'deviceId': self._device_id, 'mediaId': broadcast['mediaId'], 'quality': 'PLACEHOLDER', 'sessionId': self._session_id, }, }, video_id, f'{format_id} broadcast JSON', fatal=False) playback = traverse_obj(response, ('data', 'initPlaybackSession', 'playback', {dict})) m3u8_url = traverse_obj(playback, ('url', {url_or_none})) token = traverse_obj(playback, ('token', {str})) if not (m3u8_url and token): errors = '; '.join(traverse_obj(response, ('errors', ..., 'message', {str}))) if errors: # Only warn when 'blacked out' or 'not entitled'; radio formats may be available self.report_warning(f'API returned errors for {format_id}: {errors}') else: self.report_warning(f'No formats available for {format_id} broadcast; skipping') return [], {} fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, 
'mp4', m3u8_id=format_id, fatal=False) for fmt in fmts: fmt.setdefault('format_note', join_nonempty(feed, medium, delim=' ')) fmt.setdefault('language', language) if fmt.get('vcodec') == 'none' and fmt['language'] == 'en': fmt['source_preference'] = 10 return fmts, subs def _real_extract(self, url): video_id = self._match_id(url) data = self._download_json( 'https://statsapi.mlb.com/api/v1/schedule', video_id, query={ 'gamePk': video_id, 'hydrate': 'broadcasts(all),statusFlags', }) metadata = traverse_obj(data, ( 'dates', ..., 'games', lambda _, v: str(v['gamePk']) == video_id and not v.get('rescheduleDate'), any)) broadcasts = traverse_obj(metadata, ( 'broadcasts', lambda _, v: v['mediaId'] and v['mediaState']['mediaStateCode'] != 'MEDIA_OFF')) formats, subtitles = [], {} for broadcast in broadcasts: fmts, subs = self._extract_formats_and_subtitles(broadcast, video_id) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) return { 'id': video_id, 'title': join_nonempty( traverse_obj(metadata, ('officialDate', {str})), traverse_obj(metadata, ('teams', ('away', 'home'), 'team', 'name', {str}, all, {' @ '.join})), delim=' - '), 'is_live': traverse_obj(broadcasts, (..., 'mediaState', 'mediaStateCode', {str}, any)) == 'MEDIA_ON', 'release_timestamp': traverse_obj(metadata, ('gameDate', {parse_iso8601})), 'formats': formats, 'subtitles': subtitles, } class MLBArticleIE(InfoExtractor): _VALID_URL = r'https?://www\.mlb\.com/news/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.mlb.com/news/manny-machado-robs-guillermo-heredia-reacts', 'info_dict': { 'id': '36db7394-343c-4ea3-b8ca-ead2e61bca9a', 'title': 'Machado\'s grab draws hilarious irate reaction', 'modified_timestamp': 1675888370, 'description': 'md5:a19d4eb0487b2cb304e9a176f6b67676', 'modified_date': '20230208', }, 'playlist_mincount': 2, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) apollo_cache_json = 
self._search_json(r'window\.initState\s*=', webpage, 'window.initState', display_id)['apolloCache'] content_real_info = traverse_obj( apollo_cache_json, ('ROOT_QUERY', lambda k, _: k.startswith('getArticle')), get_all=False) return self.playlist_from_matches( traverse_obj(content_real_info, ('parts', lambda _, v: v['__typename'] == 'Video' or v['type'] == 'video')), getter=lambda x: f'https://www.mlb.com/video/{x["slug"]}', ie=MLBVideoIE, playlist_id=content_real_info.get('translationId'), title=self._html_search_meta('og:title', webpage), description=content_real_info.get('summary'), modified_timestamp=parse_iso8601(content_real_info.get('lastUpdatedDate')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/skynewsarabia.py
yt_dlp/extractor/skynewsarabia.py
from .common import InfoExtractor
from ..utils import (
    parse_duration,
    parse_iso8601,
)


class SkyNewsArabiaBaseIE(InfoExtractor):
    # Shared helpers for the Sky News Arabia video and article extractors.
    _IMAGE_BASE_URL = 'http://www.skynewsarabia.com/web/images'

    def _call_api(self, path, value):
        # Fetch JSON metadata from the site's REST API; `path` is 'video' or 'article'.
        return self._download_json(f'http://api.skynewsarabia.com/web/rest/v2/{path}/{value}.json', value)

    def _get_limelight_media_id(self, url):
        # Streams are hosted on Limelight; pull the 32-char media id out of the URL.
        return self._search_regex(r'/media/[^/]+/([a-z0-9]{32})', url, 'limelight media id')

    def _get_image_url(self, image_path_template, width='1600', height='1200'):
        # `image_path_template` contains '{width}'/'{height}' placeholders filled in here.
        return self._IMAGE_BASE_URL + image_path_template.format(width=width, height=height)

    def _extract_video_info(self, video_data):
        """Build a url_transparent result that defers actual extraction to LimelightMedia."""
        video_id = str(video_data['id'])
        topic = video_data.get('topicTitle')
        return {
            '_type': 'url_transparent',
            'url': 'limelight:media:{}'.format(self._get_limelight_media_id(video_data['videoUrl'][0]['url'])),
            'id': video_id,
            'title': video_data['headline'],
            'description': video_data.get('summary'),
            'thumbnail': self._get_image_url(video_data['mediaAsset']['imageUrl']),
            'timestamp': parse_iso8601(video_data.get('date')),
            'duration': parse_duration(video_data.get('runTime')),
            'tags': video_data.get('tags', []),
            'categories': [topic] if topic else [],
            'webpage_url': f'http://www.skynewsarabia.com/web/video/{video_id}',
            'ie_key': 'LimelightMedia',
        }


class SkyNewsArabiaIE(SkyNewsArabiaBaseIE):
    _WORKING = False
    IE_NAME = 'skynewsarabia:video'
    _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.skynewsarabia.com/web/video/794902/%D9%86%D8%B5%D9%81-%D9%85%D9%84%D9%8A%D9%88%D9%86-%D9%85%D8%B5%D8%A8%D8%A7%D8%AD-%D8%B4%D8%AC%D8%B1%D8%A9-%D9%83%D8%B1%D9%8A%D8%B3%D9%85%D8%A7%D8%B3',
        'info_dict': {
            'id': '794902',
            'ext': 'flv',
            'title': 'نصف مليون مصباح على شجرة كريسماس',
            'description': 'md5:22f1b27f0850eeb10c7e59b1f16eb7c6',
            'upload_date': '20151128',
            'timestamp': 1448697198,
            'duration': 2119,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        # Single-video page: look up the API record and hand off to Limelight.
        video_id = self._match_id(url)
        video_data = self._call_api('video', video_id)
        return self._extract_video_info(video_data)


class SkyNewsArabiaArticleIE(SkyNewsArabiaBaseIE):
    _WORKING = False
    IE_NAME = 'skynewsarabia:article'
    _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/article/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.skynewsarabia.com/web/article/794549/%D8%A7%D9%94%D8%AD%D8%AF%D8%A7%D8%AB-%D8%A7%D9%84%D8%B4%D8%B1%D9%82-%D8%A7%D9%84%D8%A7%D9%94%D9%88%D8%B3%D8%B7-%D8%AE%D8%B1%D9%8A%D8%B7%D8%A9-%D8%A7%D9%84%D8%A7%D9%94%D9%84%D8%B9%D8%A7%D8%A8-%D8%A7%D9%84%D8%B0%D9%83%D9%8A%D8%A9',
        'info_dict': {
            'id': '794549',
            'ext': 'flv',
            'title': 'بالفيديو.. ألعاب ذكية تحاكي واقع المنطقة',
            'description': 'md5:0c373d29919a851e080ee4edd0c5d97f',
            'upload_date': '20151126',
            'timestamp': 1448559336,
            'duration': 281.6,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.skynewsarabia.com/web/article/794844/%D8%A7%D8%B3%D8%AA%D9%87%D8%AF%D8%A7%D9%81-%D9%82%D9%88%D8%A7%D8%B1%D8%A8-%D8%A7%D9%94%D8%B3%D9%84%D8%AD%D8%A9-%D9%84%D9%85%D9%8A%D9%84%D9%8A%D8%B4%D9%8A%D8%A7%D8%AA-%D8%A7%D9%84%D8%AD%D9%88%D8%AB%D9%8A-%D9%88%D8%B5%D8%A7%D9%84%D8%AD',
        'info_dict': {
            'id': '794844',
            'title': 'إحباط تهريب أسلحة لميليشيات الحوثي وصالح بجنوب اليمن',
            'description': 'md5:5c927b8b2e805796e7f693538d96fc7e',
        },
        'playlist_mincount': 2,
    }]

    def _real_extract(self, url):
        article_id = self._match_id(url)
        article_data = self._call_api('article', article_id)
        media_asset = article_data['mediaAsset']
        # An article either has one primary VIDEO asset, or a list of inline videos.
        if media_asset['type'] == 'VIDEO':
            topic = article_data.get('topicTitle')
            return {
                '_type': 'url_transparent',
                'url': 'limelight:media:{}'.format(self._get_limelight_media_id(media_asset['videoUrl'][0]['url'])),
                'id': article_id,
                'title': article_data['headline'],
                'description': article_data.get('summary'),
                'thumbnail': self._get_image_url(media_asset['imageUrl']),
                'timestamp': parse_iso8601(article_data.get('date')),
                'tags': article_data.get('tags', []),
                'categories': [topic] if topic else [],
                'webpage_url': url,
                'ie_key': 'LimelightMedia',
            }
        # Otherwise collect every inline VIDEO item into a playlist.
        entries = [self._extract_video_info(item) for item in article_data.get('inlineItems', []) if item['type'] == 'VIDEO']
        return self.playlist_result(entries, article_id, article_data['headline'], article_data.get('summary'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/streetvoice.py
yt_dlp/extractor/streetvoice.py
from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_iso8601,
    str_or_none,
    strip_or_none,
    try_get,
    urljoin,
)


class StreetVoiceIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://streetvoice.com/skippylu/songs/123688/',
        'md5': '0eb535970629a5195685355f3ed60bfd',
        'info_dict': {
            'id': '123688',
            'ext': 'mp3',
            'title': '流浪',
            'description': 'md5:8eb0bfcc9dcd8aa82bd6efca66e3fea6',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 270,
            'upload_date': '20100923',
            'uploader': 'Crispy脆樂團',
            'uploader_id': '627810',
            'uploader_url': 're:^https?://streetvoice.com/skippylu/',
            'timestamp': 1285261661,
            'view_count': int,
            'like_count': int,
            'comment_count': int,
            'repost_count': int,
            'track': '流浪',
            'track_id': '123688',
            'album': '2010',
        },
    }, {
        'url': 'http://tw.streetvoice.com/skippylu/songs/94440/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a single StreetVoice song via the site's v4 JSON API."""
        song_id = self._match_id(url)
        base_url = f'https://streetvoice.com/api/v4/song/{song_id}/'
        song = self._download_json(base_url, song_id, query={
            'fields': 'album,comments_count,created_at,id,image,length,likes_count,name,nickname,plays_count,profile,share_count,synopsis,user,username',
        })
        title = song['name']

        formats = []
        # Each sub-endpoint returns JSON whose 'file' key is the stream URL.
        # An empty POST body (data=b'') is what the API expects; failures are
        # non-fatal so remaining formats are still collected.
        for suffix, format_id in [('hls/file', 'hls'), ('file', 'http'), ('file/original', 'original')]:
            f_url = (self._download_json(
                base_url + suffix + '/', song_id,
                f'Downloading {format_id} format URL',
                data=b'', fatal=False) or {}).get('file')
            if not f_url:
                continue
            f = {
                'ext': 'mp3',
                'format_id': format_id,
                'url': f_url,
                'vcodec': 'none',
            }
            if format_id == 'hls':
                f['protocol'] = 'm3u8_native'
            # The bitrate is embedded in the file URL, e.g. '....mp3.128k'.
            abr = self._search_regex(r'\.mp3\.(\d+)k', f_url, 'bitrate', default=None)
            if abr:
                abr = int(abr)
                f.update({
                    'abr': abr,
                    'tbr': abr,
                })
            formats.append(f)

        user = song.get('user') or {}
        username = user.get('username')
        # API exposes counters as '<name>_count' fields.
        get_count = lambda x: int_or_none(song.get(x + '_count'))

        return {
            'id': song_id,
            'formats': formats,
            'title': title,
            'description': strip_or_none(song.get('synopsis')),
            'thumbnail': song.get('image'),
            'duration': int_or_none(song.get('length')),
            'timestamp': parse_iso8601(song.get('created_at')),
            'uploader': try_get(user, lambda x: x['profile']['nickname']),
            'uploader_id': str_or_none(user.get('id')),
            'uploader_url': urljoin(url, f'/{username}/') if username else None,
            'view_count': get_count('plays'),
            'like_count': get_count('likes'),
            'comment_count': get_count('comments'),
            'repost_count': get_count('share'),
            'track': title,
            'track_id': song_id,
            'album': try_get(song, lambda x: x['album']['name']),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/senalcolombia.py
yt_dlp/extractor/senalcolombia.py
from .common import InfoExtractor
from .rtvcplay import RTVCKalturaIE


class SenalColombiaLiveIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?senalcolombia\.tv/(?P<id>senal-en-vivo)'

    _TESTS = [{
        'url': 'https://www.senalcolombia.tv/senal-en-vivo',
        'info_dict': {
            'id': 'indexSC',
            'title': 're:^Señal Colombia',
            'description': 'md5:799f16a401d97f40c33a2c6a3e2a507b',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'live_status': 'is_live',
            'ext': 'mp4',
        },
        'params': {
            'skip_download': 'Livestream',
        },
    }]

    def _real_extract(self, url):
        """Resolve the live stream URL embedded in the page's Drupal settings JSON."""
        slug = self._match_id(url)
        webpage = self._download_webpage(url, slug)

        # The site is a Drupal app; its settings blob carries the live source URL
        # under the 'envivosrc' key. Extraction itself is delegated to RTVCKaltura.
        drupal_settings = self._search_json(
            r'<script\b[^>]*data-drupal-selector\s*=\s*"[^"]*drupal-settings-json[^"]*"[^>]*>',
            webpage, 'hydration', slug)
        stream_url = drupal_settings['envivosrc']

        return self.url_result(stream_url, RTVCKalturaIE, slug)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/aws.py
yt_dlp/extractor/aws.py
import datetime as dt
import hashlib
import hmac
import urllib.parse

from .common import InfoExtractor


class AWSIE(InfoExtractor):  # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
    _AWS_ALGORITHM = 'AWS4-HMAC-SHA256'
    _AWS_REGION = 'us-east-1'

    def _aws_execute_api(self, aws_dict, video_id, query=None):
        """Perform a SigV4-signed GET against the subclass' API Gateway proxy.

        aws_dict must contain 'uri', 'access_key' and 'secret_key', and may
        contain 'session_token' for temporary credentials. Subclasses provide
        _AWS_PROXY_HOST and _AWS_API_KEY. Returns the decoded JSON response.
        """
        query = query or {}
        amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
        date = amz_date[:8]
        headers = {
            'Accept': 'application/json',
            'Host': self._AWS_PROXY_HOST,
            'X-Amz-Date': amz_date,
            'X-Api-Key': self._AWS_API_KEY,
        }
        session_token = aws_dict.get('session_token')
        if session_token:
            headers['X-Amz-Security-Token'] = session_token

        def aws_hash(s):
            return hashlib.sha256(s.encode()).hexdigest()

        # Task 1: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
        # SigV4 requires the canonical query string to be sorted by parameter
        # name and percent-encoded per RFC 3986 (space -> %20, never '+').
        # The same string is reused verbatim in the request URL below, so the
        # signed request and the sent request always agree.
        canonical_querystring = urllib.parse.urlencode(
            sorted(query.items()), quote_via=urllib.parse.quote)
        canonical_headers = ''
        for header_name, header_value in sorted(headers.items()):
            canonical_headers += f'{header_name.lower()}:{header_value}\n'
        signed_headers = ';'.join([header.lower() for header in sorted(headers.keys())])
        canonical_request = '\n'.join([
            'GET',
            aws_dict['uri'],
            canonical_querystring,
            canonical_headers,
            signed_headers,
            aws_hash(''),  # hash of the (empty) request payload
        ])

        # Task 2: http://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
        credential_scope_list = [date, self._AWS_REGION, 'execute-api', 'aws4_request']
        credential_scope = '/'.join(credential_scope_list)
        string_to_sign = '\n'.join([self._AWS_ALGORITHM, amz_date, credential_scope, aws_hash(canonical_request)])

        # Task 3: http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
        def aws_hmac(key, msg):
            return hmac.new(key, msg.encode(), hashlib.sha256)

        def aws_hmac_digest(key, msg):
            return aws_hmac(key, msg).digest()

        def aws_hmac_hexdigest(key, msg):
            return aws_hmac(key, msg).hexdigest()

        # Derive the signing key by chaining HMACs over each scope component.
        k_signing = ('AWS4' + aws_dict['secret_key']).encode()
        for value in credential_scope_list:
            k_signing = aws_hmac_digest(k_signing, value)

        signature = aws_hmac_hexdigest(k_signing, string_to_sign)

        # Task 4: http://docs.aws.amazon.com/general/latest/gr/sigv4-add-signature-to-request.html
        headers['Authorization'] = ', '.join([
            '{} Credential={}/{}'.format(self._AWS_ALGORITHM, aws_dict['access_key'], credential_scope),
            f'SignedHeaders={signed_headers}',
            f'Signature={signature}',
        ])

        return self._download_json(
            'https://{}{}{}'.format(self._AWS_PROXY_HOST, aws_dict['uri'], '?' + canonical_querystring if canonical_querystring else ''),
            video_id, headers=headers)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radioradicale.py
yt_dlp/extractor/radioradicale.py
from .common import InfoExtractor
from ..utils import url_or_none
from ..utils.traversal import traverse_obj


class RadioRadicaleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?radioradicale\.it/scheda/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.radioradicale.it/scheda/471591',
        'md5': 'eb0fbe43a601f1a361cbd00f3c45af4a',
        'info_dict': {
            'id': '471591',
            'ext': 'mp4',
            'title': 'md5:e8fbb8de57011a3255db0beca69af73d',
            'description': 'md5:5e15a789a2fe4d67da8d1366996e89ef',
            'location': 'Napoli',
            'duration': 2852.0,
            'timestamp': 1459987200,
            'upload_date': '20160407',
            'thumbnail': 'https://www.radioradicale.it/photo400/0/0/9/0/1/00901768.jpg',
        },
    }, {
        'url': 'https://www.radioradicale.it/scheda/742783/parlamento-riunito-in-seduta-comune-11a-della-xix-legislatura',
        'info_dict': {
            'id': '742783',
            'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            'description': '-) Votazione per l\'elezione di un giudice della Corte Costituzionale (nono scrutinio)',
            'location': 'CAMERA',
            'duration': 5868.0,
            'timestamp': 1730246400,
            'upload_date': '20241030',
        },
        'playlist': [{
            'md5': 'aa48de55dcc45478e4cd200f299aab7d',
            'info_dict': {
                'id': '742783-0',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }, {
            'md5': 'be915c189c70ad2920e5810f32260ff5',
            'info_dict': {
                'id': '742783-1',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }, {
            'md5': 'f0ee4047342baf8ed3128a8417ac5e0a',
            'info_dict': {
                'id': '742783-2',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }],
    }]

    def _entries(self, videos_info, page_id):
        """Yield one entry per playlist item that actually carries sources."""
        for idx, video in enumerate(traverse_obj(
                videos_info, ('playlist', lambda _, v: v['sources']))):
            # Synthetic per-part id: '<page id>-<index>'.
            video_id = f'{page_id}-{idx}'
            formats = []
            subtitles = {}
            # Each source is an HLS manifest; merge formats/subs across them.
            for m3u8_url in traverse_obj(video, ('sources', ..., 'src', {url_or_none})):
                fmts, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            # Side-loaded subtitle tracks; fall back to 'und' when no language given.
            for sub in traverse_obj(video, ('subtitles', ..., lambda _, v: url_or_none(v['src']))):
                self._merge_subtitles({sub.get('srclang') or 'und': [{
                    'url': sub['src'],
                    'name': sub.get('label'),
                }]}, target=subtitles)
            yield {
                'id': video_id,
                'title': video.get('title'),
                'formats': formats,
                'subtitles': subtitles,
            }

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)

        # Video metadata lives in the Drupal settings blob under 'RRscheda'.
        videos_info = self._search_json(
            r'jQuery\.extend\(Drupal\.settings\s*,', webpage, 'videos_info', page_id)['RRscheda']

        entries = list(self._entries(videos_info, page_id))

        common_info = {
            'id': page_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'location': videos_info.get('luogo'),
            **self._search_json_ld(webpage, page_id),
        }

        # A single-part page is returned as a plain video, not a playlist.
        if len(entries) == 1:
            return {
                **entries[0],
                **common_info,
            }

        return self.playlist_result(entries, multi_video=True, **common_info)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/netapp.py
yt_dlp/extractor/netapp.py
from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..utils import parse_iso8601
from ..utils.traversal import require, traverse_obj


class NetAppBaseIE(InfoExtractor):
    # Brightcove player URL template; all NetApp media sits under account 6255154784001.
    _BC_URL = 'https://players.brightcove.net/6255154784001/default_default/index.html?videoId={}'

    @staticmethod
    def _parse_metadata(item):
        # Map NetApp API fields onto yt-dlp's standard metadata keys.
        return traverse_obj(item, {
            'title': ('name', {str}),
            'description': ('description', {str}),
            'timestamp': ('createdAt', {parse_iso8601}),
        })


class NetAppVideoIE(NetAppBaseIE):
    _VALID_URL = r'https?://media\.netapp\.com/video-detail/(?P<id>[0-9a-f-]+)'
    _TESTS = [{
        'url': 'https://media.netapp.com/video-detail/da25fc01-82ad-5284-95bc-26920200a222/seamless-storage-for-modern-kubernetes-deployments',
        'info_dict': {
            'id': '1843620950167202073',
            'ext': 'mp4',
            'title': 'Seamless storage for modern Kubernetes deployments',
            'description': 'md5:1ee39e315243fe71fb90af2796037248',
            'uploader_id': '6255154784001',
            'duration': 2159.41,
            'thumbnail': r're:https://house-fastly-signed-us-east-1-prod\.brightcovecdn\.com/image/.*\.jpg',
            'tags': 'count:15',
            'timestamp': 1758213949,
            'upload_date': '20250918',
        },
    }, {
        'url': 'https://media.netapp.com/video-detail/45593e5d-cf1c-5996-978c-c9081906e69f/unleash-ai-innovation-with-your-data-with-the-netapp-platform',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_uuid = self._match_id(url)
        metadata = self._download_json(
            f'https://api.media.netapp.com/client/detail/{video_uuid}', video_uuid)

        # The page is assembled from typed 'sections'; the 'Player' section holds
        # the Brightcove video id, 'VideoDetail' holds the descriptive metadata.
        brightcove_video_id = traverse_obj(metadata, (
            'sections', lambda _, v: v['type'] == 'Player', 'video', {str}, any, {require('brightcove video id')}))
        video_item = traverse_obj(metadata, ('sections', lambda _, v: v['type'] == 'VideoDetail', any))

        # url_transparent: Brightcove does the heavy lifting, our metadata overlays it.
        return self.url_result(
            self._BC_URL.format(brightcove_video_id), BrightcoveNewIE, brightcove_video_id,
            url_transparent=True, **self._parse_metadata(video_item))


class NetAppCollectionIE(NetAppBaseIE):
    _VALID_URL = r'https?://media\.netapp\.com/collection/(?P<id>[0-9a-f-]+)'
    _TESTS = [{
        'url': 'https://media.netapp.com/collection/9820e190-f2a6-47ac-9c0a-98e5e64234a4',
        'info_dict': {
            'title': 'Featured sessions',
            'id': '9820e190-f2a6-47ac-9c0a-98e5e64234a4',
        },
        'playlist_count': 4,
    }]

    def _entries(self, metadata):
        # Yield one Brightcove url_transparent entry per item that has a video id.
        for item in traverse_obj(metadata, ('items', lambda _, v: v['brightcoveVideoId'])):
            brightcove_video_id = item['brightcoveVideoId']
            yield self.url_result(
                self._BC_URL.format(brightcove_video_id), BrightcoveNewIE, brightcove_video_id,
                url_transparent=True, **self._parse_metadata(item))

    def _real_extract(self, url):
        collection_uuid = self._match_id(url)
        metadata = self._download_json(
            f'https://api.media.netapp.com/client/collection/{collection_uuid}', collection_uuid)

        return self.playlist_result(self._entries(metadata), collection_uuid, playlist_title=metadata.get('name'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/c56.py
yt_dlp/extractor/c56.py
from .common import InfoExtractor
from ..utils import js_to_json


class C56IE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|player)\.)?56\.com/(?:.+?/)?(?:v_|(?:play_album.+-))(?P<textid>.+?)\.(?:html|swf)'
    IE_NAME = '56.com'
    _TESTS = [{
        'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html',
        'md5': 'e59995ac63d0457783ea05f93f12a866',
        'info_dict': {
            'id': '93440716',
            'ext': 'flv',
            'title': '网事知多少 第32期:车怒',
            'duration': 283.813,
        },
    }, {
        'url': 'http://www.56.com/u47/v_MTM5NjQ5ODc2.html',
        'md5': '',
        'info_dict': {
            'id': '82247482',
            'title': '爱的诅咒之杜鹃花开',
        },
        'playlist_count': 7,
        'add_ie': ['Sohu'],
    }]

    def _real_extract(self, url):
        """Extract a 56.com video, delegating Sohu-embedded pages to the Sohu extractor."""
        text_id = self._match_valid_url(url).group('textid')
        webpage = self._download_webpage(url, text_id)

        # Some pages merely embed a Sohu-hosted video; hand those off wholesale.
        sohu_info_raw = self._search_regex(
            r'var\s+sohuVideoInfo\s*=\s*({[^}]+});', webpage, 'Sohu video info', default=None)
        if sohu_info_raw:
            sohu_info = self._parse_json(
                sohu_info_raw, text_id, transform_source=js_to_json)
            return self.url_result(sohu_info['url'], 'Sohu')

        # Otherwise use the site's own XML/JSON metadata service.
        page = self._download_json(
            f'http://vxml.56.com/json/{text_id}/', text_id, 'Downloading video info')
        info = page['info']

        formats = []
        for rfile in info['rfiles']:
            formats.append({
                'format_id': rfile['type'],
                'filesize': int(rfile['filesize']),
                'url': rfile['url'],
            })

        return {
            'id': info['vid'],
            'title': info['Subject'],
            'duration': int(info['duration']) / 1000.0,
            'formats': formats,
            'thumbnail': info.get('bimg') or info.get('img'),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nhk.py
yt_dlp/extractor/nhk.py
import re from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, filter_dict, get_element_by_class, int_or_none, join_nonempty, make_archive_id, orderedSet, parse_duration, remove_end, traverse_obj, try_call, unescapeHTML, unified_timestamp, url_or_none, urljoin, variadic, ) class NhkBaseIE(InfoExtractor): _API_URL_TEMPLATE = 'https://api.nhkworld.jp/showsapi/v1/{lang}/{content_format}_{page_type}/{m_id}{extra_page}' _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/' def _call_api(self, m_id, lang, is_video, is_episode, is_clip): content_format = 'video' if is_video else 'audio' content_type = 'clips' if is_clip else 'episodes' if not is_episode: extra_page = f'/{content_format}_{content_type}' page_type = 'programs' else: extra_page = '' page_type = content_type return self._download_json( self._API_URL_TEMPLATE.format( lang=lang, content_format=content_format, page_type=page_type, m_id=m_id, extra_page=extra_page), join_nonempty(m_id, lang)) def _extract_episode_info(self, url, episode=None): fetch_episode = episode is None lang, m_type, episode_id = NhkVodIE._match_valid_url(url).group('lang', 'type', 'id') is_video = m_type != 'audio' if fetch_episode: episode = self._call_api( episode_id, lang, is_video, is_episode=True, is_clip=episode_id[:4] == '9999') video_id = join_nonempty('id', 'lang', from_dict=episode) title = episode.get('title') series = traverse_obj(episode, (('video_program', 'audio_program'), any, 'title')) episode_name = title if series and title: title = f'{series} - {title}' elif series and not title: title = series series = None episode_name = None else: # title, no series episode_name = None info = { 'id': video_id, 'title': title, 'series': series, 'episode': episode_name, **traverse_obj(episode, { 'description': ('description', {str}), 'release_timestamp': ('first_broadcasted_at', {unified_timestamp}), 'categories': ('categories', ..., 'name', {str}), 'tags': ('tags', ..., 'name', 
{str}), 'thumbnails': ('images', lambda _, v: v['url'], { 'url': ('url', {urljoin(url)}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), }), 'webpage_url': ('url', {urljoin(url)}), }), 'extractor_key': NhkVodIE.ie_key(), 'extractor': NhkVodIE.IE_NAME, } # XXX: We are assuming that 'video' and 'audio' are mutually exclusive stream_info = traverse_obj(episode, (('video', 'audio'), {dict}, any)) or {} if not stream_info.get('url'): self.raise_no_formats('Stream not found; it has most likely expired', expected=True) else: stream_url = stream_info['url'] if is_video: formats, subtitles = self._extract_m3u8_formats_and_subtitles(stream_url, video_id) info.update({ 'formats': formats, 'subtitles': subtitles, **traverse_obj(stream_info, ({ 'duration': ('duration', {int_or_none}), 'timestamp': ('published_at', {unified_timestamp}), })), }) else: # From https://www3.nhk.or.jp/nhkworld/common/player/radio/inline/rod.html audio_path = remove_end(stream_url, '.m4a') info['formats'] = self._extract_m3u8_formats( f'{urljoin("https://vod-stream.nhk.jp", audio_path)}/index.m3u8', episode_id, 'm4a', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) for f in info['formats']: f['language'] = lang return info class NhkVodIE(NhkBaseIE): _VALID_URL = [ rf'{NhkBaseIE._BASE_URL_REGEX}shows/(?:(?P<type>video)/)?(?P<id>\d{{4}}[\da-z]\d+)/?(?:$|[?#])', rf'{NhkBaseIE._BASE_URL_REGEX}(?:ondemand|shows)/(?P<type>audio)/(?P<id>[^/?#]+?-\d{{8}}-[\da-z]+)', rf'{NhkBaseIE._BASE_URL_REGEX}ondemand/(?P<type>video)/(?P<id>\d{{4}}[\da-z]\d+)', # deprecated ] # Content available only for a limited period of time. Visit # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples. 
_TESTS = [{ 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/2049165/', 'info_dict': { 'id': '2049165-en', 'ext': 'mp4', 'title': 'Japan Railway Journal - Choshi Electric Railway: Fighting to Get Back on Track', 'description': 'md5:ab57df2fca7f04245148c2e787bb203d', 'thumbnail': r're:https://.+/.+\.jpg', 'episode': 'Choshi Electric Railway: Fighting to Get Back on Track', 'series': 'Japan Railway Journal', 'duration': 1680, 'categories': ['Biz & Tech'], 'tags': ['Akita', 'Chiba', 'Trains', 'Transcript', 'All (Japan Navigator)'], 'timestamp': 1759055880, 'upload_date': '20250928', 'release_timestamp': 1758810600, 'release_date': '20250925', }, }, { # video clip 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/', 'md5': '153c3016dfd252ba09726588149cf0e7', 'info_dict': { 'id': '9999011-en', 'ext': 'mp4', 'title': 'Dining with the Chef - Chef Saito\'s Family recipe: MENCHI-KATSU', 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5', 'thumbnail': r're:https://.+/.+\.jpg', 'series': 'Dining with the Chef', 'episode': 'Chef Saito\'s Family recipe: MENCHI-KATSU', 'duration': 148, 'categories': ['Food'], 'tags': ['Washoku'], 'timestamp': 1548212400, 'upload_date': '20190123', }, }, { # radio 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/audio/livinginjapan-20240901-1/', 'info_dict': { 'id': 'livinginjapan-20240901-1-en', 'ext': 'm4a', 'title': 'Living in Japan - Weekend Hiking / Self-protection from crime', 'series': 'Living in Japan', 'description': 'md5:4d0e14ab73bdbfedb60a53b093954ed6', 'thumbnail': r're:https://.+/.+\.jpg', 'episode': 'Weekend Hiking / Self-protection from crime', 'categories': ['Interactive'], }, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/', 'only_matching': True, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/', 'only_matching': True, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/', 'only_matching': True, }, { 'url': 
'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/', 'only_matching': True, }, { # video, alphabetic character in ID #29670 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/', 'info_dict': { 'id': 'qfjay6cg', 'ext': 'mp4', 'title': 'DESIGN TALKS plus - Fishermen’s Finery', 'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448', 'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\.jpg\?w=1920&h=1080$', 'upload_date': '20210615', 'timestamp': 1623722008, }, 'skip': '404 Not Found', }, { # japanese-language, longer id than english 'url': 'https://www3.nhk.or.jp/nhkworld/ja/ondemand/video/0020271111/', 'info_dict': { 'id': 'nw_ja_v_jvod_ohayou_20231008', 'ext': 'mp4', 'title': 'おはよう日本(7時台) - 10月8日放送', 'series': 'おはよう日本(7時台)', 'episode': '10月8日放送', 'thumbnail': r're:https://.+/.+\.jpg', 'description': 'md5:9c1d6cbeadb827b955b20e99ab920ff0', }, 'skip': 'expires 2023-10-15', }, { # a one-off (single-episode series). title from the api is just null 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/3026036/', 'info_dict': { 'id': '3026036-en', 'ext': 'mp4', 'title': 'STATELESS: The Japanese Left Behind in the Philippines', 'description': 'md5:9a2fd51cdfa9f52baae28569e0053786', 'duration': 2955, 'thumbnail': 'https://www3.nhk.or.jp/nhkworld/en/shows/3026036/images/wide_l_QPtWpt4lzVhm3NzPAMIIF35MCg4CdNwcikPaTS5Q.jpg', 'categories': ['Documentary', 'Culture & Lifestyle'], 'tags': ['Transcript', 'Documentary 360', 'The Pursuit of PEACE'], 'timestamp': 1758931800, 'upload_date': '20250927', 'release_timestamp': 1758931800, 'release_date': '20250927', }, }, { # /ondemand/video/ url with alphabetical character in 5th position of id 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a07/', 'info_dict': { 'id': '9999a07-en', 'ext': 'mp4', 'episode': 'Mini-Dramas on SDGs: Ep 1 Close the Gender Gap [Director\'s Cut]', 'series': 'Mini-Dramas on SDGs', 'title': 'Mini-Dramas on SDGs - Mini-Dramas on SDGs: Ep 1 Close the Gender Gap [Director\'s Cut]', 
'description': 'md5:3f9dcb4db22fceb675d90448a040d3f6', 'timestamp': 1621911600, 'duration': 190, 'upload_date': '20210525', 'thumbnail': r're:https://.+/.+\.jpg', 'categories': ['Current Affairs', 'Entertainment'], }, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999d17/', 'info_dict': { 'id': '9999d17-en', 'ext': 'mp4', 'title': 'Flowers of snow blossom - The 72 Pentads of Yamato', 'description': 'Today’s focus: Snow', 'thumbnail': r're:https://.+/.+\.jpg', 'duration': 136, 'categories': ['Culture & Lifestyle', 'Science & Nature'], 'tags': ['Nara', 'Temples & Shrines', 'Winter', 'Snow'], 'timestamp': 1643339040, 'upload_date': '20220128', }, }, { # new /shows/audio/ url format 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/audio/livinginjapan-20231001-1/', 'only_matching': True, }, { # valid url even if can't be found in wild; support needed for clip entries extraction 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/9999o80/', 'only_matching': True, }] def _real_extract(self, url): return self._extract_episode_info(url) class NhkVodProgramIE(NhkBaseIE): _VALID_URL = rf'''(?x) {NhkBaseIE._BASE_URL_REGEX}(?:shows|tv)/ (?:(?P<type>audio)/programs/)?(?P<id>\w+)/? 
(?:\?(?:[^#]+&)?type=(?P<episode_type>clip|(?:radio|tv)Episode))?''' _TESTS = [{ # video program episodes 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/sumo/', 'info_dict': { 'id': 'sumo', 'title': 'GRAND SUMO Highlights', 'description': 'md5:fc20d02dc6ce85e4b72e0273aa52fdbf', 'series': 'GRAND SUMO Highlights', }, 'playlist_mincount': 1, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/japanrailway/', 'info_dict': { 'id': 'japanrailway', 'title': 'Japan Railway Journal', 'description': 'md5:ea39d93af7d05835baadf10d1aae0e3f', 'series': 'Japan Railway Journal', }, 'playlist_mincount': 12, }, { # video program clips 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/japanrailway/?type=clip', 'info_dict': { 'id': 'japanrailway', 'title': 'Japan Railway Journal', 'description': 'md5:ea39d93af7d05835baadf10d1aae0e3f', 'series': 'Japan Railway Journal', }, 'playlist_mincount': 12, }, { # audio program 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/audio/programs/livinginjapan/', 'info_dict': { 'id': 'livinginjapan', 'title': 'Living in Japan', 'description': 'md5:665bb36ec2a12c5a7f598ee713fc2b54', 'series': 'Living in Japan', }, 'playlist_mincount': 11, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/10yearshayaomiyazaki/', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if NhkVodIE.suitable(url) else super().suitable(url) def _extract_meta_from_class_elements(self, class_values, html): for class_value in class_values: if value := clean_html(get_element_by_class(class_value, html)): return value def _real_extract(self, url): lang, m_type, program_id, episode_type = self._match_valid_url(url).group('lang', 'type', 'id', 'episode_type') episodes = self._call_api( program_id, lang, m_type != 'audio', False, episode_type == 'clip') def entries(): for episode in traverse_obj(episodes, ('items', lambda _, v: v['url'])): yield self._extract_episode_info(urljoin(url, episode['url']), episode) html = self._download_webpage(url, program_id) 
program_title = self._extract_meta_from_class_elements([ 'p-programDetail__title', # /ondemand/program/ 'pProgramHero__logoText', # /shows/ 'tAudioProgramMain__title', # /shows/audio/programs/ 'p-program-name'], html) # /tv/ program_description = self._extract_meta_from_class_elements([ 'p-programDetail__text', # /ondemand/program/ 'pProgramHero__description', # /shows/ 'tAudioProgramMain__info', # /shows/audio/programs/ 'p-program-description'], html) # /tv/ return self.playlist_result(entries(), program_id, program_title, program_description, series=program_title) class NhkForSchoolBangumiIE(InfoExtractor): _VALID_URL = r'https?://www2\.nhk\.or\.jp/school/movie/(?P<type>bangumi|clip)\.cgi\?das_id=(?P<id>[a-zA-Z0-9_-]+)' _TESTS = [{ 'url': 'https://www2.nhk.or.jp/school/movie/bangumi.cgi?das_id=D0005150191_00000', 'info_dict': { 'id': 'D0005150191_00003', 'title': 'にている かな', 'duration': 599.999, 'timestamp': 1396414800, 'upload_date': '20140402', 'ext': 'mp4', 'chapters': 'count:12', }, 'params': { # m3u8 download 'skip_download': True, }, }] def _real_extract(self, url): program_type, video_id = self._match_valid_url(url).groups() webpage = self._download_webpage( f'https://www2.nhk.or.jp/school/movie/{program_type}.cgi?das_id={video_id}', video_id) # searches all variables base_values = {g.group(1): g.group(2) for g in re.finditer(r'var\s+([a-zA-Z_]+)\s*=\s*"([^"]+?)";', webpage)} # and programObj values too program_values = {g.group(1): g.group(3) for g in re.finditer(r'(?:program|clip)Obj\.([a-zA-Z_]+)\s*=\s*(["\'])([^"]+?)\2;', webpage)} # extract all chapters chapter_durations = [parse_duration(g.group(1)) for g in re.finditer(r'chapterTime\.push\(\'([0-9:]+?)\'\);', webpage)] chapter_titles = [' '.join([g.group(1) or '', unescapeHTML(g.group(2))]).strip() for g in re.finditer(r'<div class="cpTitle"><span>(scene\s*\d+)?</span>([^<]+?)</div>', webpage)] # this is how player_core.js is actually doing (!) 
version = base_values.get('r_version') or program_values.get('version') if version: video_id = f'{video_id.split("_")[0]}_{version}' formats = self._extract_m3u8_formats( f'https://nhks-vh.akamaihd.net/i/das/{video_id[0:8]}/{video_id}_V_000.f4v/master.m3u8', video_id, ext='mp4', m3u8_id='hls') duration = parse_duration(base_values.get('r_duration')) chapters = None if chapter_durations and chapter_titles and len(chapter_durations) == len(chapter_titles): start_time = chapter_durations end_time = [*chapter_durations[1:], duration] chapters = [{ 'start_time': s, 'end_time': e, 'title': t, } for s, e, t in zip(start_time, end_time, chapter_titles, strict=True)] return { 'id': video_id, 'title': program_values.get('name'), 'duration': parse_duration(base_values.get('r_duration')), 'timestamp': unified_timestamp(base_values['r_upload']), 'formats': formats, 'chapters': chapters, } class NhkForSchoolSubjectIE(InfoExtractor): IE_DESC = 'Portal page for each school subjects, like Japanese (kokugo, 国語) or math (sansuu/suugaku or 算数・数学)' KNOWN_SUBJECTS = ( 'rika', 'syakai', 'kokugo', 'sansuu', 'seikatsu', 'doutoku', 'ongaku', 'taiiku', 'zukou', 'gijutsu', 'katei', 'sougou', 'eigo', 'tokkatsu', 'tokushi', 'sonota', ) _VALID_URL = r'https?://www\.nhk\.or\.jp/school/(?P<id>{})/?(?:[\?#].*)?$'.format( '|'.join(re.escape(s) for s in KNOWN_SUBJECTS)) _TESTS = [{ 'url': 'https://www.nhk.or.jp/school/sougou/', 'info_dict': { 'id': 'sougou', 'title': '総合的な学習の時間', }, 'playlist_mincount': 16, }, { 'url': 'https://www.nhk.or.jp/school/rika/', 'info_dict': { 'id': 'rika', 'title': '理科', }, 'playlist_mincount': 15, }] def _real_extract(self, url): subject_id = self._match_id(url) webpage = self._download_webpage(url, subject_id) return self.playlist_from_matches( re.finditer(rf'href="((?:https?://www\.nhk\.or\.jp)?/school/{re.escape(subject_id)}/[^/]+/)"', webpage), subject_id, self._html_search_regex(r'(?s)<span\s+class="subjectName">\s*<img\s*[^<]+>\s*([^<]+?)</span>', webpage, 'title', 
fatal=False), lambda g: urljoin(url, g.group(1))) class NhkForSchoolProgramListIE(InfoExtractor): _VALID_URL = r'https?://www\.nhk\.or\.jp/school/(?P<id>(?:{})/[a-zA-Z0-9_-]+)'.format( '|'.join(re.escape(s) for s in NhkForSchoolSubjectIE.KNOWN_SUBJECTS)) _TESTS = [{ 'url': 'https://www.nhk.or.jp/school/sougou/q/', 'info_dict': { 'id': 'sougou/q', 'title': 'Q~こどものための哲学', }, 'playlist_mincount': 20, }] def _real_extract(self, url): program_id = self._match_id(url) webpage = self._download_webpage(f'https://www.nhk.or.jp/school/{program_id}/', program_id) title = (self._generic_title('', webpage) or self._html_search_regex(r'<h3>([^<]+?)とは?\s*</h3>', webpage, 'title', fatal=False)) title = re.sub(r'\s*\|\s*NHK\s+for\s+School\s*$', '', title) if title else None description = self._html_search_regex( r'(?s)<div\s+class="programDetail\s*">\s*<p>[^<]+</p>', webpage, 'description', fatal=False, group=0) bangumi_list = self._download_json( f'https://www.nhk.or.jp/school/{program_id}/meta/program.json', program_id) # they're always bangumi bangumis = [ self.url_result(f'https://www2.nhk.or.jp/school/movie/bangumi.cgi?das_id={x}') for x in traverse_obj(bangumi_list, ('part', ..., 'part-video-dasid')) or []] return self.playlist_result(bangumis, program_id, title, description) class NhkRadiruIE(InfoExtractor): _GEO_COUNTRIES = ['JP'] IE_DESC = 'NHK らじる (Radiru/Rajiru)' _VALID_URL = r'https?://www\.nhk\.or\.jp/radio/(?:player/ondemand|ondemand/detail)\.html\?p=(?P<site>[\da-zA-Z]+)_(?P<corner>[\da-zA-Z]+)(?:_(?P<headline>[\da-zA-Z]+))?' 
_TESTS = [{ 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=LG96ZW5KZ4_01_4251382', 'skip': 'Episode expires on 2025-07-14', 'info_dict': { 'title': 'クラシックの庭\u3000特集「ドボルザークを聴く」(1)交響曲を中心に', 'id': 'LG96ZW5KZ4_01_4251382', 'ext': 'm4a', 'description': 'md5:652d3c38a25b77959c716421eba1617a', 'uploader': 'NHK FM・東京', 'channel': 'NHK FM・東京', 'duration': 6597.0, 'thumbnail': 'https://www.nhk.jp/static/assets/images/radioseries/rs/LG96ZW5KZ4/LG96ZW5KZ4-eyecatch_a67c6e949325016c0724f2ed3eec8a2f.jpg', 'categories': ['音楽', 'クラシック・オペラ'], 'cast': ['田添菜穂子'], 'series': 'クラシックの庭', 'series_id': 'LG96ZW5KZ4', 'episode': '特集「ドボルザークを聴く」(1)交響曲を中心に', 'episode_id': 'QP1Q2ZXZY3', 'timestamp': 1751871000, 'upload_date': '20250707', 'release_timestamp': 1751864403, 'release_date': '20250707', }, }, { # playlist, airs every weekday so it should _hopefully_ be okay forever 'url': 'https://www.nhk.or.jp/radio/ondemand/detail.html?p=Z9L1V2M24L_01', 'info_dict': { 'id': 'Z9L1V2M24L_01', 'title': 'ベストオブクラシック', 'description': '世界中の上質な演奏会をじっくり堪能する本格派クラシック番組。', 'thumbnail': 'https://www.nhk.jp/static/assets/images/radioseries/rs/Z9L1V2M24L/Z9L1V2M24L-eyecatch_83ed28b4782907998875965fee60a351.jpg', 'series_id': 'Z9L1V2M24L_01', 'uploader': 'NHK FM', 'channel': 'NHK FM', 'series': 'ベストオブクラシック', }, 'playlist_mincount': 3, }, { # news 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=18439M2W42_02_4251212', 'skip': 'Expires on 2025-07-15', 'info_dict': { 'id': '18439M2W42_02_4251212', 'ext': 'm4a', 'title': 'マイあさ! 
午前5時のNHKニュース 2025年7月8日', 'uploader': 'NHKラジオ第1', 'channel': 'NHKラジオ第1', 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/18439M2W42/img/series_945_thumbnail.jpg', 'series': 'NHKラジオニュース', 'timestamp': 1751919420, 'upload_date': '20250707', 'release_timestamp': 1751918400, 'release_date': '20250707', }, }, { # fallback when extended metadata fails 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=J8792PY43V_20_4253945', 'skip': 'Expires on 2025-09-01', 'info_dict': { 'id': 'J8792PY43V_20_4253945', 'ext': 'm4a', 'title': '「後絶たない筋肉増強剤の使用」ワールドリポート', 'description': '大濱 敦(ソウル支局)', 'uploader': 'NHK R1', 'channel': 'NHK R1', 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/J8792PY43V/img/corner/box_31_thumbnail.jpg', 'series': 'マイあさ! ワールドリポート', 'series_id': 'J8792PY43V_20', 'timestamp': 1751837100, 'upload_date': '20250706', 'release_timestamp': 1751835600, 'release_date': '20250706', }, 'expected_warnings': ['Failed to download extended metadata: HTTP Error 404: Not Found'], }] _API_URL_TMPL = None # The `_format_*` and `_make_*` functions are ported from: https://www.nhk.or.jp/radio/assets/js/timetable_detail_new.js def _format_act_list(self, act_list): role_groups = {} for act in traverse_obj(act_list, (..., {dict})): role = act.get('role') if role not in role_groups: role_groups[role] = [] role_groups[role].append(act) formatted_roles = [] for role, acts in role_groups.items(): for i, act in enumerate(acts): res = f'【{role}】' if i == 0 and role is not None else '' if title := act.get('title'): res += f'{title}…' formatted_roles.append(join_nonempty(res, act.get('name'), delim='')) return join_nonempty(*formatted_roles, delim=',') def _make_artists(self, track, key): artists = [] for artist in traverse_obj(track, (key, ..., {dict})): if res := join_nonempty(*traverse_obj(artist, (( ('role', filter, {'{}…'.format}), ('part', filter, {'({})'.format}), ('name', filter), ), {str})), delim=''): artists.append(res) return '、'.join(artists) or None def 
_make_duration(self, track, key): d = traverse_obj(track, (key, {parse_duration})) if d is None: return None hours, remainder = divmod(d, 3600) minutes, seconds = divmod(remainder, 60) res = '(' if hours > 0: res += f'{int(hours)}時間' if minutes > 0: res += f'{int(minutes)}分' res += f'{int(seconds):02}秒)' return res def _format_music_list(self, music_list): tracks = [] for track in traverse_obj(music_list, (..., {dict})): track_details = traverse_obj(track, (( ('name', filter, {'「{}」'.format}), ('lyricist', filter, {'{}:作詞'.format}), ('composer', filter, {'{}:作曲'.format}), ('arranger', filter, {'{}:編曲'.format}), ), {str})) track_details.append(self._make_artists(track, 'byArtist')) track_details.append(self._make_duration(track, 'duration')) if label := join_nonempty('label', 'code', delim=' ', from_dict=track): track_details.append(f'<{label}>') if location := traverse_obj(track, ('location', {str})): track_details.append(f'~{location}~') tracks.append(join_nonempty(*track_details, delim='\n')) return '\n\n'.join(tracks) def _format_description(self, response): detailed_description = traverse_obj(response, ('detailedDescription', {dict})) or {} return join_nonempty( join_nonempty('epg80', 'epg200', delim='\n\n', from_dict=detailed_description), traverse_obj(response, ('misc', 'actList', {self._format_act_list})), traverse_obj(response, ('misc', 'musicList', {self._format_music_list})), delim='\n\n') def _get_thumbnails(self, data, keys, name=None, preference=-1): thumbnails = [] for size, thumb in traverse_obj(data, ( *variadic(keys, (str, bytes, dict, set)), {dict.items}, lambda _, v: v[0] != 'copyright' and url_or_none(v[1]['url']), )): thumbnails.append({ 'url': thumb['url'], 'width': int_or_none(thumb.get('width')), 'height': int_or_none(thumb.get('height')), 'preference': preference, 'id': join_nonempty(name, size), }) preference -= 1 return thumbnails def _extract_extended_metadata(self, episode_id, aa_vinfo): service, _, area = traverse_obj(aa_vinfo, (2, 
{str}, {lambda x: (x or '').partition(',')})) date_id = aa_vinfo[3] detail_url = try_call( lambda: self._API_URL_TMPL.format(broadcastEventId=join_nonempty(service, area, date_id))) if not detail_url: return {} response = self._download_json( detail_url, episode_id, 'Downloading extended metadata', 'Failed to download extended metadata', fatal=False, expected_status=400) if not response: return {} if error := traverse_obj(response, ('error', {dict})): self.report_warning( 'Failed to get extended metadata. API returned ' f'Error {join_nonempty("statuscode", "message", from_dict=error, delim=": ")}') return {} station = traverse_obj(response, ('publishedOn', 'broadcastDisplayName', {str})) thumbnails = [] thumbnails.extend(self._get_thumbnails(response, ('about', 'eyecatch'))) for num, dct in enumerate(traverse_obj(response, ('about', 'eyecatchList', ...))): thumbnails.extend(self._get_thumbnails(dct, None, join_nonempty('list', num), -2)) thumbnails.extend( self._get_thumbnails(response, ('about', 'partOfSeries', 'eyecatch'), 'series', -3)) return filter_dict({ 'description': self._format_description(response), 'cast': traverse_obj(response, ('misc', 'actList', ..., 'name', {str})), 'thumbnails': thumbnails, **traverse_obj(response, { 'title': ('name', {str}), 'timestamp': ('endDate', {unified_timestamp}), 'release_timestamp': ('startDate', {unified_timestamp}), 'duration': ('duration', {parse_duration}), }), **traverse_obj(response, ('identifierGroup', { 'series': ('radioSeriesName', {str}), 'series_id': ('radioSeriesId', {str}), 'episode': ('radioEpisodeName', {str}), 'episode_id': ('radioEpisodeId', {str}), 'categories': ('genre', ..., ['name1', 'name2'], {str}, all, {orderedSet}), })), 'channel': station, 'uploader': station, }) def _extract_episode_info(self, episode, programme_id, series_meta): episode_id = f'{programme_id}_{episode["id"]}' aa_vinfo = traverse_obj(episode, ('aa_contents_id', {lambda x: x.split(';')})) extended_metadata = 
self._extract_extended_metadata(episode_id, aa_vinfo) fallback_start_time, _, fallback_end_time = traverse_obj( aa_vinfo, (4, {str}, {lambda x: (x or '').partition('_')})) return { **series_meta, 'id': episode_id, 'formats': self._extract_m3u8_formats(episode.get('stream_url'), episode_id, fatal=False), 'container': 'm4a_dash', # force fixup, AAC-only HLS 'was_live': True, 'title': episode.get('program_title'), 'description': episode.get('program_sub_title'), # fallback 'timestamp': unified_timestamp(fallback_end_time), 'release_timestamp': unified_timestamp(fallback_start_time), **extended_metadata, } def _extract_news_info(self, headline, programme_id, series_meta): episode_id = f'{programme_id}_{headline["headline_id"]}' episode = traverse_obj(headline, ('file_list', 0, {dict})) return { **series_meta, 'id': episode_id, 'formats': self._extract_m3u8_formats(episode.get('file_name'), episode_id, fatal=False), 'container': 'm4a_dash', # force fixup, AAC-only HLS 'was_live': True, 'series': series_meta.get('title'), 'thumbnail': url_or_none(headline.get('headline_image')) or series_meta.get('thumbnail'), **traverse_obj(episode, { 'title': ('file_title', {str}), 'description': ('file_title_sub', {str}), 'timestamp': ('open_time', {unified_timestamp}), 'release_timestamp': ('aa_vinfo4', {lambda x: x.split('_')[0]}, {unified_timestamp}), }), } def _real_initialize(self):
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kicker.py
yt_dlp/extractor/kicker.py
from .common import InfoExtractor
from .dailymotion import DailymotionIE


class KickerIE(InfoExtractor):
    # kicker.de hosts its videos on Dailymotion; this extractor only
    # scrapes the embedded Dailymotion video ID from the article page and
    # delegates the actual extraction to DailymotionIE.
    _VALID_URL = r'https?://(?:www\.)kicker\.(?:de)/(?P<id>[\w-]+)/video'
    _TESTS = [{
        'url': 'https://www.kicker.de/pogba-dembel-co-die-top-11-der-abloesefreien-spieler-905049/video',
        'info_dict': {
            'id': 'km04mrK0DrRAVxy2GcA',
            'title': 'md5:b91d145bac5745ac58d5479d8347a875',
            'ext': 'mp4',
            'duration': 350,
            'description': 'md5:a5a3dd77dbb6550dbfb997be100b9998',
            'uploader_id': 'x2dfupo',
            'timestamp': 1654677626,
            'like_count': int,
            'uploader': 'kicker.de',
            'view_count': int,
            'age_limit': 0,
            'thumbnail': r're:https://s\d+\.dmcdn\.net/v/T-x741YeYAx8aSZ0Z/x1080',
            'tags': ['published', 'category.InternationalSoccer'],
            'upload_date': '20220608',
        },
    }, {
        'url': 'https://www.kicker.de/ex-unioner-in-der-bezirksliga-felix-kroos-vereinschallenge-in-pankow-902825/video',
        'info_dict': {
            'id': 'k2omNsJKdZ3TxwxYSFJ',
            'title': 'md5:72ec24d7f84b8436fe1e89d198152adf',
            'ext': 'mp4',
            'uploader_id': 'x2dfupo',
            'duration': 331,
            'timestamp': 1652966015,
            'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/TxU4Z1YYCmtisTbMq/x1080',
            'tags': ['FELIX KROOS', 'EINFACH MAL LUPPEN', 'KROOS', 'FSV FORTUNA PANKOW', 'published', 'category.Amateurs', 'marketingpreset.Spreekick'],
            'age_limit': 0,
            'view_count': int,
            'upload_date': '20220519',
            'uploader': 'kicker.de',
            'description': 'md5:0c2060c899a91c8bf40f578f78c5846f',
            'like_count': int,
        },
    }]

    def _real_extract(self, url):
        video_slug = self._match_id(url)
        webpage = self._download_webpage(url, video_slug)

        # The page embeds the Dailymotion private video ID in a
        # `data-dmprivateid` attribute.
        dailymotion_video_id = self._search_regex(
            r'data-dmprivateid\s*=\s*[\'"](?P<video_id>\w+)', webpage,
            'video id', group='video_id')

        # Keep the article's title rather than the Dailymotion one.
        return self.url_result(
            f'https://www.dailymotion.com/video/{dailymotion_video_id}',
            ie=DailymotionIE, video_title=self._html_extract_title(webpage))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/r7.py
yt_dlp/extractor/r7.py
from .common import InfoExtractor
from ..utils import int_or_none


class R7IE(InfoExtractor):
    # Extractor for r7.com media pages (disabled: _WORKING = False; URLs
    # fall through to GenericIE via _ENABLED = None).
    _WORKING = False
    _ENABLED = None  # XXX: pass through to GenericIE
    _VALID_URL = r'''(?x)
                        https?://
                        (?:
                            (?:[a-zA-Z]+)\.r7\.com(?:/[^/]+)+/idmedia/|
                            noticias\.r7\.com(?:/[^/]+)+/[^/]+-|
                            player\.r7\.com/video/i/
                        )
                        (?P<id>[\da-f]{24})
                    '''
    _TESTS = [{
        'url': 'http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html',
        'md5': '403c4e393617e8e8ddc748978ee8efde',
        'info_dict': {
            'id': '54e7050b0cf2ff57e0279389',
            'ext': 'mp4',
            'title': 'Policiais humilham suspeito à beira da morte: "Morre com dignidade"',
            'description': 'md5:01812008664be76a6479aa58ec865b72',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 98,
            'like_count': int,
            'view_count': int,
        },
    }, {
        'url': 'http://esportes.r7.com/videos/cigano-manda-recado-aos-fas/idmedia/4e176727b51a048ee6646a1b.html',
        'only_matching': True,
    }, {
        'url': 'http://noticias.r7.com/record-news/video/representante-do-instituto-sou-da-paz-fala-sobre-fim-do-estatuto-do-desarmamento-5480fc580cf2285b117f438d/',
        'only_matching': True,
    }, {
        'url': 'http://player.r7.com/video/i/54e7050b0cf2ff57e0279389?play=true&video=http://vsh.r7.com/54e7050b0cf2ff57e0279389/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-ATOS_copy.mp4&linkCallback=http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html&thumbnail=http://vtb.r7.com/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-thumb.jpg&idCategory=192&share=true&layout=full&full=true',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            f'http://player-api.r7.com/video/i/{video_id}', video_id)

        title = video['title']

        formats = []
        media_url_hls = video.get('media_url_hls')
        if media_url_hls:
            formats.extend(self._extract_m3u8_formats(
                media_url_hls, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))
        media_url = video.get('media_url')
        if media_url:
            f = {
                'url': media_url,
                'format_id': 'http',
            }
            # m3u8 format always matches the http format, let's copy metadata from
            # one to another
            m3u8_formats = list(filter(
                lambda f: f.get('vcodec') != 'none', formats))
            if len(m3u8_formats) == 1:
                f_copy = m3u8_formats[0].copy()
                f_copy.update(f)
                f_copy['protocol'] = 'http'
                f = f_copy
            formats.append(f)

        description = video.get('description')
        thumbnail = video.get('thumb')
        duration = int_or_none(video.get('media_duration'))
        like_count = int_or_none(video.get('likes'))
        view_count = int_or_none(video.get('views'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'like_count': like_count,
            'view_count': view_count,
            'formats': formats,
        }


class R7ArticleIE(InfoExtractor):
    # Article pages that embed an R7 player; resolves the embedded media
    # ID and delegates to R7IE. Also disabled/pass-through like R7IE.
    _WORKING = False
    _ENABLED = None  # XXX: pass through to GenericIE
    _VALID_URL = r'https?://(?:[a-zA-Z]+)\.r7\.com/(?:[^/]+/)+[^/?#&]+-(?P<id>\d+)'
    _TEST = {
        'url': 'http://tv.r7.com/record-play/balanco-geral/videos/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-16102015',
        'only_matching': True,
    }

    @classmethod
    def suitable(cls, url):
        # Defer to R7IE for URLs it already handles directly.
        return False if R7IE.suitable(url) else super().suitable(url)

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        video_id = self._search_regex(
            r'<div[^>]+(?:id=["\']player-|class=["\']embed["\'][^>]+id=["\'])([\da-f]{24})',
            webpage, 'video id')

        return self.url_result(f'http://player.r7.com/video/i/{video_id}', R7IE.ie_key())
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jove.py
yt_dlp/extractor/jove.py
from .common import InfoExtractor
from ..utils import ExtractorError, int_or_none, unified_strdate


class JoveIE(InfoExtractor):
    """Extractor for jove.com scientific video articles.

    The direct video URL is taken from the ``video`` attribute of the
    per-video chapters XML document.
    """
    _VALID_URL = r'https?://(?:www\.)?jove\.com/video/(?P<id>[0-9]+)'
    _CHAPTERS_URL = 'http://www.jove.com/video-chapters?videoid={video_id:}'
    _TESTS = [
        {
            'url': 'http://www.jove.com/video/2744/electrode-positioning-montage-transcranial-direct-current',
            'md5': '93723888d82dbd6ba8b3d7d0cd65dd2b',
            'info_dict': {
                'id': '2744',
                'ext': 'mp4',
                'title': 'Electrode Positioning and Montage in Transcranial Direct Current Stimulation',
                'description': 'md5:015dd4509649c0908bc27f049e0262c6',
                'thumbnail': r're:^https?://.*\.png$',
                'upload_date': '20110523',
            },
        },
        {
            'url': 'http://www.jove.com/video/51796/culturing-caenorhabditis-elegans-axenic-liquid-media-creation',
            'md5': '914aeb356f416811d911996434811beb',
            'info_dict': {
                'id': '51796',
                'ext': 'mp4',
                'title': 'Culturing Caenorhabditis elegans in Axenic Liquid Media and Creation of Transgenic Worms by Microparticle Bombardment',
                'description': 'md5:35ff029261900583970c4023b70f1dc9',
                'thumbnail': r're:^https?://.*\.png$',
                'upload_date': '20140802',
            },
        },
    ]

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        # The chapters id may differ from the page's video id; it is the key
        # used by the chapters XML endpoint.
        chapters_id = self._html_search_regex(
            r'/video-chapters\?videoid=([0-9]+)', webpage, 'chapters id')

        chapters_xml = self._download_xml(
            self._CHAPTERS_URL.format(video_id=chapters_id),
            video_id, note='Downloading chapters XML',
            errnote='Failed to download chapters XML')

        video_url = chapters_xml.attrib.get('video')
        if not video_url:
            raise ExtractorError('Failed to get the video URL')

        title = self._html_search_meta('citation_title', webpage, 'title')
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._html_search_regex(
            r'<div id="section_body_summary"><p class="jove_content">(.+?)</p>',
            webpage, 'description', fatal=False)
        publish_date = unified_strdate(self._html_search_meta(
            'citation_publication_date', webpage, 'publish date', fatal=False))
        # Fix: the regex is non-fatal and may return None; wrapping it in a
        # bare int() crashed with TypeError whenever the meta tag was absent.
        # int_or_none yields None instead, matching the other optional fields.
        comment_count = int_or_none(self._html_search_regex(
            r'<meta name="num_comments" content="(\d+) Comments?"',
            webpage, 'comment count', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'description': description,
            'upload_date': publish_date,
            'comment_count': comment_count,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/noz.py
yt_dlp/extractor/noz.py
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    find_xpath_attr,
    int_or_none,
    update_url_query,
    xpath_text,
)


class NozIE(InfoExtractor):
    """Extractor for noz.de: resolves the player script to an XML video
    configuration and builds HTTP/HDS/HLS formats from its quality nodes."""
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?noz\.de/video/(?P<id>[0-9]+)/'
    _TESTS = [{
        'url': 'http://www.noz.de/video/25151/32-Deutschland-gewinnt-Badminton-Lnderspiel-in-Melle',
        'info_dict': {
            'id': '25151',
            'ext': 'mp4',
            'duration': 215,
            'title': '3:2 - Deutschland gewinnt Badminton-Länderspiel in Melle',
            'description': 'Vor rund 370 Zuschauern gewinnt die deutsche Badminton-Nationalmannschaft am Donnerstag ein EM-Vorbereitungsspiel gegen Frankreich in Melle. Video Moritz Frankenberg.',
            'thumbnail': r're:^http://.*\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        description = self._og_search_description(webpage)

        # The page embeds a videojs loader script; that script in turn holds
        # the (URL-encoded) address of the XML player configuration.
        edge_url = self._html_search_regex(
            r'<script\s+(?:type="text/javascript"\s+)?src="(.*?/videojs_.*?)"',
            webpage, 'edge URL')
        edge_content = self._download_webpage(edge_url, 'meta configuration')
        encoded_config_url = self._search_regex(
            r'so\.addVariable\("config_url","[^,]*,(.*?)"',
            edge_content, 'config URL',
        )
        config_url = urllib.parse.unquote(encoded_config_url)

        config_doc = self._download_xml(config_url, 'video configuration')
        title = xpath_text(config_doc, './/title')
        thumbnail = xpath_text(config_doc, './/article/thumbnail/url')
        duration = int_or_none(xpath_text(
            config_doc, './/article/movie/file/duration'))

        formats = []
        for quality in config_doc.findall('.//article/movie/file/qualities/qual'):
            # Prefer a direct MP4 URL per quality node; fall back to the
            # adaptive (HDS / HLS) URLs when none is present.
            mp4_node = find_xpath_attr(
                quality, './html_urls/video_url', 'format', 'video/mp4')
            mp4_url = mp4_node.text if mp4_node is not None else None
            if mp4_url:
                formats.append({
                    'url': mp4_url,
                    'format_name': xpath_text(quality, './name'),
                    'format_id': f"http-{xpath_text(quality, './id')}",
                    'height': int_or_none(xpath_text(quality, './height')),
                    'width': int_or_none(xpath_text(quality, './width')),
                    'tbr': int_or_none(xpath_text(quality, './bitrate'), scale=1000),
                })
                continue

            f4m_url = xpath_text(quality, 'url_hd2')
            if f4m_url:
                formats.extend(self._extract_f4m_formats(
                    update_url_query(f4m_url, {'hdcore': '3.4.0'}),
                    video_id, f4m_id='hds', fatal=False))
            m3u8_node = find_xpath_attr(
                quality, './html_urls/video_url',
                'format', 'application/vnd.apple.mpegurl')
            m3u8_url = m3u8_node.text if m3u8_node is not None else None
            if m3u8_url:
                formats.extend(self._extract_m3u8_formats(
                    m3u8_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'duration': duration,
            'description': description,
            'thumbnail': thumbnail,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/audius.py
yt_dlp/extractor/audius.py
import random
import urllib.parse

from .common import InfoExtractor
from ..utils import ExtractorError, str_or_none, try_get


class AudiusBaseIE(InfoExtractor):
    """Shared plumbing for Audius extractors: API-host selection, URL
    preparation, and the generic API-request/response helpers."""
    _API_BASE = None
    _API_V = '/v1'

    def _get_response_data(self, response):
        """Unwrap the 'data' payload of an Audius API response, surfacing
        API-reported errors as ExtractorError."""
        if isinstance(response, dict):
            response_data = response.get('data')
            if response_data is not None:
                return response_data
            if len(response) == 1 and 'message' in response:
                raise ExtractorError('API error: {}'.format(response['message']),
                                     expected=True)
        raise ExtractorError('Unexpected API response')

    def _select_api_base(self):
        """Selecting one of the currently available API hosts"""
        response = super()._download_json(
            'https://api.audius.co/', None,
            note='Requesting available API hosts',
            errnote='Unable to request available API hosts')
        hosts = self._get_response_data(response)
        if isinstance(hosts, list):
            # Hosts are equivalent mirrors; pick one at random for load spread.
            self._API_BASE = random.choice(hosts)
            return
        raise ExtractorError('Unable to get available API hosts')

    @staticmethod
    def _prepare_url(url, title):
        """
        Audius removes forward slashes from the uri, but leaves backslashes.
        The problem is that the current version of Chrome replaces backslashes
        in the address bar with a forward slashes, so if you copy the link from
        there and paste it into youtube-dl, you won't be able to download
        anything from this link, since the Audius API won't be able to resolve
        this url
        """
        url = urllib.parse.unquote(url)
        title = urllib.parse.unquote(title)
        if '/' in title or '%2F' in title:
            fixed_title = title.replace('/', '%5C').replace('%2F', '%5C')
            return url.replace(title, fixed_title)
        return url

    def _api_request(self, path, item_id=None, note='Downloading JSON metadata',
                     errnote='Unable to download JSON metadata',
                     expected_status=None):
        """Issue a request against the selected API host and return the
        unwrapped 'data' payload."""
        if self._API_BASE is None:
            self._select_api_base()
        try:
            response = super()._download_json(
                f'{self._API_BASE}{self._API_V}{path}', item_id, note=note,
                errnote=errnote, expected_status=expected_status)
        except ExtractorError as exc:
            # some of Audius API hosts may not work as expected and return HTML
            if 'Failed to parse JSON' in str(exc):
                raise ExtractorError('An error occurred while receiving data. Try again',
                                     expected=True)
            raise exc
        return self._get_response_data(response)

    def _resolve_url(self, url, item_id):
        # 404 is expected for unknown URLs so the API error message surfaces.
        return self._api_request(f'/resolve?url={url}', item_id,
                                 expected_status=404)


class AudiusIE(AudiusBaseIE):
    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:audius\.co/(?P<uploader>[\w\d-]+)(?!/album|/playlist)/(?P<title>\S+))'''
    IE_DESC = 'Audius.co'
    _TESTS = [
        {
            # URL from Chrome address bar which replace backslash to forward slash
            'url': 'https://audius.co/test_acc/t%D0%B5%D0%B5%D0%B5est-1.%5E_%7B%7D/%22%3C%3E.%E2%84%96~%60-198631',
            'md5': '92c35d3e754d5a0f17eef396b0d33582',
            'info_dict': {
                'id': 'xd8gY',
                'title': '''Tеееest/ 1.!@#$%^&*()_+=[]{};'\\\":<>,.?/№~`''',
                'ext': 'mp3',
                'description': 'Description',
                'duration': 30,
                'track': '''Tеееest/ 1.!@#$%^&*()_+=[]{};'\\\":<>,.?/№~`''',
                'artist': 'test',
                'genre': 'Electronic',
                'thumbnail': r're:https?://.*\.jpg',
                'view_count': int,
                'like_count': int,
                'repost_count': int,
            },
        },
        {
            # Regular track
            'url': 'https://audius.co/voltra/radar-103692',
            'md5': '491898a0a8de39f20c5d6a8a80ab5132',
            'info_dict': {
                'id': 'KKdy2',
                'title': 'RADAR',
                'ext': 'mp3',
                'duration': 318,
                'track': 'RADAR',
                'artist': 'voltra',
                'genre': 'Trance',
                'thumbnail': r're:https?://.*\.jpg',
                'view_count': int,
                'like_count': int,
                'repost_count': int,
            },
        },
    ]

    _ARTWORK_MAP = {
        '150x150': 150,
        '480x480': 480,
        '1000x1000': 1000,
    }

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        track_id = try_get(mobj, lambda x: x.group('track_id'))
        if track_id is None:
            title = mobj.group('title')
            # uploader = mobj.group('uploader')
            url = self._prepare_url(url, title)
            track_data = self._resolve_url(url, title)
        else:  # API link
            title = None
            # uploader = None
            track_data = self._api_request(f'/tracks/{track_id}', track_id)

        if not isinstance(track_data, dict):
            raise ExtractorError('Unexpected API response')

        track_id = track_data.get('id')
        if track_id is None:
            raise ExtractorError('Unable to get ID of the track')

        artworks_data = track_data.get('artwork')
        thumbnails = []
        if isinstance(artworks_data, dict):
            for quality_key, thumbnail_url in artworks_data.items():
                thumbnail = {
                    'url': thumbnail_url,
                }
                quality_code = self._ARTWORK_MAP.get(quality_key)
                if quality_code is not None:
                    thumbnail['preference'] = quality_code
                thumbnails.append(thumbnail)

        return {
            'id': track_id,
            'title': track_data.get('title', title),
            'url': f'{self._API_BASE}/v1/tracks/{track_id}/stream',
            'ext': 'mp3',
            'description': track_data.get('description'),
            'duration': track_data.get('duration'),
            'track': track_data.get('title'),
            'artist': try_get(track_data, lambda x: x['user']['name'], str),
            'genre': track_data.get('genre'),
            'thumbnails': thumbnails,
            'view_count': track_data.get('play_count'),
            'like_count': track_data.get('favorite_count'),
            'repost_count': track_data.get('repost_count'),
        }


class AudiusTrackIE(AudiusIE):  # XXX: Do not subclass from concrete IE
    _VALID_URL = r'''(?x)(?:audius:)(?:https?://(?:www\.)?.+/v1/tracks/)?(?P<track_id>\w+)'''
    IE_NAME = 'audius:track'
    IE_DESC = 'Audius track ID or API link. Prepend with "audius:"'
    _TESTS = [
        {
            'url': 'audius:9RWlo',
            'only_matching': True,
        },
        {
            'url': 'audius:http://discoveryprovider.audius.prod-us-west-2.staked.cloud/v1/tracks/9RWlo',
            'only_matching': True,
        },
    ]


class AudiusPlaylistIE(AudiusBaseIE):
    _VALID_URL = r'https?://(?:www\.)?audius\.co/(?P<uploader>[\w\d-]+)/(?:album|playlist)/(?P<title>\S+)'
    IE_NAME = 'audius:playlist'
    IE_DESC = 'Audius.co playlists'
    _TEST = {
        'url': 'https://audius.co/test_acc/playlist/test-playlist-22910',
        'info_dict': {
            'id': 'DNvjN',
            'title': 'test playlist',
            'description': 'Test description\n\nlol',
        },
        'playlist_count': 175,
    }

    def _build_playlist(self, tracks):
        """Turn API track dicts into url_result entries handled by
        AudiusTrackIE."""
        entries = []
        for track in tracks:
            if not isinstance(track, dict):
                raise ExtractorError('Unexpected API response')
            track_id = str_or_none(track.get('id'))
            if not track_id:
                raise ExtractorError('Unable to get track ID from playlist')
            entries.append(self.url_result(
                f'audius:{track_id}',
                ie=AudiusTrackIE.ie_key(), video_id=track_id))
        return entries

    def _real_extract(self, url):
        self._select_api_base()
        mobj = self._match_valid_url(url)
        title = mobj.group('title')
        # uploader = mobj.group('uploader')
        url = self._prepare_url(url, title)
        playlist_response = self._resolve_url(url, title)

        if not isinstance(playlist_response, list) or len(playlist_response) != 1:
            raise ExtractorError('Unexpected API response')

        playlist_data = playlist_response[0]
        if not isinstance(playlist_data, dict):
            raise ExtractorError('Unexpected API response')

        playlist_id = playlist_data.get('id')
        if playlist_id is None:
            raise ExtractorError('Unable to get playlist ID')

        playlist_tracks = self._api_request(
            f'/playlists/{playlist_id}/tracks', title,
            note='Downloading playlist tracks metadata',
            errnote='Unable to download playlist tracks metadata')
        if not isinstance(playlist_tracks, list):
            raise ExtractorError('Unexpected API response')

        entries = self._build_playlist(playlist_tracks)
        return self.playlist_result(
            entries, playlist_id,
            playlist_data.get('playlist_name', title),
            playlist_data.get('description'))


class AudiusProfileIE(AudiusPlaylistIE):  # XXX: Do not subclass from concrete IE
    IE_NAME = 'audius:artist'
    IE_DESC = 'Audius.co profile/artist pages'
    # Fix: was (?:www)?audius\.co — without the escaped dot, canonical
    # https://www.audius.co/<handle> URLs never matched this pattern.
    _VALID_URL = r'https?://(?:www\.)?audius\.co/(?P<id>[^\/]+)/?(?:[?#]|$)'
    _TEST = {
        'url': 'https://audius.co/pzl/',
        'info_dict': {
            'id': 'ezRo7',
            'description': 'TAMALE\n\nContact: officialpzl@gmail.com',
            'title': 'pzl',
        },
        'playlist_count': 24,
    }

    def _real_extract(self, url):
        self._select_api_base()
        profile_id = self._match_id(url)
        try:
            _profile_data = self._api_request('/full/users/handle/' + profile_id, profile_id)
        except ExtractorError as e:
            raise ExtractorError('Could not download profile info; ' + str(e))
        profile_audius_id = _profile_data[0]['id']
        profile_bio = _profile_data[0].get('bio')

        api_call = self._api_request(f'/full/users/handle/{profile_id}/tracks', profile_id)
        return self.playlist_result(self._build_playlist(api_call), profile_audius_id, profile_id, profile_bio)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bunnycdn.py
yt_dlp/extractor/bunnycdn.py
import json

from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import (
    ExtractorError,
    extract_attributes,
    int_or_none,
    parse_qs,
    smuggle_url,
    unsmuggle_url,
    url_or_none,
    urlhandle_detect_ext,
)
from ..utils.traversal import find_element, traverse_obj


class BunnyCdnIE(InfoExtractor):
    """Extractor for Bunny Stream (mediadelivery.net / bunnycdn.com) embeds."""
    _VALID_URL = r'https?://(?:(?:iframe|player)\.mediadelivery\.net|video\.bunnycdn\.com)/(?:embed|play)/(?P<library_id>\d+)/(?P<id>[\da-f-]+)'
    _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL}[^\'"]*)[\'"]']
    _TESTS = [{
        'url': 'https://iframe.mediadelivery.net/embed/113933/e73edec1-e381-4c8b-ae73-717a140e0924',
        'info_dict': {
            'id': 'e73edec1-e381-4c8b-ae73-717a140e0924',
            'ext': 'mp4',
            'title': 'mistress morgana (3).mp4',
            'description': '',
            'timestamp': 1693251673,
            'thumbnail': r're:^https?://.*\.b-cdn\.net/e73edec1-e381-4c8b-ae73-717a140e0924/thumbnail\.jpg',
            'duration': 7.0,
            'upload_date': '20230828',
        },
        'params': {'skip_download': True},
    }, {
        'url': 'https://iframe.mediadelivery.net/play/136145/32e34c4b-0d72-437c-9abb-05e67657da34',
        'info_dict': {
            'id': '32e34c4b-0d72-437c-9abb-05e67657da34',
            'ext': 'mp4',
            'timestamp': 1691145748,
            'thumbnail': r're:^https?://.*\.b-cdn\.net/32e34c4b-0d72-437c-9abb-05e67657da34/thumbnail_9172dc16\.jpg',
            'duration': 106.0,
            'description': 'md5:11452bcb31f379ee3eaf1234d3264e44',
            'upload_date': '20230804',
            'title': 'Sanela ist Teil der #arbeitsmarktkraft',
        },
        'params': {'skip_download': True},
    }, {
        # Stream requires activation and pings
        'url': 'https://iframe.mediadelivery.net/embed/200867/2e8545ec-509d-4571-b855-4cf0235ccd75',
        'info_dict': {
            'id': '2e8545ec-509d-4571-b855-4cf0235ccd75',
            'ext': 'mp4',
            'timestamp': 1708497752,
            'title': 'netflix part 1',
            'duration': 3959.0,
            'description': '',
            'upload_date': '20240221',
            'thumbnail': r're:^https?://.*\.b-cdn\.net/2e8545ec-509d-4571-b855-4cf0235ccd75/thumbnail\.jpg',
        },
        'params': {'skip_download': True},
    }, {
        # Requires any Referer
        'url': 'https://iframe.mediadelivery.net/embed/289162/6372f5a3-68df-4ef7-a115-e1110186c477',
        'info_dict': {
            'id': '6372f5a3-68df-4ef7-a115-e1110186c477',
            'ext': 'mp4',
            'title': '12-Creating Small Asset Blockouts -Timelapse.mp4',
            'description': '',
            'duration': 263.0,
            'timestamp': 1724485440,
            'upload_date': '20240824',
            'thumbnail': r're:^https?://.*\.b-cdn\.net/6372f5a3-68df-4ef7-a115-e1110186c477/thumbnail\.jpg',
        },
        'params': {'skip_download': True},
    }, {
        'url': 'https://player.mediadelivery.net/embed/519128/875880a9-bcc2-4038-9e05-e5024bba9b70',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        # Stream requires Referer
        'url': 'https://conword.io/',
        'info_dict': {
            'id': '3a5d863e-9cd6-447e-b6ef-e289af50b349',
            'ext': 'mp4',
            'title': 'Conword bei der Stadt Köln und Stadt Dortmund',
            'description': '',
            'upload_date': '20231031',
            'duration': 31.0,
            'thumbnail': 'https://video.watchuh.com/3a5d863e-9cd6-447e-b6ef-e289af50b349/thumbnail.jpg',
            'timestamp': 1698783879,
        },
        'params': {'skip_download': True},
    }, {
        # URL requires token and expires
        'url': 'https://www.stockphotos.com/video/moscow-subway-the-train-is-arriving-at-the-park-kultury-station-10017830',
        'info_dict': {
            'id': '0b02fa20-4e8c-4140-8f87-f64d820a3386',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.b-cdn\.net/0b02fa20-4e8c-4140-8f87-f64d820a3386/thumbnail\.jpg',
            'title': 'Moscow subway. The train is arriving at the Park Kultury station.',
            'upload_date': '20240531',
            'duration': 18.0,
            'timestamp': 1717152269,
            'description': '',
        },
        'params': {'skip_download': True},
    }]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        # Smuggle the embedding page as Referer: some streams are
        # inaccessible without it.
        for embed_url in super()._extract_embed_urls(url, webpage):
            yield smuggle_url(embed_url, {'Referer': url})

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        video_id, library_id = self._match_valid_url(url).group('id', 'library_id')
        webpage = self._download_webpage(
            f'https://iframe.mediadelivery.net/embed/{library_id}/{video_id}', video_id,
            headers={'Referer': smuggled_data.get('Referer') or 'https://iframe.mediadelivery.net/'},
            query=traverse_obj(parse_qs(url), {'token': 'token', 'expires': 'expires'}))

        # Fix: the previous walrus form bound the *comparison result*
        # (html_title := ... == '403'), so html_title was a bool and the
        # '404' branch below could never trigger. Assign first, then compare.
        html_title = self._html_extract_title(webpage, default=None)
        if html_title == '403':
            raise ExtractorError(
                'This video is inaccessible. Setting a Referer header '
                'might be required to access the video', expected=True)
        elif html_title == '404':
            raise ExtractorError('This video does not exist', expected=True)

        headers = {'Referer': url}

        info = traverse_obj(self._parse_html5_media_entries(url, webpage, video_id, _headers=headers), 0) or {}
        formats = info.get('formats') or []
        subtitles = info.get('subtitles') or {}

        original_url = self._search_regex(
            r'(?:var|const|let)\s+originalUrl\s*=\s*["\']([^"\']+)["\']', webpage, 'original url', default=None)
        if url_or_none(original_url):
            # Probe the source file; it may be blocked (403) or absent (404).
            urlh = self._request_webpage(
                HEADRequest(original_url), video_id=video_id, note='Checking original',
                headers=headers, fatal=False, expected_status=(403, 404))
            if urlh and urlh.status == 200:
                formats.append({
                    'url': original_url,
                    'format_id': 'source',
                    'quality': 1,
                    'http_headers': headers,
                    'ext': urlhandle_detect_ext(urlh, default='mp4'),
                    'filesize': int_or_none(urlh.get_header('Content-Length')),
                })

        # MediaCage Streams require activation and pings
        src_url = self._search_regex(
            r'\.setAttribute\([\'"]src[\'"],\s*[\'"]([^\'"]+)[\'"]\)', webpage, 'src url', default=None)
        activation_url = self._search_regex(
            r'loadUrl\([\'"]([^\'"]+/activate)[\'"]', webpage, 'activation url', default=None)
        ping_url = self._search_regex(
            r'loadUrl\([\'"]([^\'"]+/ping)[\'"]', webpage, 'ping url', default=None)
        secret = traverse_obj(parse_qs(src_url), ('secret', 0))
        context_id = traverse_obj(parse_qs(src_url), ('contextId', 0))
        ping_data = {}
        if src_url and activation_url and ping_url and secret and context_id:
            self._download_webpage(
                activation_url, video_id, headers=headers, note='Downloading activation data')

            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                src_url, video_id, 'mp4', headers=headers, m3u8_id='hls', fatal=False)
            for fmt in fmts:
                fmt.update({
                    'protocol': 'bunnycdn',
                    'http_headers': headers,
                })
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)

            # Consumed by the dedicated bunnycdn downloader to keep the
            # session alive during the download.
            ping_data = {
                '_bunnycdn_ping_data': {
                    'url': ping_url,
                    'headers': headers,
                    'secret': secret,
                    'context_id': context_id,
                },
            }

        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(webpage, ({find_element(id='main-video', html=True)}, {extract_attributes}, {
                'title': ('data-plyr-config', {json.loads}, 'title', {str}),
                'thumbnail': ('data-poster', {url_or_none}),
            })),
            **ping_data,
            **self._search_json_ld(webpage, video_id, fatal=False),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/historicfilms.py
yt_dlp/extractor/historicfilms.py
from .common import InfoExtractor
from ..utils import parse_duration


class HistoricFilmsIE(InfoExtractor):
    """Extractor for historicfilms.com tape pages.

    The direct .mov URL is derived from the tape id found in the page markup.
    """
    _VALID_URL = r'https?://(?:www\.)?historicfilms\.com/(?:tapes/|play)(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.historicfilms.com/tapes/4728',
        'md5': 'd4a437aec45d8d796a38a215db064e9a',
        'info_dict': {
            'id': '4728',
            'ext': 'mov',
            'title': 'Historic Films: GP-7',
            'description': 'md5:1a86a0f3ac54024e419aba97210d959a',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2096,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The tape id (not the page id) names the media file on the server;
        # it appears either as markup text or as a JS variable.
        tape_id = self._search_regex(
            [r'class="tapeId"[^>]*>([^<]+)<', r'tapeId\s*:\s*"([^"]+)"'],
            webpage, 'tape id')

        return {
            'id': video_id,
            'url': f'http://www.historicfilms.com/video/{tape_id}_{video_id}_web.mov',
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._html_search_meta(
                'thumbnailUrl', webpage, 'thumbnails') or self._og_search_thumbnail(webpage),
            'duration': parse_duration(self._html_search_meta(
                'duration', webpage, 'duration')),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mixcloud.py
yt_dlp/extractor/mixcloud.py
import base64
import itertools
import urllib.parse

from .common import InfoExtractor
from ..compat import compat_ord
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_iso8601,
    strip_or_none,
    try_get,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class MixcloudBaseIE(InfoExtractor):
    def _call_api(self, object_type, object_fields, display_id, username, slug=None):
        """Run a GraphQL lookup query against the Mixcloud app endpoint and
        return the looked-up object."""
        lookup_key = object_type + 'Lookup'
        return self._download_json(
            'https://app.mixcloud.com/graphql', display_id, query={
                'query': '''{ %s(lookup: {username: "%s"%s}) { %s } }''' % (lookup_key, username, f', slug: "{slug}"' if slug else '', object_fields),  # noqa: UP031
            })['data'][lookup_key]


class MixcloudIE(MixcloudBaseIE):
    _VALID_URL = r'https?://(?:(?:www|beta|m)\.)?mixcloud\.com/([^/]+)/(?!stream|uploads|favorites|listens|playlists)([^/]+)'
    IE_NAME = 'mixcloud'
    _TESTS = [{
        'url': 'http://www.mixcloud.com/dholbach/cryptkeeper/',
        'info_dict': {
            'id': 'dholbach_cryptkeeper',
            'ext': 'm4a',
            'title': 'Cryptkeeper',
            'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
            'uploader': 'dholbach',
            'uploader_id': 'dholbach',
            'thumbnail': r're:https?://.*\.jpg',
            'view_count': int,
            'timestamp': 1321359578,
            'upload_date': '20111115',
            'uploader_url': 'https://www.mixcloud.com/dholbach/',
            'artist': 'Submorphics & Chino , Telekinesis, Porter Robinson, Enei, Breakage ft Jess Mills',
            'duration': 3723,
            'tags': ['liquid drum and bass', 'drum and bass'],
            'comment_count': int,
            'repost_count': int,
            'like_count': int,
            'artists': list,
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'http://www.mixcloud.com/gillespeterson/caribou-7-inch-vinyl-mix-chat/',
        'info_dict': {
            'id': 'gillespeterson_caribou-7-inch-vinyl-mix-chat',
            'ext': 'mp3',
            'title': 'Caribou 7 inch Vinyl Mix & Chat',
            'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
            'uploader': 'Gilles Peterson Worldwide',
            'uploader_id': 'gillespeterson',
            'thumbnail': 're:https?://.*',
            'view_count': int,
            'timestamp': 1422987057,
            'upload_date': '20150203',
            'uploader_url': 'https://www.mixcloud.com/gillespeterson/',
            'duration': 2992,
            'tags': ['jazz', 'soul', 'world music', 'funk'],
            'comment_count': int,
            'repost_count': int,
            'like_count': int,
        },
        'params': {'skip_download': '404 playback error on site'},
    }, {
        'url': 'https://beta.mixcloud.com/RedLightRadio/nosedrip-15-red-light-radio-01-18-2016/',
        'only_matching': True,
    }]
    _DECRYPTION_KEY = 'IFYOUWANTTHEARTISTSTOGETPAIDDONOTDOWNLOADFROMMIXCLOUD'

    @staticmethod
    def _decrypt_xor_cipher(key, ciphertext):
        """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
        return ''.join(
            chr(compat_ord(c) ^ compat_ord(k))
            for c, k in zip(ciphertext, itertools.cycle(key)))

    def _real_extract(self, url):
        username, slug = self._match_valid_url(url).groups()
        username, slug = urllib.parse.unquote(username), urllib.parse.unquote(slug)
        track_id = f'{username}_{slug}'

        cloudcast = self._call_api('cloudcast', '''audioLength comments(first: 100) { edges { node { comment created user { displayName username } } } totalCount } description favorites { totalCount } featuringArtistList isExclusive name owner { displayName url username } picture(width: 1024, height: 1024) { url } plays publishDate reposts { totalCount } streamInfo { dashUrl hlsUrl url } tags { tag { name } } restrictedReason id''', track_id, username, slug)

        if not cloudcast:
            raise ExtractorError('Track not found', expected=True)

        # Map the API's restriction reason to a user-facing error.
        reason = cloudcast.get('restrictedReason')
        if reason == 'tracklist':
            raise ExtractorError('Track unavailable in your country due to licensing restrictions', expected=True)
        elif reason == 'repeat_play':
            raise ExtractorError('You have reached your play limit for this track', expected=True)
        elif reason:
            raise ExtractorError('Track is restricted', expected=True)

        stream_info = cloudcast['streamInfo']
        formats = []

        # Each stream URL is base64-encoded and XOR-obfuscated with a
        # well-known key.
        for stream_key in ('url', 'hlsUrl', 'dashUrl'):
            encrypted_url = stream_info.get(stream_key)
            if not encrypted_url:
                continue
            stream_url = self._decrypt_xor_cipher(
                self._DECRYPTION_KEY, base64.b64decode(encrypted_url))
            if stream_key == 'hlsUrl':
                formats.extend(self._extract_m3u8_formats(
                    stream_url, track_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            elif stream_key == 'dashUrl':
                formats.extend(self._extract_mpd_formats(
                    stream_url, track_id, mpd_id='dash', fatal=False))
            else:
                formats.append({
                    'format_id': 'http',
                    'url': stream_url,
                    'vcodec': 'none',
                    'downloader_options': {
                        # Mixcloud starts throttling at >~5M
                        'http_chunk_size': 5242880,
                    },
                })

        if not formats and cloudcast.get('isExclusive'):
            self.raise_login_required(metadata_available=True)

        comments = []
        for node in traverse_obj(cloudcast, ('comments', 'edges', ..., 'node', {dict})):
            text = strip_or_none(node.get('comment'))
            if not text:
                continue
            comments.append({
                'text': text,
                **traverse_obj(node, {
                    'author': ('user', 'displayName', {str}),
                    'author_id': ('user', 'username', {str}),
                    'timestamp': ('created', {parse_iso8601}),
                }),
            })

        return {
            'id': track_id,
            'formats': formats,
            'comments': comments,
            **traverse_obj(cloudcast, {
                'title': ('name', {str}),
                'description': ('description', {str}),
                'thumbnail': ('picture', 'url', {url_or_none}),
                'timestamp': ('publishDate', {parse_iso8601}),
                'duration': ('audioLength', {int_or_none}),
                'uploader': ('owner', 'displayName', {str}),
                'uploader_id': ('owner', 'username', {str}),
                'uploader_url': ('owner', 'url', {url_or_none}),
                'view_count': ('plays', {int_or_none}),
                'like_count': ('favorites', 'totalCount', {int_or_none}),
                'repost_count': ('reposts', 'totalCount', {int_or_none}),
                'comment_count': ('comments', 'totalCount', {int_or_none}),
                'tags': ('tags', ..., 'tag', 'name', {str}, filter, all, filter),
                'artists': ('featuringArtistList', ..., {str}, filter, all, filter),
            }),
        }


class MixcloudPlaylistBaseIE(MixcloudBaseIE):
    def _get_cloudcast(self, node):
        # Subclasses override this when the cloudcast is nested in the node.
        return node

    def _get_playlist_title(self, title, slug):
        return title

    def _real_extract(self, url):
        username, slug = self._match_valid_url(url).groups()
        username = urllib.parse.unquote(username)
        if not slug:
            slug = 'uploads'
        else:
            slug = urllib.parse.unquote(slug)
        playlist_id = f'{username}_{slug}'

        is_playlist_type = self._ROOT_TYPE == 'playlist'
        playlist_type = 'items' if is_playlist_type else slug
        list_filter = ''

        has_next_page = True
        entries = []
        # Page through the GraphQL connection using cursor pagination.
        while has_next_page:
            playlist = self._call_api(
                self._ROOT_TYPE, '''%s %s %s(first: 100%s) { edges { node { %s } } pageInfo { endCursor hasNextPage } }''' % (self._TITLE_KEY, self._DESCRIPTION_KEY, playlist_type, list_filter, self._NODE_TEMPLATE),  # noqa: UP031
                playlist_id, username, slug if is_playlist_type else None)

            page = playlist.get(playlist_type) or {}
            for edge in page.get('edges', []):
                cloudcast = self._get_cloudcast(edge.get('node') or {})
                cloudcast_url = cloudcast.get('url')
                if not cloudcast_url:
                    continue
                item_slug = try_get(cloudcast, lambda x: x['slug'], str)
                owner_username = try_get(cloudcast, lambda x: x['owner']['username'], str)
                video_id = f'{owner_username}_{item_slug}' if item_slug and owner_username else None
                entries.append(self.url_result(
                    cloudcast_url, MixcloudIE.ie_key(), video_id))

            page_info = page['pageInfo']
            has_next_page = page_info['hasNextPage']
            list_filter = ', after: "{}"'.format(page_info['endCursor'])

        return self.playlist_result(
            entries, playlist_id,
            self._get_playlist_title(playlist[self._TITLE_KEY], slug),
            playlist.get(self._DESCRIPTION_KEY))


class MixcloudUserIE(MixcloudPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?mixcloud\.com/(?P<id>[^/]+)/(?P<type>uploads|favorites|listens|stream)?/?$'
    IE_NAME = 'mixcloud:user'
    _TESTS = [{
        'url': 'http://www.mixcloud.com/dholbach/',
        'info_dict': {
            'id': 'dholbach_uploads',
            'title': 'dholbach (uploads)',
            'description': 'md5:a3f468a60ac8c3e1f8616380fc469b2b',
        },
        'playlist_mincount': 36,
    }, {
        'url': 'http://www.mixcloud.com/dholbach/uploads/',
        'info_dict': {
            'id': 'dholbach_uploads',
            'title': 'dholbach (uploads)',
            'description': 'md5:a3f468a60ac8c3e1f8616380fc469b2b',
        },
        'playlist_mincount': 36,
    }, {
        'url': 'http://www.mixcloud.com/dholbach/favorites/',
        'info_dict': {
            'id': 'dholbach_favorites',
            'title': 'dholbach (favorites)',
            'description': 'md5:a3f468a60ac8c3e1f8616380fc469b2b',
        },
        # 'params': {
        #     'playlist_items': '1-100',
        # },
        'playlist_mincount': 396,
    }, {
        'url': 'http://www.mixcloud.com/dholbach/listens/',
        'info_dict': {
            'id': 'dholbach_listens',
            'title': 'Daniel Holbach (listens)',
            'description': 'md5:b60d776f0bab534c5dabe0a34e47a789',
        },
        # 'params': {
        #     'playlist_items': '1-100',
        # },
        'playlist_mincount': 1623,
        'skip': 'Large list',
    }, {
        'url': 'https://www.mixcloud.com/FirstEar/stream/',
        'info_dict': {
            'id': 'FirstEar_stream',
            'title': 'First Ear (stream)',
            'description': 'we maraud for ears',
        },
        'playlist_mincount': 267,
    }]

    _TITLE_KEY = 'displayName'
    _DESCRIPTION_KEY = 'biog'
    _ROOT_TYPE = 'user'
    _NODE_TEMPLATE = '''slug url owner { username }'''

    def _get_playlist_title(self, title, slug):
        return f'{title} ({slug})'


class MixcloudPlaylistIE(MixcloudPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?mixcloud\.com/(?P<user>[^/]+)/playlists/(?P<playlist>[^/]+)/?$'
    IE_NAME = 'mixcloud:playlist'
    _TESTS = [{
        'url': 'https://www.mixcloud.com/maxvibes/playlists/jazzcat-on-ness-radio/',
        'info_dict': {
            'id': 'maxvibes_jazzcat-on-ness-radio',
            'title': 'Ness Radio sessions',
        },
        'playlist_mincount': 58,
    }]

    _TITLE_KEY = 'name'
    _DESCRIPTION_KEY = 'description'
    _ROOT_TYPE = 'playlist'
    _NODE_TEMPLATE = '''cloudcast { slug url owner { username } }'''

    def _get_cloudcast(self, node):
        return node.get('cloudcast') or {}
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mx3.py
yt_dlp/extractor/mx3.py
import re from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( get_element_by_class, int_or_none, try_call, url_or_none, urlhandle_detect_ext, ) from ..utils.traversal import traverse_obj class Mx3BaseIE(InfoExtractor): _VALID_URL_TMPL = r'https?://(?:www\.)?%s/t/(?P<id>\w+)' _FORMATS = [{ 'url': 'player_asset', 'format_id': 'default', 'quality': 0, }, { 'url': 'player_asset?quality=hd', 'format_id': 'hd', 'quality': 1, }, { 'url': 'download', 'format_id': 'download', 'quality': 2, }, { 'url': 'player_asset?quality=source', 'format_id': 'source', 'quality': 2, }] def _extract_formats(self, track_id): formats = [] for fmt in self._FORMATS: format_url = f'https://{self._DOMAIN}/tracks/{track_id}/{fmt["url"]}' urlh = self._request_webpage( HEADRequest(format_url), track_id, fatal=False, expected_status=404, note=f'Checking for format {fmt["format_id"]}') if urlh and urlh.status == 200: formats.append({ **fmt, 'url': format_url, 'ext': urlhandle_detect_ext(urlh), 'filesize': int_or_none(urlh.headers.get('Content-Length')), }) return formats def _real_extract(self, url): track_id = self._match_id(url) webpage = self._download_webpage(url, track_id) more_info = get_element_by_class('single-more-info', webpage) data = self._download_json(f'https://{self._DOMAIN}/t/{track_id}.json', track_id, fatal=False) def get_info_field(name): return self._html_search_regex( rf'<dt[^>]*>\s*{name}\s*</dt>\s*<dd[^>]*>(.*?)</dd>', more_info, name, default=None, flags=re.DOTALL) return { 'id': track_id, 'formats': self._extract_formats(track_id), 'genre': self._html_search_regex( r'<div\b[^>]+class="single-band-genre"[^>]*>([^<]+)</div>', webpage, 'genre', default=None), 'release_year': int_or_none(get_info_field('Year of creation')), 'description': get_info_field('Description'), 'tags': try_call(lambda: get_info_field('Tag').split(', '), list), **traverse_obj(data, { 'title': ('title', {str}), 'artist': (('performer_name', 'artist'), {str}), 
'album_artist': ('artist', {str}), 'composer': ('composer_name', {str}), 'thumbnail': (('picture_url_xlarge', 'picture_url'), {url_or_none}), }, get_all=False), } class Mx3IE(Mx3BaseIE): _DOMAIN = 'mx3.ch' _VALID_URL = Mx3BaseIE._VALID_URL_TMPL % re.escape(_DOMAIN) _TESTS = [{ 'url': 'https://mx3.ch/t/1Cru', 'md5': '7ba09e9826b4447d4e1ce9d69e0e295f', 'info_dict': { 'id': '1Cru', 'ext': 'wav', 'artist': 'Godina', 'album_artist': 'Tortue Tortue', 'composer': 'Olivier Godinat', 'genre': 'Rock', 'thumbnail': 'https://mx3.ch/pictures/mx3/file/0101/4643/square_xlarge/1-s-envoler-1.jpg?1630272813', 'title': "S'envoler", 'release_year': 2021, 'tags': [], }, }, { 'url': 'https://mx3.ch/t/1LIY', 'md5': '48293cb908342547827f963a5a2e9118', 'info_dict': { 'id': '1LIY', 'ext': 'mov', 'artist': 'Tania Kimfumu', 'album_artist': 'The Broots', 'composer': 'Emmanuel Diserens', 'genre': 'Electro', 'thumbnail': 'https://mx3.ch/pictures/mx3/file/0110/0003/video_xlarge/frame_0000.png?1686963670', 'title': 'The Broots-Larytta remix "Begging For Help"', 'release_year': 2023, 'tags': ['the broots', 'cassata records', 'larytta'], 'description': '"Begging for Help" Larytta Remix Official Video\nRealized By Kali Donkilie in 2023', }, }, { 'url': 'https://mx3.ch/t/1C6E', 'md5': '1afcd578493ddb8e5008e94bb6d97e25', 'info_dict': { 'id': '1C6E', 'ext': 'wav', 'artist': 'Alien Bubblegum', 'album_artist': 'Alien Bubblegum', 'composer': 'Alien Bubblegum', 'genre': 'Punk', 'thumbnail': 'https://mx3.ch/pictures/mx3/file/0101/1551/square_xlarge/pandora-s-box-cover-with-title.png?1627054733', 'title': 'Wide Awake', 'release_year': 2021, 'tags': ['alien bubblegum', 'bubblegum', 'alien', 'pop punk', 'poppunk'], }, }] class Mx3NeoIE(Mx3BaseIE): _DOMAIN = 'neo.mx3.ch' _VALID_URL = Mx3BaseIE._VALID_URL_TMPL % re.escape(_DOMAIN) _TESTS = [{ 'url': 'https://neo.mx3.ch/t/1hpd', 'md5': '6d9986bbae5cac3296ec8813bf965eb2', 'info_dict': { 'id': '1hpd', 'ext': 'wav', 'artist': 'Baptiste Lopez', 'album_artist': 
'Kammerorchester Basel', 'composer': 'Jannik Giger', 'genre': 'Composition, Orchestra', 'title': 'Troisième œil. Für Kammerorchester (2023)', 'thumbnail': 'https://neo.mx3.ch/pictures/neo/file/0000/0241/square_xlarge/kammerorchester-basel-group-photo-2_c_-lukasz-rajchert.jpg?1560341252', 'release_year': 2023, 'tags': [], }, }] class Mx3VolksmusikIE(Mx3BaseIE): _DOMAIN = 'volksmusik.mx3.ch' _VALID_URL = Mx3BaseIE._VALID_URL_TMPL % re.escape(_DOMAIN) _TESTS = [{ 'url': 'https://volksmusik.mx3.ch/t/Zx', 'md5': 'dd967a7b0c1ef898f3e072cf9c2eae3c', 'info_dict': { 'id': 'Zx', 'ext': 'mp3', 'artist': 'Ländlerkapelle GrischArt', 'album_artist': 'Ländlerkapelle GrischArt', 'composer': 'Urs Glauser', 'genre': 'Instrumental, Graubünden', 'title': 'Chämilouf', 'thumbnail': 'https://volksmusik.mx3.ch/pictures/vxm/file/0000/3815/square_xlarge/grischart1.jpg?1450530120', 'release_year': 2012, 'tags': [], }, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/googlesearch.py
yt_dlp/extractor/googlesearch.py
import itertools import re from .common import SearchInfoExtractor class GoogleSearchIE(SearchInfoExtractor): IE_DESC = 'Google Video search' IE_NAME = 'video.google:search' _SEARCH_KEY = 'gvsearch' _TESTS = [{ 'url': 'gvsearch15:python language', 'info_dict': { 'id': 'python language', 'title': 'python language', }, 'playlist_count': 15, }] _PAGE_SIZE = 100 def _search_results(self, query): for pagenum in itertools.count(): webpage = self._download_webpage( 'http://www.google.com/search', f'gvsearch:{query}', note=f'Downloading result page {pagenum + 1}', query={ 'tbm': 'vid', 'q': query, 'start': pagenum * self._PAGE_SIZE, 'num': self._PAGE_SIZE, 'hl': 'en', }) for url in re.findall(r'<div[^>]* class="dXiKIc"[^>]*><a href="([^"]+)"', webpage): yield self.url_result(url) if not re.search(r'id="pnnext"', webpage): return
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/vrt.py
yt_dlp/extractor/vrt.py
import json import time import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, clean_html, extract_attributes, filter_dict, float_or_none, get_element_by_class, get_element_html_by_class, int_or_none, jwt_decode_hs256, jwt_encode, make_archive_id, merge_dicts, parse_age_limit, parse_duration, parse_iso8601, str_or_none, strip_or_none, traverse_obj, try_call, url_or_none, ) class VRTBaseIE(InfoExtractor): _GEO_BYPASS = False _PLAYER_INFO = { 'platform': 'desktop', 'app': { 'type': 'browser', 'name': 'Chrome', }, 'device': 'undefined (undefined)', 'os': { 'name': 'Windows', 'version': '10', }, 'player': { 'name': 'VRT web player', 'version': '5.1.1-prod-2025-02-14T08:44:16"', }, } # From https://player.vrt.be/vrtnws/js/main.js & https://player.vrt.be/ketnet/js/main.8cdb11341bcb79e4cd44.js _JWT_KEY_ID = '0-0Fp51UZykfaiCJrfTE3+oMI8zvDteYfPtR+2n1R+z8w=' _JWT_SIGNING_KEY = 'b5f500d55cb44715107249ccd8a5c0136cfb2788dbb71b90a4f142423bacaf38' # -dev # player-stag.vrt.be key: d23987504521ae6fbf2716caca6700a24bb1579477b43c84e146b279de5ca595 # player.vrt.be key: 2a9251d782700769fb856da5725daf38661874ca6f80ae7dc2b05ec1a81a24ae def _extract_formats_and_subtitles(self, data, video_id): if traverse_obj(data, 'drm'): self.report_drm(video_id) formats, subtitles = [], {} for target in traverse_obj(data, ('targetUrls', lambda _, v: url_or_none(v['url']) and v['type'])): format_type = target['type'].upper() format_url = target['url'] if format_type in ('HLS', 'HLS_AES'): fmts, subs = self._extract_m3u8_formats_and_subtitles( format_url, video_id, 'mp4', m3u8_id=format_type, fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif format_type == 'HDS': formats.extend(self._extract_f4m_formats( format_url, video_id, f4m_id=format_type, fatal=False)) elif format_type == 'MPEG_DASH': fmts, subs = self._extract_mpd_formats_and_subtitles( format_url, video_id, mpd_id=format_type, 
fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif format_type == 'HSS': fmts, subs = self._extract_ism_formats_and_subtitles( format_url, video_id, ism_id='mss', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append({ 'format_id': format_type, 'url': format_url, }) for sub in traverse_obj(data, ('subtitleUrls', lambda _, v: v['url'] and v['type'] == 'CLOSED')): subtitles.setdefault('nl', []).append({'url': sub['url']}) return formats, subtitles def _call_api(self, video_id, client='null', id_token=None, version='v2'): player_info = {'exp': (round(time.time(), 3) + 900), **self._PLAYER_INFO} player_token = self._download_json( f'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/{version}/tokens', video_id, 'Downloading player token', 'Failed to download player token', headers={ **self.geo_verification_headers(), 'Content-Type': 'application/json', }, data=json.dumps({ 'identityToken': id_token or '', 'playerInfo': jwt_encode(player_info, self._JWT_SIGNING_KEY, headers={ 'kid': self._JWT_KEY_ID, }), }, separators=(',', ':')).encode())['vrtPlayerToken'] return self._download_json( # The URL below redirects to https://media-services-public.vrt.be/media-aggregator/{version}/media-items/{video_id} f'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/{version}/videos/{video_id}', video_id, 'Downloading API JSON', 'Failed to download API JSON', query={ 'vrtPlayerToken': player_token, 'client': client, }, expected_status=400) class VRTIE(VRTBaseIE): IE_DESC = 'VRT NWS, Flanders News, Flandern Info and Sporza' _VALID_URL = r'https?://(?:www\.)?(?P<site>vrt\.be/vrtnws|sporza\.be)/[a-z]{2}/\d{4}/\d{2}/\d{2}/(?P<id>[^/?&#]+)' _TESTS = [{ 'url': 'https://www.vrt.be/vrtnws/nl/2019/05/15/beelden-van-binnenkant-notre-dame-een-maand-na-de-brand/', 'info_dict': { 'id': 
'pbs-pub-7855fc7b-1448-49bc-b073-316cb60caa71$vid-2ca50305-c38a-4762-9890-65cbd098b7bd', 'ext': 'mp4', 'title': 'Beelden van binnenkant Notre-Dame, één maand na de brand', 'description': 'md5:6fd85f999b2d1841aa5568f4bf02c3ff', 'duration': 31.2, 'thumbnail': 'https://images.vrt.be/orig/2019/05/15/2d914d61-7710-11e9-abcc-02b7b76bf47f.jpg', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://sporza.be/nl/2019/05/15/de-belgian-cats-zijn-klaar-voor-het-ek/', 'info_dict': { 'id': 'pbs-pub-f2c86a46-8138-413a-a4b9-a0015a16ce2c$vid-1f112b31-e58e-4379-908d-aca6d80f8818', 'ext': 'mp4', 'title': 'De Belgian Cats zijn klaar voor het EK', 'description': 'Video: De Belgian Cats zijn klaar voor het EK mét Ann Wauters | basketbal, sport in het journaal', 'duration': 115.17, 'thumbnail': 'https://images.vrt.be/orig/2019/05/15/11c0dba3-770e-11e9-abcc-02b7b76bf47f.jpg', }, 'params': {'skip_download': 'm3u8'}, }] _CLIENT_MAP = { 'vrt.be/vrtnws': 'vrtnieuws', 'sporza.be': 'sporza', } def _real_extract(self, url): site, display_id = self._match_valid_url(url).groups() webpage = self._download_webpage(url, display_id) attrs = extract_attributes(get_element_html_by_class('vrtvideo', webpage) or '') asset_id = attrs.get('data-video-id') or attrs['data-videoid'] publication_id = traverse_obj(attrs, 'data-publication-id', 'data-publicationid') if publication_id: asset_id = f'{publication_id}${asset_id}' client = traverse_obj(attrs, 'data-client-code', 'data-client') or self._CLIENT_MAP[site] data = self._call_api(asset_id, client) formats, subtitles = self._extract_formats_and_subtitles(data, asset_id) description = self._html_search_meta( ['og:description', 'twitter:description', 'description'], webpage) if description == '…': description = None return { 'id': asset_id, 'formats': formats, 'subtitles': subtitles, 'description': description, 'thumbnail': url_or_none(attrs.get('data-posterimage')), 'duration': float_or_none(attrs.get('data-duration'), 1000), '_old_archive_ids': 
[make_archive_id('Canvas', asset_id)], **traverse_obj(data, { 'title': ('title', {str}), 'description': ('shortDescription', {str}), 'duration': ('duration', {float_or_none(scale=1000)}), 'thumbnail': ('posterImageUrl', {url_or_none}), }), } class VrtNUIE(VRTBaseIE): IE_NAME = 'vrtmax' IE_DESC = 'VRT MAX (formerly VRT NU)' _VALID_URL = r'https?://(?:www\.)?vrt\.be/(?:vrtnu|vrtmax)/a-z/(?:[^/]+/){2}(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.vrt.be/vrtmax/a-z/ket---doc/trailer/ket---doc-trailer-s6/', 'info_dict': { 'id': 'pbs-pub-c8a78645-5d3e-468a-89ec-6f3ed5534bd5$vid-242ddfe9-18f5-4e16-ab45-09b122a19251', 'ext': 'mp4', 'channel': 'ketnet', 'description': 'Neem een kijkje in de bijzondere wereld van deze Ketnetters.', 'display_id': 'ket---doc-trailer-s6', 'duration': 30.0, 'episode': 'Reeks 6 volledig vanaf 3 maart', 'episode_id': '1739450401467', 'season': 'Trailer', 'season_id': '1739450401467', 'series': 'Ket & Doc', 'thumbnail': 'https://images.vrt.be/orig/2025/02/21/63f07122-5bbd-4ca1-b42e-8565c6cd95df.jpg', 'timestamp': 1740373200, 'title': 'Reeks 6 volledig vanaf 3 maart', 'upload_date': '20250224', '_old_archive_ids': [ 'canvas pbs-pub-c8a78645-5d3e-468a-89ec-6f3ed5534bd5$vid-242ddfe9-18f5-4e16-ab45-09b122a19251', 'ketnet pbs-pub-c8a78645-5d3e-468a-89ec-6f3ed5534bd5$vid-242ddfe9-18f5-4e16-ab45-09b122a19251', ], }, }, { 'url': 'https://www.vrt.be/vrtmax/a-z/meisjes/6/meisjes-s6a5/', 'info_dict': { 'id': 'pbs-pub-97b541ab-e05c-43b9-9a40-445702ef7189$vid-5e306921-a9aa-4fa9-9f39-5b82c8f1028e', 'ext': 'mp4', 'channel': 'ketnet', 'description': 'md5:713793f15cbf677f66200b36b7b1ec5a', 'display_id': 'meisjes-s6a5', 'duration': 1336.02, 'episode': 'Week 5', 'episode_id': '1684157692901', 'episode_number': 5, 'season': '6', 'season_id': '1684157692901', 'season_number': 6, 'series': 'Meisjes', 'thumbnail': 'https://images.vrt.be/orig/2023/05/14/bf526ae0-f1d9-11ed-91d7-02b7b76bf47f.jpg', 'timestamp': 1685251800, 'title': 'Week 5', 'upload_date': '20230528', 
'_old_archive_ids': [ 'canvas pbs-pub-97b541ab-e05c-43b9-9a40-445702ef7189$vid-5e306921-a9aa-4fa9-9f39-5b82c8f1028e', 'ketnet pbs-pub-97b541ab-e05c-43b9-9a40-445702ef7189$vid-5e306921-a9aa-4fa9-9f39-5b82c8f1028e', ], }, }, { 'url': 'https://www.vrt.be/vrtnu/a-z/taboe/3/taboe-s3a4/', 'info_dict': { 'id': 'pbs-pub-f50faa3a-1778-46b6-9117-4ba85f197703$vid-547507fe-1c8b-4394-b361-21e627cbd0fd', 'ext': 'mp4', 'channel': 'een', 'description': 'md5:bf61345a95eca9393a95de4a7a54b5c6', 'display_id': 'taboe-s3a4', 'duration': 2882.02, 'episode': 'Mensen met het syndroom van Gilles de la Tourette', 'episode_id': '1739055911734', 'episode_number': 4, 'season': '3', 'season_id': '1739055911734', 'season_number': 3, 'series': 'Taboe', 'thumbnail': 'https://images.vrt.be/orig/2025/02/19/8198496c-d1ae-4bca-9a48-761cf3ea3ff2.jpg', 'timestamp': 1740286800, 'title': 'Mensen met het syndroom van Gilles de la Tourette', 'upload_date': '20250223', '_old_archive_ids': [ 'canvas pbs-pub-f50faa3a-1778-46b6-9117-4ba85f197703$vid-547507fe-1c8b-4394-b361-21e627cbd0fd', 'ketnet pbs-pub-f50faa3a-1778-46b6-9117-4ba85f197703$vid-547507fe-1c8b-4394-b361-21e627cbd0fd', ], }, }] _NETRC_MACHINE = 'vrtnu' _TOKEN_COOKIE_DOMAIN = '.www.vrt.be' _ACCESS_TOKEN_COOKIE_NAME = 'vrtnu-site_profile_at' _REFRESH_TOKEN_COOKIE_NAME = 'vrtnu-site_profile_rt' _VIDEO_TOKEN_COOKIE_NAME = 'vrtnu-site_profile_vt' _VIDEO_PAGE_QUERY = ''' query VideoPage($pageId: ID!) { page(id: $pageId) { ... 
on EpisodePage { episode { ageRaw description durationRaw episodeNumberRaw id name onTimeRaw program { title } season { id titleRaw } title brand } ldjson player { image { templateUrl } modes { streamId } } } } } ''' def _fetch_tokens(self): has_credentials = self._get_login_info()[0] access_token = self._get_vrt_cookie(self._ACCESS_TOKEN_COOKIE_NAME) video_token = self._get_vrt_cookie(self._VIDEO_TOKEN_COOKIE_NAME) if (access_token and not self._is_jwt_token_expired(access_token) and video_token and not self._is_jwt_token_expired(video_token)): return access_token, video_token if has_credentials: access_token, video_token = self.cache.load(self._NETRC_MACHINE, 'token_data', default=(None, None)) if (access_token and not self._is_jwt_token_expired(access_token) and video_token and not self._is_jwt_token_expired(video_token)): self.write_debug('Restored tokens from cache') self._set_cookie(self._TOKEN_COOKIE_DOMAIN, self._ACCESS_TOKEN_COOKIE_NAME, access_token) self._set_cookie(self._TOKEN_COOKIE_DOMAIN, self._VIDEO_TOKEN_COOKIE_NAME, video_token) return access_token, video_token if not self._get_vrt_cookie(self._REFRESH_TOKEN_COOKIE_NAME): return None, None self._request_webpage( 'https://www.vrt.be/vrtmax/sso/refresh', None, note='Refreshing tokens', errnote='Failed to refresh tokens', fatal=False) access_token = self._get_vrt_cookie(self._ACCESS_TOKEN_COOKIE_NAME) video_token = self._get_vrt_cookie(self._VIDEO_TOKEN_COOKIE_NAME) if not access_token or not video_token: self.cache.store(self._NETRC_MACHINE, 'refresh_token', None) self.cookiejar.clear(self._TOKEN_COOKIE_DOMAIN, '/vrtmax/sso', self._REFRESH_TOKEN_COOKIE_NAME) msg = 'Refreshing of tokens failed' if not has_credentials: self.report_warning(msg) return None, None self.report_warning(f'{msg}. 
Re-logging in') return self._perform_login(*self._get_login_info()) if has_credentials: self.cache.store(self._NETRC_MACHINE, 'token_data', (access_token, video_token)) return access_token, video_token def _get_vrt_cookie(self, cookie_name): # Refresh token cookie is scoped to /vrtmax/sso, others are scoped to / return try_call(lambda: self._get_cookies('https://www.vrt.be/vrtmax/sso')[cookie_name].value) @staticmethod def _is_jwt_token_expired(token): return jwt_decode_hs256(token)['exp'] - time.time() < 300 def _perform_login(self, username, password): refresh_token = self._get_vrt_cookie(self._REFRESH_TOKEN_COOKIE_NAME) if refresh_token and not self._is_jwt_token_expired(refresh_token): self.write_debug('Using refresh token from logged-in cookies; skipping login with credentials') return refresh_token = self.cache.load(self._NETRC_MACHINE, 'refresh_token', default=None) if refresh_token and not self._is_jwt_token_expired(refresh_token): self.write_debug('Restored refresh token from cache') self._set_cookie(self._TOKEN_COOKIE_DOMAIN, self._REFRESH_TOKEN_COOKIE_NAME, refresh_token, path='/vrtmax/sso') return self._request_webpage( 'https://www.vrt.be/vrtmax/sso/login', None, note='Getting session cookies', errnote='Failed to get session cookies') login_data = self._download_json( 'https://login.vrt.be/perform_login', None, data=json.dumps({ 'clientId': 'vrtnu-site', 'loginID': username, 'password': password, }).encode(), headers={ 'Content-Type': 'application/json', 'Oidcxsrf': self._get_cookies('https://login.vrt.be')['OIDCXSRF'].value, }, note='Logging in', errnote='Login failed', expected_status=403) if login_data.get('errorCode'): raise ExtractorError(f'Login failed: {login_data.get("errorMessage")}', expected=True) self._request_webpage( login_data['redirectUrl'], None, note='Getting access token', errnote='Failed to get access token') access_token = self._get_vrt_cookie(self._ACCESS_TOKEN_COOKIE_NAME) video_token = 
self._get_vrt_cookie(self._VIDEO_TOKEN_COOKIE_NAME) refresh_token = self._get_vrt_cookie(self._REFRESH_TOKEN_COOKIE_NAME) if not all((access_token, video_token, refresh_token)): raise ExtractorError('Unable to extract token cookie values') self.cache.store(self._NETRC_MACHINE, 'token_data', (access_token, video_token)) self.cache.store(self._NETRC_MACHINE, 'refresh_token', refresh_token) return access_token, video_token def _real_extract(self, url): display_id = self._match_id(url) access_token, video_token = self._fetch_tokens() metadata = self._download_json( f'https://www.vrt.be/vrtnu-api/graphql{"" if access_token else "/public"}/v1', display_id, 'Downloading asset JSON', 'Unable to download asset JSON', data=json.dumps({ 'operationName': 'VideoPage', 'query': self._VIDEO_PAGE_QUERY, 'variables': {'pageId': urllib.parse.urlparse(url).path}, }).encode(), headers=filter_dict({ 'Authorization': f'Bearer {access_token}' if access_token else None, 'Content-Type': 'application/json', 'x-vrt-client-name': 'WEB', 'x-vrt-client-version': '1.5.9', 'x-vrt-zone': 'default', }))['data']['page'] video_id = metadata['player']['modes'][0]['streamId'] try: streaming_info = self._call_api(video_id, 'vrtnu-web@PROD', id_token=video_token) except ExtractorError as e: if not video_token and isinstance(e.cause, HTTPError) and e.cause.status == 404: self.raise_login_required() raise formats, subtitles = self._extract_formats_and_subtitles(streaming_info, video_id) code = traverse_obj(streaming_info, ('code', {str})) if not formats and code: if code in ('CONTENT_AVAILABLE_ONLY_FOR_BE_RESIDENTS', 'CONTENT_AVAILABLE_ONLY_IN_BE', 'CONTENT_UNAVAILABLE_VIA_PROXY'): self.raise_geo_restricted(countries=['BE']) elif code in ('CONTENT_AVAILABLE_ONLY_FOR_BE_RESIDENTS_AND_EXPATS', 'CONTENT_IS_AGE_RESTRICTED', 'CONTENT_REQUIRES_AUTHENTICATION'): self.raise_login_required() else: self.raise_no_formats(f'Unable to extract formats: {code}') return { 'duration': 
float_or_none(streaming_info.get('duration'), 1000), 'thumbnail': url_or_none(streaming_info.get('posterImageUrl')), **self._json_ld(traverse_obj(metadata, ('ldjson', ..., {json.loads})), video_id, fatal=False), **traverse_obj(metadata, ('episode', { 'title': ('title', {str}), 'description': ('description', {str}), 'timestamp': ('onTimeRaw', {parse_iso8601}), 'series': ('program', 'title', {str}), 'season': ('season', 'titleRaw', {str}), 'season_number': ('season', 'titleRaw', {int_or_none}), 'season_id': ('id', {str_or_none}), 'episode': ('title', {str}), 'episode_number': ('episodeNumberRaw', {int_or_none}), 'episode_id': ('id', {str_or_none}), 'age_limit': ('ageRaw', {parse_age_limit}), 'channel': ('brand', {str}), 'duration': ('durationRaw', {parse_duration}), })), 'id': video_id, 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, '_old_archive_ids': [make_archive_id('Canvas', video_id), make_archive_id('Ketnet', video_id)], } class DagelijkseKostIE(VRTBaseIE): IE_DESC = 'dagelijksekost.een.be' _VALID_URL = r'https?://dagelijksekost\.een\.be/gerechten/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://dagelijksekost.een.be/gerechten/hachis-parmentier-met-witloof', 'info_dict': { 'id': 'md-ast-27a4d1ff-7d7b-425e-b84f-a4d227f592fa', 'ext': 'mp4', 'title': 'Hachis parmentier met witloof', 'description': 'md5:9960478392d87f63567b5b117688cdc5', 'display_id': 'hachis-parmentier-met-witloof', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( r'data-url=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id', group='id') data = self._call_api(video_id, 'dako@prod', version='v1') formats, subtitles = self._extract_formats_and_subtitles(data, video_id) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'display_id': display_id, 'title': strip_or_none(get_element_by_class( 'dish-metadata__title', webpage) 
or self._html_search_meta('twitter:title', webpage)), 'description': clean_html(get_element_by_class( 'dish-description', webpage)) or self._html_search_meta( ['description', 'twitter:description', 'og:description'], webpage), '_old_archive_ids': [make_archive_id('Canvas', video_id)], } class Radio1BeIE(VRTBaseIE): _VALID_URL = r'https?://radio1\.be/(?:lees|luister/select)/(?P<id>[\w/-]+)' _TESTS = [{ 'url': 'https://radio1.be/luister/select/de-ochtend/komt-n-va-volgend-jaar-op-in-wallonie', 'info_dict': { 'id': 'eb6c22e9-544f-44f4-af39-cf8cccd29e22', 'title': 'Komt N-VA volgend jaar op in Wallonië?', 'display_id': 'de-ochtend/komt-n-va-volgend-jaar-op-in-wallonie', 'description': 'md5:b374ea1c9302f38362df9dea1931468e', 'thumbnail': r're:https?://cds\.vrt\.radio/[^/#\?&]+', }, 'playlist_mincount': 1, }, { 'url': 'https://radio1.be/lees/europese-unie-wil-onmiddellijke-humanitaire-pauze-en-duurzaam-staakt-het-vuren-in-gaza?view=web', 'info_dict': { 'id': '5d47f102-dbdb-4fa0-832b-26c1870311f2', 'title': 'Europese Unie wil "onmiddellijke humanitaire pauze" en "duurzaam staakt-het-vuren" in Gaza', 'description': 'md5:1aad1fae7d39edeffde5d3e67d276b64', 'thumbnail': r're:https?://cds\.vrt\.radio/[^/#\?&]+', 'display_id': 'europese-unie-wil-onmiddellijke-humanitaire-pauze-en-duurzaam-staakt-het-vuren-in-gaza', }, 'playlist_mincount': 1, }] def _extract_video_entries(self, next_js_data, display_id): video_data = traverse_obj( next_js_data, ((None, ('paragraphs', ...)), {lambda x: x if x['mediaReference'] else None})) for data in video_data: media_reference = data['mediaReference'] formats, subtitles = self._extract_formats_and_subtitles( self._call_api(media_reference), display_id) yield { 'id': media_reference, 'formats': formats, 'subtitles': subtitles, **traverse_obj(data, { 'title': ('title', {str}), 'description': ('body', {clean_html}), }), } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) next_js_data = 
self._search_nextjs_data(webpage, display_id)['props']['pageProps']['item'] return self.playlist_result( self._extract_video_entries(next_js_data, display_id), **merge_dicts(traverse_obj( next_js_data, ({ 'id': ('id', {str}), 'title': ('title', {str}), 'description': (('description', 'content'), {clean_html}), }), get_all=False), { 'display_id': display_id, 'title': self._html_search_meta(['name', 'og:title', 'twitter:title'], webpage), 'description': self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage), 'thumbnail': self._html_search_meta(['og:image', 'twitter:image'], webpage), }))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/magellantv.py
yt_dlp/extractor/magellantv.py
from .common import InfoExtractor from ..utils import parse_age_limit, parse_duration, url_or_none from ..utils.traversal import traverse_obj class MagellanTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?magellantv\.com/(?:watch|video)/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.magellantv.com/watch/incas-the-new-story?type=v', 'info_dict': { 'id': 'incas-the-new-story', 'ext': 'mp4', 'title': 'Incas: The New Story', 'description': 'md5:936c7f6d711c02dfb9db22a067b586fe', 'age_limit': 14, 'duration': 3060.0, 'tags': ['Ancient History', 'Archaeology', 'Anthropology'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.magellantv.com/video/tortured-to-death-murdering-the-nanny', 'info_dict': { 'id': 'tortured-to-death-murdering-the-nanny', 'ext': 'mp4', 'title': 'Tortured to Death: Murdering the Nanny', 'description': 'md5:d87033594fa218af2b1a8b49f52511e5', 'age_limit': 14, 'duration': 2640.0, 'tags': ['True Crime', 'Murder'], }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.magellantv.com/watch/celebration-nation?type=s', 'info_dict': { 'id': 'celebration-nation', 'ext': 'mp4', 'tags': ['Art & Culture', 'Human Interest', 'Anthropology', 'China', 'History'], 'duration': 2640.0, 'title': 'Ancestors', }, 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) context = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['reactContext'] data = traverse_obj(context, ((('video', 'detail'), ('series', 'currentEpisode')), {dict}, any)) formats, subtitles = [], {} for m3u8_url in set(traverse_obj(data, ((('manifests', ..., 'hls'), 'jwp_video_url'), {url_or_none}))): fmts, subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if not formats and (error := traverse_obj(context, ('errorDetailPage', 'errorMessage', 
{str}))): if 'available in your country' in error: self.raise_geo_restricted(msg=error) self.raise_no_formats(f'{self.IE_NAME} said: {error}', expected=True) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(data, { 'title': ('title', {str}), 'description': ('metadata', 'description', {str}), 'duration': ('duration', {parse_duration}), 'age_limit': ('ratingCategory', {parse_age_limit}), 'tags': ('tags', ..., {str}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/wimbledon.py
yt_dlp/extractor/wimbledon.py
from .common import InfoExtractor from ..utils import ( parse_duration, traverse_obj, ) class WimbledonIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?wimbledon\.com/\w+/video/media/(?P<id>\d+)\.html' _TESTS = [{ 'url': 'https://www.wimbledon.com/en_GB/video/media/6330247525112.html', 'info_dict': { 'id': '6330247525112', 'ext': 'mp4', 'timestamp': 1687972186, 'description': '', 'thumbnail': r're:^https://[\w.-]+\.prod\.boltdns\.net/[^?#]+/image\.jpg', 'upload_date': '20230628', 'title': 'Coco Gauff | My Wimbledon Inspiration', 'tags': ['features', 'trending', 'homepage'], 'uploader_id': '3506358525001', 'duration': 163072.0, }, }, { 'url': 'https://www.wimbledon.com/en_GB/video/media/6308703111112.html', 'info_dict': { 'id': '6308703111112', 'ext': 'mp4', 'thumbnail': r're:^https://[\w.-]+\.prod\.boltdns\.net/[^?#]+/image\.jpg', 'description': 'null', 'upload_date': '20220629', 'uploader_id': '3506358525001', 'title': 'Roblox | WimbleWorld ', 'duration': 101440.0, 'tags': ['features', 'kids'], 'timestamp': 1656500867, }, }, { 'url': 'https://www.wimbledon.com/en_US/video/media/6309327106112.html', 'only_matching': True, }, { 'url': 'https://www.wimbledon.com/es_Es/video/media/6308377909112.html', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) metadata = self._download_json( f'https://www.wimbledon.com/relatedcontent/rest/v2/wim_v1/en/content/wim_v1_{video_id}_en', video_id) return { '_type': 'url_transparent', 'url': f'http://players.brightcove.net/3506358525001/default_default/index.html?videoId={video_id}', 'ie_key': 'BrightcoveNew', 'id': video_id, **traverse_obj(metadata, { 'title': 'title', 'description': 'description', 'duration': ('metadata', 'duration', {parse_duration}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/drtuber.py
yt_dlp/extractor/drtuber.py
import re from .common import InfoExtractor from ..utils import ( NO_DEFAULT, int_or_none, parse_duration, str_to_int, ) class DrTuberIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m)\.)?drtuber\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[\w-]+))?' _EMBED_REGEX = [r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)'] _TESTS = [{ 'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf', 'md5': '93e680cf2536ad0dfb7e74d94a89facd', 'info_dict': { 'id': '1740434', 'display_id': 'hot-perky-blonde-naked-golf', 'ext': 'mp4', 'title': 'hot perky blonde naked golf', 'like_count': int, 'comment_count': int, 'categories': ['Babe', 'Blonde', 'Erotic', 'Outdoor', 'Softcore', 'Solo'], 'thumbnail': r're:https?://.*\.jpg$', 'age_limit': 18, }, }, { 'url': 'http://www.drtuber.com/embed/489939', 'only_matching': True, }, { 'url': 'http://m.drtuber.com/video/3893529/lingerie-blowjob-from-beautiful-teen', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') display_id = mobj.group('display_id') or video_id webpage = self._download_webpage( f'http://www.drtuber.com/video/{video_id}', display_id) video_data = self._download_json( 'http://www.drtuber.com/player_config_json/', video_id, query={ 'vid': video_id, 'embed': 0, 'aid': 0, 'domain_id': 0, }) formats = [] for format_id, video_url in video_data['files'].items(): if video_url: formats.append({ 'format_id': format_id, 'quality': 2 if format_id == 'hq' else 1, 'url': video_url, }) duration = int_or_none(video_data.get('duration')) or parse_duration( video_data.get('duration_format')) title = self._html_search_regex( (r'<h1[^>]+class=["\']title[^>]+>([^<]+)', r'<title>([^<]+)\s*@\s+DrTuber', r'class="title_watch"[^>]*><(?:p|h\d+)[^>]*>([^<]+)<', r'<p[^>]+class="title_substrate">([^<]+)</p>', r'<title>([^<]+) - \d+'), webpage, 'title') thumbnail = self._html_search_regex( r'poster="([^"]+)"', webpage, 'thumbnail', 
fatal=False) def extract_count(id_, name, default=NO_DEFAULT): return str_to_int(self._html_search_regex( rf'<span[^>]+(?:class|id)="{id_}"[^>]*>([\d,\.]+)</span>', webpage, f'{name} count', default=default, fatal=False)) like_count = extract_count('rate_likes', 'like') dislike_count = extract_count('rate_dislikes', 'dislike', default=None) comment_count = extract_count('comments_count', 'comment') cats_str = self._search_regex( r'<div[^>]+class="categories_list">(.+?)</div>', webpage, 'categories', fatal=False) categories = [] if not cats_str else re.findall( r'<a title="([^"]+)"', cats_str) return { 'id': video_id, 'display_id': display_id, 'formats': formats, 'title': title, 'thumbnail': thumbnail, 'like_count': like_count, 'dislike_count': dislike_count, 'comment_count': comment_count, 'categories': categories, 'age_limit': self._rta_search(webpage), 'duration': duration, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gedidigital.py
yt_dlp/extractor/gedidigital.py
import re from .common import InfoExtractor from ..utils import ( base_url, determine_ext, int_or_none, url_basename, urljoin, ) class GediDigitalIE(InfoExtractor): _VALID_URL = r'''(?x:(?P<base_url>(?:https?:)//video\. (?: (?: (?:espresso\.)?repubblica |lastampa |ilsecoloxix |huffingtonpost )| (?: iltirreno |messaggeroveneto |ilpiccolo |gazzettadimantova |mattinopadova |laprovinciapavese |tribunatreviso |nuovavenezia |gazzettadimodena |lanuovaferrara |corrierealpi |lasentinella )\.gelocal )\.it(?:/[^/]+){2,4}/(?P<id>\d+))(?:$|[?&].*))''' _EMBED_REGEX = [rf'''(?x) (?: data-frame-src=| <iframe[^\n]+src= ) (["'])(?P<url>{_VALID_URL})\1'''] _TESTS = [{ 'url': 'https://video.lastampa.it/politica/il-paradosso-delle-regionali-la-lega-vince-ma-sembra-aver-perso/121559/121683', 'md5': '84658d7fb9e55a6e57ecc77b73137494', 'info_dict': { 'id': '121683', 'ext': 'mp4', 'title': 'Il paradosso delle Regionali: ecco perché la Lega vince ma sembra aver perso', 'description': 'md5:de7f4d6eaaaf36c153b599b10f8ce7ca', 'thumbnail': r're:^https://www\.repstatic\.it/video/photo/.+?-thumb-full-.+?\.jpg$', 'duration': 125, }, }, { 'url': 'https://video.huffingtonpost.it/embed/politica/cotticelli-non-so-cosa-mi-sia-successo-sto-cercando-di-capire-se-ho-avuto-un-malore/29312/29276?responsive=true&el=video971040871621586700', 'only_matching': True, }, { 'url': 'https://video.espresso.repubblica.it/embed/tutti-i-video/01-ted-villa/14772/14870&width=640&height=360', 'only_matching': True, }, { 'url': 'https://video.repubblica.it/motori/record-della-pista-a-spa-francorchamps-la-pagani-huayra-roadster-bc-stupisce/367415/367963', 'only_matching': True, }, { 'url': 'https://video.ilsecoloxix.it/sport/cassani-e-i-brividi-azzurri-ai-mondiali-di-imola-qui-mi-sono-innamorato-del-ciclismo-da-ragazzino-incredibile-tornarci-da-ct/66184/66267', 'only_matching': True, }, { 'url': 'https://video.iltirreno.gelocal.it/sport/dentro-la-notizia-ferrari-cosa-succede-a-maranello/141059/142723', 'only_matching': 
True, }, { 'url': 'https://video.messaggeroveneto.gelocal.it/locale/maria-giovanna-elmi-covid-vaccino/138155/139268', 'only_matching': True, }, { 'url': 'https://video.ilpiccolo.gelocal.it/dossier/big-john/dinosauro-big-john-al-via-le-visite-guidate-a-trieste/135226/135751', 'only_matching': True, }, { 'url': 'https://video.gazzettadimantova.gelocal.it/locale/dal-ponte-visconteo-di-valeggio-l-and-8217sos-dei-ristoratori-aprire-anche-a-cena/137310/137818', 'only_matching': True, }, { 'url': 'https://video.mattinopadova.gelocal.it/dossier/coronavirus-in-veneto/covid-a-vo-un-anno-dopo-un-cuore-tricolore-per-non-dimenticare/138402/138964', 'only_matching': True, }, { 'url': 'https://video.laprovinciapavese.gelocal.it/locale/mede-zona-rossa-via-alle-vaccinazioni-per-gli-over-80/137545/138120', 'only_matching': True, }, { 'url': 'https://video.tribunatreviso.gelocal.it/dossier/coronavirus-in-veneto/ecco-le-prima-vaccinazioni-di-massa-nella-marca/134485/135024', 'only_matching': True, }, { 'url': 'https://video.nuovavenezia.gelocal.it/locale/camion-troppo-alto-per-il-ponte-ferroviario-perde-il-carico/135734/136266', 'only_matching': True, }, { 'url': 'https://video.gazzettadimodena.gelocal.it/locale/modena-scoperta-la-proteina-che-predice-il-livello-di-gravita-del-covid/139109/139796', 'only_matching': True, }, { 'url': 'https://video.lanuovaferrara.gelocal.it/locale/due-bombole-di-gpl-aperte-e-abbandonate-i-vigili-bruciano-il-gas/134391/134957', 'only_matching': True, }, { 'url': 'https://video.corrierealpi.gelocal.it/dossier/cortina-2021-i-mondiali-di-sci-alpino/mondiali-di-sci-il-timelapse-sulla-splendida-olympia/133760/134331', 'only_matching': True, }, { 'url': 'https://video.lasentinella.gelocal.it/locale/vestigne-centra-un-auto-e-si-ribalta/138931/139466', 'only_matching': True, }, { 'url': 'https://video.espresso.repubblica.it/tutti-i-video/01-ted-villa/14772', 'only_matching': True, }] @staticmethod def _sanitize_urls(urls): # add protocol if missing for i, e in 
enumerate(urls): if e.startswith('//'): urls[i] = f'https:{e}' # clean iframes urls for i, e in enumerate(urls): urls[i] = urljoin(base_url(e), url_basename(e)) return urls @classmethod def _extract_embed_urls(cls, url, webpage): return cls._sanitize_urls(tuple(super()._extract_embed_urls(url, webpage))) @staticmethod def _clean_formats(formats): format_urls = set() clean_formats = [] for f in formats: if f['url'] not in format_urls: if f.get('audio_ext') != 'none' and not f.get('acodec'): continue format_urls.add(f['url']) clean_formats.append(f) formats[:] = clean_formats def _real_extract(self, url): video_id, url = self._match_valid_url(url).group('id', 'base_url') webpage = self._download_webpage(url, video_id) title = self._html_search_meta( ['twitter:title', 'og:title'], webpage, fatal=True) player_data = re.findall( r"PlayerFactory\.setParam\('(?P<type>format|param)',\s*'(?P<name>[^']+)',\s*'(?P<val>[^']+)'\);", webpage) formats = [] duration = thumb = None for t, n, v in player_data: if t == 'format': if n in ('video-hds-vod-ec', 'video-hls-vod-ec', 'video-viralize', 'video-youtube-pfp'): continue elif n.endswith('-vod-ak'): formats.extend(self._extract_akamai_formats( v, video_id, {'http': 'media.gedidigital.it'})) else: ext = determine_ext(v) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( v, video_id, 'mp4', 'm3u8_native', m3u8_id=n, fatal=False)) continue f = { 'format_id': n, 'url': v, } if ext == 'mp3': abr = int_or_none(self._search_regex( r'-mp3-audio-(\d+)', v, 'abr', default=None)) f.update({ 'abr': abr, 'tbr': abr, 'acodec': ext, 'vcodec': 'none', }) else: mobj = re.match(r'^video-rrtv-(\d+)(?:-(\d+))?$', n) if mobj: f.update({ 'height': int(mobj.group(1)), 'vbr': int_or_none(mobj.group(2)), }) if not f.get('vbr'): f['vbr'] = int_or_none(self._search_regex( r'-video-rrtv-(\d+)', v, 'abr', default=None)) formats.append(f) elif t == 'param': if n in ['image_full', 'image']: thumb = v elif n == 'videoDuration': duration = 
int_or_none(v) self._clean_formats(formats) return { 'id': video_id, 'title': title, 'description': self._html_search_meta( ['twitter:description', 'og:description', 'description'], webpage), 'thumbnail': thumb or self._og_search_thumbnail(webpage), 'formats': formats, 'duration': duration, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rmcdecouverte.py
yt_dlp/extractor/rmcdecouverte.py
import urllib.parse from .brightcove import BrightcoveLegacyIE from .common import InfoExtractor from ..utils import smuggle_url class RMCDecouverteIE(InfoExtractor): _VALID_URL = r'https?://rmcdecouverte\.bfmtv\.com/(?:[^?#]*_(?P<id>\d+)|mediaplayer-direct)/?(?:[#?]|$)' _TESTS = [{ 'url': 'https://rmcdecouverte.bfmtv.com/vestiges-de-guerre_22240/les-bunkers-secrets-domaha-beach_25303/', 'info_dict': { 'id': '6250879771001', 'ext': 'mp4', 'title': 'LES BUNKERS SECRETS D´OMAHA BEACH', 'uploader_id': '1969646226001', 'description': 'md5:aed573ca24abde62a148e0eba909657d', 'timestamp': 1619622984, 'upload_date': '20210428', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://rmcdecouverte.bfmtv.com/wheeler-dealers-occasions-a-saisir/program_2566/', 'info_dict': { 'id': '5983675500001', 'ext': 'mp4', 'title': 'CORVETTE', 'description': 'md5:c1e8295521e45ffebf635d6a7658f506', 'uploader_id': '1969646226001', 'upload_date': '20181226', 'timestamp': 1545861635, }, 'params': { 'skip_download': True, }, 'skip': 'only available for a week', }, { 'url': 'https://rmcdecouverte.bfmtv.com/avions-furtifs-la-technologie-de-lextreme_10598', 'only_matching': True, }, { # The website accepts any URL as long as it has _\d+ at the end 'url': 'https://rmcdecouverte.bfmtv.com/any/thing/can/go/here/_10598', 'only_matching': True, }, { # live, geo restricted, bypassable 'url': 'https://rmcdecouverte.bfmtv.com/mediaplayer-direct/', 'only_matching': True, }] BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1969646226001/default_default/index.html?videoId=%s' def _real_extract(self, url): mobj = self._match_valid_url(url) display_id = mobj.group('id') or 'direct' webpage = self._download_webpage(url, display_id) brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) if brightcove_legacy_url: brightcove_id = urllib.parse.parse_qs(urllib.parse.urlparse( brightcove_legacy_url).query)['@videoPlayer'][0] else: brightcove_id = self._search_regex( 
r'data-video-id=["\'](\d+)', webpage, 'brightcove id') return self.url_result( smuggle_url( self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {'geo_countries': ['FR']}), 'BrightcoveNew', brightcove_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mirrorcouk.py
yt_dlp/extractor/mirrorcouk.py
from .common import InfoExtractor from ..utils import unescapeHTML class MirrorCoUKIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mirror\.co\.uk/[/+[\w-]+-(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.mirror.co.uk/tv/tv-news/love-island-fans-baffled-after-27163139', 'info_dict': { 'id': 'voyyS7SV', 'ext': 'mp4', 'title': 'Love Island: Gemma Owen enters the villa', 'description': 'Love Island: Michael Owen\'s daughter Gemma Owen enters the villa.', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/voyyS7SV/poster.jpg?width=720', 'display_id': '27163139', 'timestamp': 1654547895, 'duration': 57.0, 'upload_date': '20220606', }, }, { 'url': 'https://www.mirror.co.uk/3am/celebrity-news/michael-jacksons-son-blankets-new-25344890', 'info_dict': { 'id': 'jyXpdvxp', 'ext': 'mp4', 'title': 'Michael Jackson’s son Bigi calls for action on climate change', 'description': 'md5:d39ceaba2b7a615b4ca6557e7bc40222', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/jyXpdvxp/poster.jpg?width=720', 'display_id': '25344890', 'timestamp': 1635749907, 'duration': 56.0, 'upload_date': '20211101', }, }, { 'url': 'https://www.mirror.co.uk/sport/football/news/antonio-conte-next-tottenham-manager-25346042', 'info_dict': { 'id': 'q6FkKa4p', 'ext': 'mp4', 'title': 'Nuno sacked by Tottenham after fifth Premier League defeat of the season', 'description': 'Nuno Espirito Santo has been sacked as Tottenham boss after only four months in charge.', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/q6FkKa4p/poster.jpg?width=720', 'display_id': '25346042', 'timestamp': 1635763157, 'duration': 40.0, 'upload_date': '20211101', }, }, { 'url': 'https://www.mirror.co.uk/3am/celebrity-news/johnny-depp-splashes-50k-curry-27160737', 'info_dict': { 'id': 'IT0oa1nH', 'ext': 'mp4', 'title': 'Johnny Depp Leaves The Grand Hotel in Birmingham', 'description': 'Johnny Depp Leaves The Grand Hotel in Birmingham.', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/IT0oa1nH/poster.jpg?width=720', 'display_id': '27160737', 
'timestamp': 1654524120, 'duration': 65.0, 'upload_date': '20220606', }, }, { 'url': 'https://www.mirror.co.uk/tv/tv-news/love-islands-liam-could-first-27162602', 'info_dict': { 'id': 'EaPr5Z2j', 'ext': 'mp4', 'title': 'Love Island: Davide reveals plot twist after receiving text', 'description': 'Love Island: Davide reveals plot twist after receiving text', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/EaPr5Z2j/poster.jpg?width=720', 'display_id': '27162602', 'timestamp': 1654552597, 'duration': 23.0, 'upload_date': '20220606', }, }, { 'url': 'https://www.mirror.co.uk/news/uk-news/william-kate-sent-message-george-27160572', 'info_dict': { 'id': 'ygtceXIu', 'ext': 'mp4', 'title': 'Prince William and Kate arrive in Wales with George and Charlotte', 'description': 'Prince William and Kate Middleton arrive in Wales with children Prince George and Princess Charlotte.', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/ygtceXIu/poster.jpg?width=720', 'display_id': '27160572', 'timestamp': 1654349678, 'duration': 106.0, 'upload_date': '20220604', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) data = self._search_json(r'div\s+class="json-placeholder"\s+data-json="', webpage, 'data', display_id, transform_source=unescapeHTML)['videoData'] return { '_type': 'url_transparent', 'url': f'jwplatform:{data["videoId"]}', 'ie_key': 'JWPlatform', 'display_id': display_id, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/onionstudios.py
yt_dlp/extractor/onionstudios.py
from .common import InfoExtractor from ..utils import js_to_json class OnionStudiosIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?onionstudios\.com/(?:video(?:s/[^/]+-|/)|embed\?.*\bid=)(?P<id>\d+)(?!-)' _EMBED_REGEX = [r'(?s)<(?:iframe|bulbs-video)[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?onionstudios\.com/(?:embed.+?|video/\d+\.json))\1'] _TESTS = [{ 'url': 'http://www.onionstudios.com/videos/hannibal-charges-forward-stops-for-a-cocktail-2937', 'md5': '5a118d466d62b5cd03647cf2c593977f', 'info_dict': { 'id': '3459881', 'ext': 'mp4', 'title': 'Hannibal charges forward, stops for a cocktail', 'description': 'md5:545299bda6abf87e5ec666548c6a9448', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'a.v. club', 'upload_date': '20150619', 'timestamp': 1434728546, }, }, { 'url': 'http://www.onionstudios.com/embed?id=2855&autoplay=true', 'only_matching': True, }, { 'url': 'http://www.onionstudios.com/video/6139.json', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://onionstudios.com/embed/dc94dc2899fe644c0e7241fa04c1b732.js', video_id) mcp_id = str(self._parse_json(self._search_regex( r'window\.mcpMapping\s*=\s*({.+?});', webpage, 'MCP Mapping'), video_id, js_to_json)[video_id]['mcp_id']) return self.url_result( 'http://kinja.com/ajax/inset/iframe?id=mcp-' + mcp_id, 'KinjaEmbed', mcp_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nts.py
yt_dlp/extractor/nts.py
from .common import InfoExtractor from ..utils import parse_iso8601, url_or_none from ..utils.traversal import traverse_obj class NTSLiveIE(InfoExtractor): IE_NAME = 'nts.live' _VALID_URL = r'https?://(?:www\.)?nts\.live/shows/[^/?#]+/episodes/(?P<id>[^/?#]+)' _TESTS = [ { # embedded soundcloud 'url': 'https://www.nts.live/shows/yu-su/episodes/yu-su-2nd-april-2024', 'md5': 'b5444c04888c869d68758982de1a27d8', 'info_dict': { 'id': '1791563518', 'ext': 'opus', 'uploader_id': '995579326', 'title': 'Pender Street Steppers & YU SU', 'timestamp': 1712073600, 'upload_date': '20240402', 'thumbnail': 'https://i1.sndcdn.com/artworks-qKcNO0z0AQGGbv9s-GljJCw-original.jpg', 'license': 'all-rights-reserved', 'repost_count': int, 'uploader_url': 'https://soundcloud.com/user-643553014', 'uploader': 'NTS Latest', 'description': 'md5:cd00ac535a63caaad722483ae3ff802a', 'duration': 10784.157, 'genres': ['Deep House', 'House', 'Leftfield Disco', 'Jazz Fusion', 'Dream Pop'], 'modified_timestamp': 1712564687, 'modified_date': '20240408', }, }, { # embedded mixcloud 'url': 'https://www.nts.live/shows/absolute-fiction/episodes/absolute-fiction-23rd-july-2022', 'info_dict': { 'id': 'NTSRadio_absolute-fiction-23rd-july-2022', 'ext': 'webm', 'like_count': int, 'title': 'Absolute Fiction', 'comment_count': int, 'uploader_url': 'https://www.mixcloud.com/NTSRadio/', 'description': 'md5:ba49da971ae8d71ee45813c52c5e2a04', 'tags': [], 'duration': 3529, 'timestamp': 1658588400, 'repost_count': int, 'upload_date': '20220723', 'uploader_id': 'NTSRadio', 'thumbnail': 'https://thumbnailer.mixcloud.com/unsafe/1024x1024/extaudio/5/1/a/d/ae3e-1be9-4fd4-983e-9c3294226eac', 'uploader': 'Mixcloud NTS Radio', 'genres': ['Minimal Synth', 'Post Punk', 'Industrial '], 'modified_timestamp': 1658842165, 'modified_date': '20220726', }, 'params': {'skip_download': 'm3u8'}, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = 
self._search_json(r'window\._REACT_STATE_\s*=', webpage, 'react state', video_id) return { '_type': 'url_transparent', **traverse_obj(data, ('episode', { 'url': ('audio_sources', ..., 'url', {url_or_none}, any), 'title': ('name', {str}), 'description': ('description', {str}), 'genres': ('genres', ..., 'value', {str}), 'timestamp': ('broadcast', {parse_iso8601}), 'modified_timestamp': ('updated', {parse_iso8601}), })), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/clipchamp.py
yt_dlp/extractor/clipchamp.py
from .common import InfoExtractor from ..utils import ( ExtractorError, traverse_obj, unified_timestamp, url_or_none, ) class ClipchampIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?clipchamp\.com/watch/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://clipchamp.com/watch/gRXZ4ZhdDaU', 'info_dict': { 'id': 'gRXZ4ZhdDaU', 'ext': 'mp4', 'title': 'Untitled video', 'uploader': 'Alexander Schwartz', 'timestamp': 1680805580, 'upload_date': '20230406', 'thumbnail': r're:^https?://.+\.jpg', }, 'params': {'skip_download': 'm3u8'}, }] _STREAM_URL_TMPL = 'https://%s.cloudflarestream.com/%s/manifest/video.%s' _STREAM_URL_QUERY = {'parentOrigin': 'https://clipchamp.com'} def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['video'] storage_location = data.get('storage_location') if storage_location != 'cf_stream': raise ExtractorError(f'Unsupported clip storage location "{storage_location}"') path = data['download_url'] iframe = self._download_webpage( f'https://iframe.cloudflarestream.com/{path}', video_id, 'Downloading player iframe') subdomain = self._search_regex( r'\bcustomer-domain-prefix=["\']([\w-]+)["\']', iframe, 'subdomain', fatal=False) or 'customer-2ut9yn3y6fta1yxe' formats = self._extract_mpd_formats( self._STREAM_URL_TMPL % (subdomain, path, 'mpd'), video_id, query=self._STREAM_URL_QUERY, fatal=False, mpd_id='dash') formats.extend(self._extract_m3u8_formats( self._STREAM_URL_TMPL % (subdomain, path, 'm3u8'), video_id, 'mp4', query=self._STREAM_URL_QUERY, fatal=False, m3u8_id='hls')) return { 'id': video_id, 'formats': formats, 'uploader': ' '.join(traverse_obj(data, ('creator', ('first_name', 'last_name'), {str}))) or None, **traverse_obj(data, { 'title': ('project', 'project_name', {str}), 'timestamp': ('created_at', {unified_timestamp}), 'thumbnail': ('thumbnail_url', {url_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/dtube.py
yt_dlp/extractor/dtube.py
import json from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, ) class DTubeIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?d\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})' _TEST = { 'url': 'https://d.tube/#!/v/broncnutz/x380jtr1', 'md5': '9f29088fa08d699a7565ee983f56a06e', 'info_dict': { 'id': 'x380jtr1', 'ext': 'mp4', 'title': 'Lefty 3-Rings is Back Baby!! NCAA Picks', 'description': 'md5:60be222088183be3a42f196f34235776', 'uploader_id': 'broncnutz', 'upload_date': '20190107', 'timestamp': 1546854054, }, 'params': { 'format': '480p', }, } def _real_extract(self, url): uploader_id, video_id = self._match_valid_url(url).groups() result = self._download_json('https://api.steemit.com/', video_id, data=json.dumps({ 'jsonrpc': '2.0', 'method': 'get_content', 'params': [uploader_id, video_id], }).encode())['result'] metadata = json.loads(result['json_metadata']) video = metadata['video'] content = video['content'] info = video.get('info', {}) title = info.get('title') or result['title'] def canonical_url(h): if not h: return None return 'https://video.dtube.top/ipfs/' + h formats = [] for q in ('240', '480', '720', '1080', ''): video_url = canonical_url(content.get(f'video{q}hash')) if not video_url: continue format_id = (q + 'p') if q else 'Source' try: self.to_screen(f'{video_id}: Checking {format_id} video format URL') self._downloader._opener.open(video_url, timeout=5).close() except TimeoutError: self.to_screen( f'{video_id}: {format_id} URL is invalid, skipping') continue formats.append({ 'format_id': format_id, 'url': video_url, 'height': int_or_none(q), 'ext': 'mp4', }) return { 'id': video_id, 'title': title, 'description': content.get('description'), 'thumbnail': canonical_url(info.get('snaphash')), 'tags': content.get('tags') or metadata.get('tags'), 'duration': info.get('duration'), 'formats': formats, 'timestamp': parse_iso8601(result.get('created')), 'uploader_id': uploader_id, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/discogs.py
yt_dlp/extractor/discogs.py
from .common import InfoExtractor from .youtube import YoutubeIE from ..utils import traverse_obj class DiscogsReleasePlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?discogs\.com/(?P<type>release|master)/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.discogs.com/release/1-The-Persuader-Stockholm', 'info_dict': { 'id': 'release1', 'title': 'Stockholm', }, 'playlist_mincount': 7, }, { 'url': 'https://www.discogs.com/master/113-Vince-Watson-Moments-In-Time', 'info_dict': { 'id': 'master113', 'title': 'Moments In Time', }, 'playlist_mincount': 53, }] def _real_extract(self, url): playlist_id, playlist_type = self._match_valid_url(url).group('id', 'type') display_id = f'{playlist_type}{playlist_id}' response = self._download_json( f'https://api.discogs.com/{playlist_type}s/{playlist_id}', display_id) entries = [ self.url_result(video['uri'], YoutubeIE, video_title=video.get('title')) for video in traverse_obj(response, ('videos', lambda _, v: YoutubeIE.suitable(v['uri'])))] return self.playlist_result(entries, display_id, response.get('title'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/goplay.py
yt_dlp/extractor/goplay.py
import base64 import binascii import datetime as dt import hashlib import hmac import json import os import urllib.parse from .common import InfoExtractor from ..utils import ExtractorError, int_or_none from ..utils.traversal import get_first, traverse_obj class GoPlayIE(InfoExtractor): IE_NAME = 'play.tv' IE_DESC = 'PLAY (formerly goplay.be)' _VALID_URL = r'https?://(www\.)?play\.tv/video/([^/?#]+/[^/?#]+/|)(?P<id>[^/#]+)' _NETRC_MACHINE = 'goplay' _TESTS = [{ 'url': 'https://www.play.tv/video/de-slimste-mens-ter-wereld/de-slimste-mens-ter-wereld-s22/de-slimste-mens-ter-wereld-s22-aflevering-1', 'info_dict': { 'id': '2baa4560-87a0-421b-bffc-359914e3c387', 'ext': 'mp4', 'title': 'De Slimste Mens ter Wereld - S22 - Aflevering 1', 'description': r're:In aflevering 1 nemen Daan Alferink, Tess Elst en Xander De Rycke .{66}', 'series': 'De Slimste Mens ter Wereld', 'episode': 'Wordt aangekondigd', 'season_number': 22, 'episode_number': 1, 'season': 'Season 22', }, 'params': {'skip_download': True}, 'skip': 'This video is only available for registered users', }, { 'url': 'https://www.play.tv/video/1917', 'info_dict': { 'id': '40cac41d-8d29-4ef5-aa11-75047b9f0907', 'ext': 'mp4', 'title': '1917', 'description': r're:Op het hoogtepunt van de Eerste Wereldoorlog krijgen twee jonge .{94}', }, 'params': {'skip_download': True}, 'skip': 'This video is only available for registered users', }, { 'url': 'https://www.play.tv/video/de-mol/de-mol-s11/de-mol-s11-aflevering-1#autoplay', 'info_dict': { 'id': 'ecb79672-92b9-4cd9-a0d7-e2f0250681ee', 'ext': 'mp4', 'title': 'De Mol - S11 - Aflevering 1', 'description': r're:Tien kandidaten beginnen aan hun verovering van Amerika en ontmoeten .{102}', 'episode': 'Episode 1', 'series': 'De Mol', 'season_number': 11, 'episode_number': 1, 'season': 'Season 11', }, 'params': {'skip_download': True}, 'skip': 'This video is only available for registered users', }] _id_token = None def _perform_login(self, username, password): self.report_login() 
aws = AwsIdp(ie=self, pool_id='eu-west-1_dViSsKM5Y', client_id='6s1h851s8uplco5h6mqh1jac8m') self._id_token, _ = aws.authenticate(username=username, password=password) def _real_initialize(self): if not self._id_token: raise self.raise_login_required(method='password') def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) nextjs_data = self._search_nextjs_v13_data(webpage, display_id) meta = get_first(nextjs_data, ( lambda k, v: k in ('video', 'meta') and v['path'] == urllib.parse.urlparse(url).path)) video_id = meta['uuid'] info_dict = traverse_obj(meta, { 'title': ('title', {str}), 'description': ('description', {str.strip}), }) if traverse_obj(meta, ('program', 'subtype')) != 'movie': for season_data in traverse_obj(nextjs_data, (..., 'playlists', ..., {dict})): episode_data = traverse_obj(season_data, ('videos', lambda _, v: v['videoId'] == video_id, any)) if not episode_data: continue season_number = traverse_obj(season_data, ('season', {int_or_none})) info_dict.update({ 'episode': traverse_obj(episode_data, ('episodeTitle', {str})), 'episode_number': traverse_obj(episode_data, ('episodeNumber', {int_or_none})), 'season_number': season_number, 'series': self._search_regex( fr'^(.+)? 
- S{season_number} - ', info_dict.get('title'), 'series', default=None), }) break api = self._download_json( f'https://api.play.tv/web/v1/videos/long-form/{video_id}', video_id, headers={ 'Authorization': f'Bearer {self._id_token}', **self.geo_verification_headers(), }) if 'manifestUrls' in api: formats, subtitles = self._extract_m3u8_formats_and_subtitles( api['manifestUrls']['hls'], video_id, ext='mp4', m3u8_id='HLS') else: if 'ssai' not in api: raise ExtractorError('expecting Google SSAI stream') ssai_content_source_id = api['ssai']['contentSourceID'] ssai_video_id = api['ssai']['videoID'] dai = self._download_json( f'https://dai.google.com/ondemand/dash/content/{ssai_content_source_id}/vid/{ssai_video_id}/streams', video_id, data=b'{"api-key":"null"}', headers={'content-type': 'application/json'}) periods = self._extract_mpd_periods(dai['stream_manifest'], video_id) # skip pre-roll and mid-roll ads periods = [p for p in periods if '-ad-' not in p['id']] formats, subtitles = self._merge_mpd_periods(periods) info_dict.update({ 'id': video_id, 'formats': formats, 'subtitles': subtitles, }) return info_dict # Taken from https://github.com/add-ons/plugin.video.viervijfzes/blob/master/resources/lib/viervijfzes/auth_awsidp.py # Released into Public domain by https://github.com/michaelarnauts class InvalidLoginException(ExtractorError): """ The login credentials are invalid """ class AuthenticationException(ExtractorError): """ Something went wrong while logging in """ class AwsIdp: """ AWS Identity Provider """ def __init__(self, ie, pool_id, client_id): """ :param InfoExtrator ie: The extractor that instantiated this class. :param str pool_id: The AWS user pool to connect to (format: <region>_<poolid>). E.g.: eu-west-1_aLkOfYN3T :param str client_id: The client application ID (the ID of the application connecting) """ self.ie = ie self.pool_id = pool_id if '_' not in self.pool_id: raise ValueError('Invalid pool_id format. 
Should be <region>_<poolid>.') self.client_id = client_id self.region = self.pool_id.split('_')[0] self.url = f'https://cognito-idp.{self.region}.amazonaws.com/' # Initialize the values # https://github.com/aws/amazon-cognito-identity-js/blob/master/src/AuthenticationHelper.js#L22 self.n_hex = ( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' '83655D23DCA3AD961C62F356208552BB9ED529077096966D' '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF') # https://github.com/aws/amazon-cognito-identity-js/blob/master/src/AuthenticationHelper.js#L49 self.g_hex = '2' self.info_bits = bytearray('Caldera Derived Key', 'utf-8') self.big_n = self.__hex_to_long(self.n_hex) self.g = self.__hex_to_long(self.g_hex) self.k = self.__hex_to_long(self.__hex_hash('00' + self.n_hex + '0' + self.g_hex)) self.small_a_value = self.__generate_random_small_a() self.large_a_value = self.__calculate_a() def authenticate(self, username, password): """ Authenticate with a username and password. 
""" # Step 1: First initiate an authentication request auth_data_dict = self.__get_authentication_request(username) auth_data = json.dumps(auth_data_dict).encode() auth_headers = { 'X-Amz-Target': 'AWSCognitoIdentityProviderService.InitiateAuth', 'Accept-Encoding': 'identity', 'Content-Type': 'application/x-amz-json-1.1', } auth_response_json = self.ie._download_json( self.url, None, data=auth_data, headers=auth_headers, note='Authenticating username', errnote='Invalid username') challenge_parameters = auth_response_json.get('ChallengeParameters') if auth_response_json.get('ChallengeName') != 'PASSWORD_VERIFIER': raise AuthenticationException(auth_response_json['message']) # Step 2: Respond to the Challenge with a valid ChallengeResponse challenge_request = self.__get_challenge_response_request(challenge_parameters, password) challenge_data = json.dumps(challenge_request).encode() challenge_headers = { 'X-Amz-Target': 'AWSCognitoIdentityProviderService.RespondToAuthChallenge', 'Content-Type': 'application/x-amz-json-1.1', } auth_response_json = self.ie._download_json( self.url, None, data=challenge_data, headers=challenge_headers, note='Authenticating password', errnote='Invalid password') if 'message' in auth_response_json: raise InvalidLoginException(auth_response_json['message']) return ( auth_response_json['AuthenticationResult']['IdToken'], auth_response_json['AuthenticationResult']['RefreshToken'], ) def __get_authentication_request(self, username): """ :param str username: The username to use :return: A full Authorization request. :rtype: dict """ return { 'AuthParameters': { 'USERNAME': username, 'SRP_A': self.__long_to_hex(self.large_a_value), }, 'AuthFlow': 'USER_SRP_AUTH', 'ClientId': self.client_id, } def __get_challenge_response_request(self, challenge_parameters, password): """ Create a Challenge Response Request object. :param dict[str,str|imt] challenge_parameters: The parameters for the challenge. :param str password: The password. 
:return: A valid and full request data object to use as a response for a challenge. :rtype: dict """ user_id = challenge_parameters['USERNAME'] user_id_for_srp = challenge_parameters['USER_ID_FOR_SRP'] srp_b = challenge_parameters['SRP_B'] salt = challenge_parameters['SALT'] secret_block = challenge_parameters['SECRET_BLOCK'] timestamp = self.__get_current_timestamp() # Get a HKDF key for the password, SrpB and the Salt hkdf = self.__get_hkdf_key_for_password( user_id_for_srp, password, self.__hex_to_long(srp_b), salt, ) secret_block_bytes = base64.standard_b64decode(secret_block) # the message is a combo of the pool_id, provided SRP userId, the Secret and Timestamp msg = \ bytearray(self.pool_id.split('_')[1], 'utf-8') + \ bytearray(user_id_for_srp, 'utf-8') + \ bytearray(secret_block_bytes) + \ bytearray(timestamp, 'utf-8') hmac_obj = hmac.new(hkdf, msg, digestmod=hashlib.sha256) signature_string = base64.standard_b64encode(hmac_obj.digest()).decode('utf-8') return { 'ChallengeResponses': { 'USERNAME': user_id, 'TIMESTAMP': timestamp, 'PASSWORD_CLAIM_SECRET_BLOCK': secret_block, 'PASSWORD_CLAIM_SIGNATURE': signature_string, }, 'ChallengeName': 'PASSWORD_VERIFIER', 'ClientId': self.client_id, } def __get_hkdf_key_for_password(self, username, password, server_b_value, salt): """ Calculates the final hkdf based on computed S value, and computed U value and the key. :param str username: Username. :param str password: Password. :param int server_b_value: Server B value. :param int salt: Generated salt. :return Computed HKDF value. 
:rtype: object """ u_value = self.__calculate_u(self.large_a_value, server_b_value) if u_value == 0: raise ValueError('U cannot be zero.') username_password = '{}{}:{}'.format(self.pool_id.split('_')[1], username, password) username_password_hash = self.__hash_sha256(username_password.encode()) x_value = self.__hex_to_long(self.__hex_hash(self.__pad_hex(salt) + username_password_hash)) g_mod_pow_xn = pow(self.g, x_value, self.big_n) int_value2 = server_b_value - self.k * g_mod_pow_xn s_value = pow(int_value2, self.small_a_value + u_value * x_value, self.big_n) return self.__compute_hkdf( bytearray.fromhex(self.__pad_hex(s_value)), bytearray.fromhex(self.__pad_hex(self.__long_to_hex(u_value))), ) def __compute_hkdf(self, ikm, salt): """ Standard hkdf algorithm :param {Buffer} ikm Input key material. :param {Buffer} salt Salt value. :return {Buffer} Strong key material. """ prk = hmac.new(salt, ikm, hashlib.sha256).digest() info_bits_update = self.info_bits + bytearray(chr(1), 'utf-8') hmac_hash = hmac.new(prk, info_bits_update, hashlib.sha256).digest() return hmac_hash[:16] def __calculate_u(self, big_a, big_b): """ Calculate the client's value U which is the hash of A and B :param int big_a: Large A value. :param int big_b: Server B value. :return Computed U value. :rtype: int """ u_hex_hash = self.__hex_hash(self.__pad_hex(big_a) + self.__pad_hex(big_b)) return self.__hex_to_long(u_hex_hash) def __generate_random_small_a(self): """ Helper function to generate a random big integer :return a random value. :rtype: int """ random_long_int = self.__get_random(128) return random_long_int % self.big_n def __calculate_a(self): """ Calculate the client's public value A = g^a%N with the generated random number a :return Computed large A. 
:rtype: int """ big_a = pow(self.g, self.small_a_value, self.big_n) # safety check if (big_a % self.big_n) == 0: raise ValueError('Safety check for A failed') return big_a @staticmethod def __long_to_hex(long_num): return f'{long_num:x}' @staticmethod def __hex_to_long(hex_string): return int(hex_string, 16) @staticmethod def __hex_hash(hex_string): return AwsIdp.__hash_sha256(bytearray.fromhex(hex_string)) @staticmethod def __hash_sha256(buf): """AuthenticationHelper.hash""" digest = hashlib.sha256(buf).hexdigest() return (64 - len(digest)) * '0' + digest @staticmethod def __pad_hex(long_int): """ Converts a Long integer (or hex string) to hex format padded with zeroes for hashing :param int|str long_int: Number or string to pad. :return Padded hex string. :rtype: str """ if not isinstance(long_int, str): hash_str = AwsIdp.__long_to_hex(long_int) else: hash_str = long_int if len(hash_str) % 2 == 1: hash_str = f'0{hash_str}' elif hash_str[0] in '89ABCDEFabcdef': hash_str = f'00{hash_str}' return hash_str @staticmethod def __get_random(nbytes): random_hex = binascii.hexlify(os.urandom(nbytes)) return AwsIdp.__hex_to_long(random_hex) @staticmethod def __get_current_timestamp(): """ Creates a timestamp with the correct English format. :return: timestamp in format 'Sun Jan 27 19:00:04 UTC 2019' :rtype: str """ # We need US only data, so we cannot just do a strftime: # Sun Jan 27 19:00:04 UTC 2019 months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] time_now = dt.datetime.now(dt.timezone.utc) format_string = f'{days[time_now.weekday()]} {months[time_now.month]} {time_now.day} %H:%M:%S UTC %Y' return time_now.strftime(format_string) def __str__(self): return 'AWS IDP Client for:\nRegion: {}\nPoolId: {}\nAppId: {}'.format( self.region, self.pool_id.split('_')[1], self.client_id, )
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/kickstarter.py
yt_dlp/extractor/kickstarter.py
from .common import InfoExtractor from ..utils import smuggle_url class KickStarterIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?kickstarter\.com/projects/(?P<id>[^/]*)/.*' _TESTS = [{ 'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant/description', 'md5': 'c81addca81327ffa66c642b5d8b08cab', 'info_dict': { 'id': '1404461844', 'ext': 'mp4', 'title': 'Intersection: The Story of Josh Grant by Kyle Cowling', 'description': ( 'A unique motocross documentary that examines the ' 'life and mind of one of sports most elite athletes: Josh Grant.' ), }, }, { 'note': 'Embedded video (not using the native kickstarter video service)', 'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178', 'info_dict': { 'id': '78704821', 'ext': 'mp4', 'uploader_id': 'pebble', 'uploader': 'Pebble Technology', 'title': 'Pebble iOS Notifications', }, 'add_ie': ['Vimeo'], }, { 'url': 'https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html', 'info_dict': { 'id': '1420158244', 'ext': 'mp4', 'title': 'Power Drive 2000', }, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<title>\s*(.*?)(?:\s*&mdash;\s*Kickstarter)?\s*</title>', webpage, 'title') video_url = self._search_regex( r'data-video-url="(.*?)"', webpage, 'video URL', default=None) if video_url is None: # No native kickstarter, look for embedded videos return { '_type': 'url_transparent', 'ie_key': 'Generic', 'url': smuggle_url(url, {'to_generic': True}), 'title': title, } thumbnail = self._og_search_thumbnail(webpage, default=None) if thumbnail is None: thumbnail = self._html_search_regex( r'<img[^>]+class="[^"]+\s*poster\s*[^"]+"[^>]+src="([^"]+)"', webpage, 'thumbnail image', fatal=False) return { 'id': video_id, 'url': video_url, 'title': title, 'description': self._og_search_description(webpage, default=None), 
'thumbnail': thumbnail, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/digiteka.py
yt_dlp/extractor/digiteka.py
from .common import InfoExtractor from ..utils import int_or_none, url_or_none from ..utils.traversal import traverse_obj class DigitekaIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:www\.)?(?:digiteka\.net|ultimedia\.com)/ (?: deliver/ (?P<embed_type> generic| musique ) (?:/[^/]+)*/ (?: src| article )| default/index/video (?P<site_type> generic| music ) /id )/(?P<id>[\d+a-z]+)''' _EMBED_REGEX = [r'<(?:iframe|script)[^>]+src=["\'](?P<url>(?:https?:)?//(?:www\.)?ultimedia\.com/deliver/(?:generic|musique)(?:/[^/]+)*/(?:src|article)/[\d+a-z]+)'] _TESTS = [{ 'url': 'https://www.ultimedia.com/default/index/videogeneric/id/3x5x55k', 'info_dict': { 'id': '3x5x55k', 'ext': 'mp4', 'title': 'Il est passionné de DS', 'thumbnail': r're:^https?://.*\.jpg', 'duration': 89, 'upload_date': '20251012', 'timestamp': 1760285363, 'uploader_id': '3pz33', }, 'params': {'skip_download': True}, }] _IFRAME_MD_ID = '01836272' # One static ID working for Ultimedia iframes def _real_extract(self, url): video_id = self._match_id(url) video_info = self._download_json( f'https://www.ultimedia.com/player/getConf/{self._IFRAME_MD_ID}/1/{video_id}', video_id, note='Downloading player configuration')['video'] formats = [] subtitles = {} if hls_url := traverse_obj(video_info, ('media_sources', 'hls', 'hls_auto', {url_or_none})): fmts, subs = self._extract_m3u8_formats_and_subtitles( hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) for format_id, mp4_url in traverse_obj(video_info, ('media_sources', 'mp4', {dict.items}, ...)): if not mp4_url: continue formats.append({ 'url': mp4_url, 'format_id': format_id, 'height': int_or_none(format_id.partition('_')[2]), 'ext': 'mp4', }) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(video_info, { 'title': ('title', {str}), 'thumbnail': ('image', {url_or_none}), 'duration': ('duration', {int_or_none}), 'timestamp': ('creationDate', {int_or_none}), 
'uploader_id': ('ownerId', {str}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/hypem.py
yt_dlp/extractor/hypem.py
from .common import InfoExtractor from ..utils import int_or_none class HypemIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?hypem\.com/track/(?P<id>[0-9a-z]{5})' _TEST = { 'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME', 'md5': 'b9cc91b5af8995e9f0c1cee04c575828', 'info_dict': { 'id': '1v6ga', 'ext': 'mp3', 'title': 'Tame', 'uploader': 'BODYWORK', 'timestamp': 1371810457, 'upload_date': '20130621', }, } def _real_extract(self, url): track_id = self._match_id(url) response = self._download_webpage(url, track_id) track = self._parse_json(self._html_search_regex( r'(?s)<script\s+type="application/json"\s+id="displayList-data">(.+?)</script>', response, 'tracks'), track_id)['tracks'][0] track_id = track['id'] title = track['song'] final_url = self._download_json( 'http://hypem.com/serve/source/{}/{}'.format(track_id, track['key']), track_id, 'Downloading metadata', headers={ 'Content-Type': 'application/json', })['url'] return { 'id': track_id, 'url': final_url, 'ext': 'mp3', 'title': title, 'uploader': track.get('artist'), 'duration': int_or_none(track.get('time')), 'timestamp': int_or_none(track.get('ts')), 'track': title, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/unity.py
yt_dlp/extractor/unity.py
from .common import InfoExtractor from .youtube import YoutubeIE class UnityIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?unity3d\.com/learn/tutorials/(?:[^/]+/)*(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://unity3d.com/learn/tutorials/topics/animation/animate-anything-mecanim', 'info_dict': { 'id': 'jWuNtik0C8E', 'ext': 'mp4', 'title': 'Live Training 22nd September 2014 - Animate Anything', 'description': 'md5:e54913114bd45a554c56cdde7669636e', 'duration': 2893, 'uploader': 'Unity', 'uploader_id': 'Unity3D', 'upload_date': '20140926', }, }, { 'url': 'https://unity3d.com/learn/tutorials/projects/2d-ufo-tutorial/following-player-camera?playlist=25844', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) youtube_id = self._search_regex( r'data-video-id="([_0-9a-zA-Z-]+)"', webpage, 'youtube ID') return self.url_result(youtube_id, ie=YoutubeIE.ie_key(), video_id=video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/triller.py
yt_dlp/extractor/triller.py
import itertools
import json
import re

from .common import InfoExtractor
from ..networking import HEADRequest
from ..utils import (
    ExtractorError,
    UnsupportedError,
    determine_ext,
    int_or_none,
    parse_resolution,
    str_or_none,
    traverse_obj,
    unified_timestamp,
    url_basename,
    url_or_none,
    urljoin,
)


class TrillerBaseIE(InfoExtractor):
    """Shared login, comment and metadata parsing for triller.co extractors."""
    _NETRC_MACHINE = 'triller'
    _API_BASE_URL = 'https://social.triller.co/v1.5'
    # Shared mutable class-level headers: the Authorization token obtained by
    # any subclass (login or guest session) is reused by the others.
    _API_HEADERS = {'Origin': 'https://triller.co'}

    def _perform_login(self, username, password):
        """Authenticate against the Triller API and cache the bearer token."""
        if self._API_HEADERS.get('Authorization'):
            # Already logged in (token shared across extractor instances).
            return

        headers = {**self._API_HEADERS, 'Content-Type': 'application/json'}
        user_check = traverse_obj(self._download_json(
            f'{self._API_BASE_URL}/api/user/is-valid-username', None, note='Checking username',
            fatal=False, expected_status=400, headers=headers,
            data=json.dumps({'username': username}, separators=(',', ':')).encode()), 'status')
        if user_check:  # endpoint returns `"status":false` if username exists
            raise ExtractorError('Unable to login: Invalid username', expected=True)

        login = self._download_json(
            f'{self._API_BASE_URL}/user/auth', None, note='Logging in',
            fatal=False, expected_status=400, headers=headers, data=json.dumps({
                'username': username,
                'password': password,
            }, separators=(',', ':')).encode()) or {}
        if not login.get('auth_token'):
            # Error code 1008 observed to mean a wrong password; anything else
            # is reported generically.
            if login.get('error') == 1008:
                raise ExtractorError('Unable to login: Incorrect password', expected=True)
            raise ExtractorError('Unable to login')

        self._API_HEADERS['Authorization'] = f'Bearer {login["auth_token"]}'

    def _get_comments(self, video_id, limit=15):
        """Yield up to `limit` comment dicts for a video; yields nothing on failure."""
        comment_info = self._download_json(
            f'{self._API_BASE_URL}/api/videos/{video_id}/comments_v2',
            video_id, fatal=False, note='Downloading comments API JSON',
            headers=self._API_HEADERS, query={'limit': limit}) or {}
        if not comment_info.get('comments'):
            return
        yield from traverse_obj(comment_info, ('comments', ..., {
            'id': ('id', {str_or_none}),
            'text': 'body',
            'author': ('author', 'username'),
            'author_id': ('author', 'user_id'),
            'timestamp': ('timestamp', {unified_timestamp}),
        }))

    def _parse_video_info(self, video_info, username, user_id, display_id=None):
        """Build an info dict from one API `videos` entry.

        `user_id` may be None (single-video page); it is then taken from the
        entry itself. `display_id` is the UUID used in canonical URLs.
        """
        video_id = str(video_info['id'])
        display_id = display_id or video_info.get('video_uuid')

        # Any media URL containing '/copyright/' signals a takedown stub.
        if traverse_obj(video_info, (
                None, ('transcoded_url', 'video_url', 'stream_url', 'audio_url'),
                {lambda x: re.search(r'/copyright/', x)}), get_all=False):
            self.raise_no_formats('This video has been removed due to licensing restrictions', expected=True)

        def format_info(url):
            # Minimal format dict derived from a bare media URL.
            return {
                'url': url,
                'ext': determine_ext(url),
                'format_id': url_basename(url).split('.')[0],
            }

        formats = []

        # HLS master playlist, when the transcoded URL is an m3u8.
        if determine_ext(video_info.get('transcoded_url')) == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                video_info['transcoded_url'], video_id, 'mp4', m3u8_id='hls', fatal=False))

        # Pre-rendered MP4 variants listed in `video_set`.
        for video in traverse_obj(video_info, ('video_set', lambda _, v: url_or_none(v['url']))):
            formats.append({
                **format_info(video['url']),
                **parse_resolution(video.get('resolution')),
                'vcodec': video.get('codec'),
                'vbr': int_or_none(video.get('bitrate'), 1000),
            })

        # Direct/stream URL fallback (first non-empty of the two keys).
        video_url = traverse_obj(video_info, 'video_url', 'stream_url', expected_type=url_or_none)
        if video_url:
            formats.append({
                **format_info(video_url),
                'vcodec': 'h264',
                **traverse_obj(video_info, {
                    'width': 'width',
                    'height': 'height',
                    'filesize': 'filesize',
                }, expected_type=int_or_none),
            })

        # Audio-only rendition, if present.
        audio_url = url_or_none(video_info.get('audio_url'))
        if audio_url:
            formats.append(format_info(audio_url))

        comment_count = traverse_obj(video_info, ('comment_count', {int_or_none}))

        return {
            'id': video_id,
            'display_id': display_id,
            'uploader': username,
            'uploader_id': user_id or traverse_obj(video_info, ('user', 'user_id', {str_or_none})),
            'webpage_url': urljoin(f'https://triller.co/@{username}/video/', display_id),
            'uploader_url': f'https://triller.co/@{username}',
            # Entries produced via TrillerUserIE are still attributed to the
            # single-video extractor.
            'extractor_key': TrillerIE.ie_key(),
            'extractor': TrillerIE.IE_NAME,
            'formats': formats,
            'comment_count': comment_count,
            '__post_extractor': self.extract_comments(video_id, comment_count),
            **traverse_obj(video_info, {
                # The description doubles as the title, flattened to one line.
                'title': ('description', {lambda x: x.replace('\r\n', ' ')}),
                'description': 'description',
                # Creator name comes from the embedded user object, or from
                # the `users` list entry matching `user_id`.
                'creator': ((('user'), ('users', lambda _, v: str(v['user_id']) == user_id)), 'name'),
                'thumbnail': ('thumbnail_url', {url_or_none}),
                'timestamp': ('timestamp', {unified_timestamp}),
                'duration': ('duration', {int_or_none}),
                'view_count': ('play_count', {int_or_none}),
                'like_count': ('likes_count', {int_or_none}),
                'artist': 'song_artist',
                'track': 'song_title',
            }, get_all=False),
        }


class TrillerIE(TrillerBaseIE):
    """Single video on triller.co, addressed by @username and a UUID."""
    _VALID_URL = r'''(?x)
            https?://(?:www\.)?triller\.co/
            @(?P<username>[\w.]+)/video/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})
        '''
    _TESTS = [{
        'url': 'https://triller.co/@theestallion/video/2358fcd7-3df2-4c77-84c8-1d091610a6cf',
        'md5': '228662d783923b60d78395fedddc0a20',
        'info_dict': {
            'id': '71595734',
            'ext': 'mp4',
            'title': 'md5:9a2bf9435c5c4292678996a464669416',
            'thumbnail': r're:^https://uploads\.cdn\.triller\.co/.+\.jpg$',
            'description': 'md5:9a2bf9435c5c4292678996a464669416',
            'uploader': 'theestallion',
            'uploader_id': '18992236',
            'creator': 'Megan Thee Stallion',
            'timestamp': 1660598222,
            'upload_date': '20220815',
            'duration': 47,
            'view_count': int,
            'like_count': int,
            'artist': 'Megan Thee Stallion',
            'track': 'Her',
            'uploader_url': 'https://triller.co/@theestallion',
            'comment_count': int,
        },
        'skip': 'This video has been removed due to licensing restrictions',
    }, {
        'url': 'https://triller.co/@charlidamelio/video/46c6fcfa-aa9e-4503-a50c-68444f44cddc',
        'md5': '874055f462af5b0699b9dbb527a505a0',
        'info_dict': {
            'id': '71621339',
            'ext': 'mp4',
            'title': 'md5:4c91ea82760fe0fffb71b8c3aa7295fc',
            'display_id': '46c6fcfa-aa9e-4503-a50c-68444f44cddc',
            'thumbnail': r're:^https://uploads\.cdn\.triller\.co/.+\.jpg$',
            'description': 'md5:4c91ea82760fe0fffb71b8c3aa7295fc',
            'uploader': 'charlidamelio',
            'uploader_id': '1875551',
            'creator': 'charli damelio',
            'timestamp': 1660773354,
            'upload_date': '20220817',
            'duration': 16,
            'view_count': int,
            'like_count': int,
            'artist': 'Dixie',
            'track': 'Someone to Blame',
            'uploader_url': 'https://triller.co/@charlidamelio',
            'comment_count': int,
        },
    }, {
        'url': 'https://triller.co/@theestallion/video/07f35f38-1f51-48e2-8c5f-f7a8e829988f',
        'md5': 'af7b3553e4b8bfca507636471ee2eb41',
        'info_dict': {
            'id': '71837829',
            'ext': 'mp4',
            'title': 'UNGRATEFUL VIDEO OUT NOW 👏🏾👏🏾👏🏾 💙💙 link my bio #womeninhiphop',
            'display_id': '07f35f38-1f51-48e2-8c5f-f7a8e829988f',
            'thumbnail': r're:^https://uploads\.cdn\.triller\.co/.+\.jpg$',
            'description': 'UNGRATEFUL VIDEO OUT NOW 👏🏾👏🏾👏🏾 💙💙 link my bio\r\n #womeninhiphop',
            'uploader': 'theestallion',
            'uploader_id': '18992236',
            'creator': 'Megan Thee Stallion',
            'timestamp': 1662486178,
            'upload_date': '20220906',
            'duration': 30,
            'view_count': int,
            'like_count': int,
            'artist': 'Unknown',
            'track': 'Unknown',
            'uploader_url': 'https://triller.co/@theestallion',
            'comment_count': int,
        },
    }]

    def _real_extract(self, url):
        username, display_id = self._match_valid_url(url).group('username', 'id')

        # The API accepts the display UUID directly and returns a one-element
        # `videos` list.
        video_info = self._download_json(
            f'{self._API_BASE_URL}/api/videos/{display_id}', display_id,
            headers=self._API_HEADERS)['videos'][0]

        return self._parse_video_info(video_info, username, None, display_id)


class TrillerUserIE(TrillerBaseIE):
    """All videos of a triller.co user profile, as a playlist."""
    _VALID_URL = r'https?://(?:www\.)?triller\.co/@(?P<id>[\w.]+)/?(?:$|[#?])'
    _TESTS = [{
        'url': 'https://triller.co/@theestallion',
        'playlist_mincount': 12,
        'info_dict': {
            'id': '18992236',
            'title': 'theestallion',
            'thumbnail': r're:^https://uploads\.cdn\.triller\.co/.+\.jpg$',
        },
    }, {
        'url': 'https://triller.co/@charlidamelio',
        'playlist_mincount': 150,
        'info_dict': {
            'id': '1875551',
            'title': 'charlidamelio',
            'thumbnail': r're:^https://uploads\.cdn\.triller\.co/.+\.jpg$',
        },
    }]

    def _real_initialize(self):
        # User listings need *some* bearer token; create an anonymous guest
        # session unless a login already provided one.
        if not self._API_HEADERS.get('Authorization'):
            guest = self._download_json(
                f'{self._API_BASE_URL}/user/create_guest', None,
                note='Creating guest session', data=b'', headers=self._API_HEADERS, query={
                    'platform': 'Web',
                    'app_version': '',
                })
            if not guest.get('auth_token'):
                raise ExtractorError('Unable to fetch required auth token for user extraction')

            self._API_HEADERS['Authorization'] = f'Bearer {guest["auth_token"]}'

    def _entries(self, username, user_id, limit=6):
        """Generate parsed video entries, paginating by `before_time` cursor."""
        query = {'limit': limit}
        for page in itertools.count(1):
            videos = self._download_json(
                f'{self._API_BASE_URL}/api/users/{user_id}/videos',
                username, note=f'Downloading user video list page {page}',
                headers=self._API_HEADERS, query=query)

            for video in traverse_obj(videos, ('videos', ...)):
                yield self._parse_video_info(video, username, user_id)

            # Cursor = timestamp of the last video on this page; an empty
            # page (or missing timestamp) ends the pagination.
            query['before_time'] = traverse_obj(videos, ('videos', -1, 'timestamp'))
            if not query['before_time']:
                break

    def _real_extract(self, url):
        username = self._match_id(url)

        user_info = traverse_obj(self._download_json(
            f'{self._API_BASE_URL}/api/users/by_username/{username}', username,
            note='Downloading user info', headers=self._API_HEADERS), ('user', {dict})) or {}

        if user_info.get('private') and user_info.get('followed_by_me') not in (True, 'true'):
            raise ExtractorError('This user profile is private', expected=True)
        elif traverse_obj(user_info, (('blocked_by_user', 'blocking_user'), {bool}), get_all=False):
            raise ExtractorError('The author of the video is blocked', expected=True)

        user_id = str_or_none(user_info.get('user_id'))
        if not user_id:
            raise ExtractorError('Unable to extract user ID')

        return self.playlist_result(
            self._entries(username, user_id), user_id, username, thumbnail=user_info.get('avatar_url'))


class TrillerShortIE(InfoExtractor):
    """Short v.triller.co links: follow the redirect and delegate."""
    _VALID_URL = r'https?://v\.triller\.co/(?P<id>\w+)'
    _TESTS = [{
        'url': 'https://v.triller.co/WWZNWk',
        'md5': '5eb8dc2c971bd8cd794ec9e8d5e9d101',
        'info_dict': {
            'id': '66210052',
            'ext': 'mp4',
            'title': 'md5:2dfc89d154cd91a4a18cd9582ba03e16',
            'display_id': 'f4480e1f-fb4e-45b9-a44c-9e6c679ce7eb',
            'thumbnail': r're:^https://uploads\.cdn\.triller\.co/.+\.jpg$',
            'description': 'md5:2dfc89d154cd91a4a18cd9582ba03e16',
            'uploader': 'statefairent',
            'uploader_id': '487545193',
            'creator': 'Official Summer Fair of LA',
            'timestamp': 1629655457,
            'upload_date': '20210822',
            'duration': 19,
            'view_count': int,
            'like_count': int,
            'artist': 'Unknown',
            'track': 'Unknown',
            'uploader_url': 'https://triller.co/@statefairent',
            'comment_count': int,
        },
    }]

    def _real_extract(self, url):
        # HEAD request only: we just need the redirect target.
        real_url = self._request_webpage(HEADRequest(url), self._match_id(url)).url
        if self.suitable(real_url):  # Prevent infinite loop in case redirect fails
            raise UnsupportedError(real_url)
        return self.url_result(real_url)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/playplustv.py
yt_dlp/extractor/playplustv.py
import json

from .common import InfoExtractor
from ..networking import PUTRequest
from ..networking.exceptions import HTTPError
from ..utils import ExtractorError, clean_html, int_or_none


class PlayPlusTVIE(InfoExtractor):
    """VOD media on playplus.tv; requires account credentials (BR-geo service)."""
    _VALID_URL = r'https?://(?:www\.)?playplus\.(?:com|tv)/VOD/(?P<project_id>[0-9]+)/(?P<id>[0-9a-f]{32})'
    _TEST = {
        'url': 'https://www.playplus.tv/VOD/7572/db8d274a5163424e967f35a30ddafb8e',
        'md5': 'd078cb89d7ab6b9df37ce23c647aef72',
        'info_dict': {
            'id': 'db8d274a5163424e967f35a30ddafb8e',
            'ext': 'mp4',
            'title': 'Capítulo 179 - Final',
            'description': 'md5:01085d62d8033a1e34121d3c3cabc838',
            'timestamp': 1529992740,
            'upload_date': '20180626',
        },
        'skip': 'Requires account credential',
    }
    _NETRC_MACHINE = 'playplustv'
    _GEO_COUNTRIES = ['BR']
    _token = None       # bearer token obtained by _perform_login
    _profile_id = None  # id of the first profile of the logged-in account

    def _call_api(self, resource, video_id=None, query=None):
        """Call the media API; endpoints follow a 'get<Resource>' naming scheme."""
        return self._download_json('https://api.playplus.tv/api/media/v2/get' + resource, video_id, headers={
            'Authorization': 'Bearer ' + self._token,
        }, query=query)

    def _perform_login(self, username, password):
        """Log in, store the bearer token and the account's first profile id.

        Raises a user-facing ExtractorError with the server's message on a
        401 (bad credentials) response.
        """
        req = PUTRequest(
            'https://api.playplus.tv/api/web/login', json.dumps({
                'email': username,
                'password': password,
            }).encode(), {
                'Content-Type': 'application/json; charset=utf-8',
            })

        try:
            self._token = self._download_json(req, None)['token']
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                raise ExtractorError(self._parse_json(
                    e.cause.response.read(), None)['errorMessage'], expected=True)
            raise

        # Fix: store under the attribute declared on the class; previously
        # this was written to an undeclared `self._profile` while the
        # declared `_profile_id` stayed unused.
        self._profile_id = self._call_api('Profiles')['list'][0]['_id']

    def _real_initialize(self):
        if not self._token:
            self.raise_login_required(method='password')

    def _real_extract(self, url):
        project_id, media_id = self._match_valid_url(url).groups()
        media = self._call_api(
            'Media', media_id, {
                'profileId': self._profile_id,
                'projectId': project_id,
                'mediaId': media_id,
            })['obj']
        title = media['title']

        # Direct file variants; entries without a URL are skipped.
        formats = []
        for f in media.get('files', []):
            f_url = f.get('url')
            if not f_url:
                continue
            file_info = f.get('fileInfo') or {}
            formats.append({
                'url': f_url,
                'width': int_or_none(file_info.get('width')),
                'height': int_or_none(file_info.get('height')),
            })

        thumbnails = []
        for thumb in media.get('thumbs', []):
            thumb_url = thumb.get('url')
            if not thumb_url:
                continue
            thumbnails.append({
                'url': thumb_url,
                'width': int_or_none(thumb.get('width')),
                'height': int_or_none(thumb.get('height')),
            })

        return {
            'id': media_id,
            'title': title,
            'formats': formats,
            'thumbnails': thumbnails,
            'description': clean_html(media.get('description')) or media.get('shortDescription'),
            # publishDate is in milliseconds.
            'timestamp': int_or_none(media.get('publishDate'), 1000),
            'view_count': int_or_none(media.get('numberOfViews')),
            'comment_count': int_or_none(media.get('numberOfComments')),
            'tags': media.get('tags'),
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/jstream.py
yt_dlp/extractor/jstream.py
import base64
import json
import re

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    js_to_json,
    remove_start,
)


class JStreamIE(InfoExtractor):
    """Extractor for J-Stream (stream.ne.jp CDN) videos via internal jstream: URLs."""
    # group "id" only exists for compliance, not directly used in requests
    # also all components are mandatory
    _VALID_URL = r'jstream:(?P<host>www\d+):(?P<id>(?P<publisher>[a-z0-9]+):(?P<mid>\d+))'

    _TESTS = [{
        'url': 'jstream:www50:eqd638pvwx:752',
        'info_dict': {
            'id': 'eqd638pvwx:752',
            'ext': 'mp4',
            'title': '阪神淡路大震災 激震の記録2020年版 解説動画',
            'duration': 672,
            'thumbnail': r're:https?://eqd638pvwx\.eq\.webcdn\.stream\.ne\.jp/.+\.jpg',
        },
    }]

    def _parse_jsonp(self, callback, string, video_id):
        # JSONP: skip past the callback wrapper and parse the JSON payload.
        return self._search_json(rf'\s*{re.escape(callback)}\s*\(', string, callback, video_id)

    def _find_formats(self, video_id, movie_list_hls, host, publisher, subtitles):
        """Yield HLS formats from `movie_list_hls`, merging subtitles into `subtitles`.

        Only entries whose `text` starts with 'auto' are expanded; the suffix
        after 'auto_' (if any) is used as the m3u8 format id.
        """
        for value in movie_list_hls:
            text = value.get('text') or ''
            if not text.startswith('auto'):
                continue
            m3u8_id = remove_start(remove_start(text, 'auto'), '_') or None
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                f'https://{publisher}.eq.webcdn.stream.ne.jp/{host}/{publisher}/jmc_pub/{value.get("url")}',
                video_id, 'mp4', m3u8_id=m3u8_id)
            # Accumulate subtitles across all playlists into the caller's dict.
            self._merge_subtitles(subs, target=subtitles)
            yield from fmts

    def _real_extract(self, url):
        host, publisher, mid, video_id = self._match_valid_url(url).group('host', 'publisher', 'mid', 'id')
        # Metadata is served as a JSONP document keyed by media id.
        video_info_jsonp = self._download_webpage(
            f'https://{publisher}.eq.webcdn.stream.ne.jp/{host}/{publisher}/jmc_pub/eq_meta/v1/{mid}.jsonp',
            video_id, 'Requesting video info')
        video_info = self._parse_jsonp('metaDataResult', video_info_jsonp, video_id)['movie']
        subtitles = {}
        formats = list(self._find_formats(video_id, video_info.get('movie_list_hls'), host, publisher, subtitles))
        self._remove_duplicate_formats(formats)
        return {
            'id': video_id,
            'title': video_info.get('title'),
            'duration': float_or_none(video_info.get('duration')),
            'thumbnail': video_info.get('thumbnail_url'),
            'formats': formats,
            'subtitles': subtitles,
        }

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """Yield jstream: URLs for players embedded in an arbitrary webpage."""
        # check for eligiblity of webpage
        # https://support.eq.stream.co.jp/hc/ja/articles/115008388147-%E3%83%97%E3%83%AC%E3%82%A4%E3%83%A4%E3%83%BCAPI%E3%81%AE%E3%82%B5%E3%83%B3%E3%83%97%E3%83%AB%E3%82%B3%E3%83%BC%E3%83%89
        script_tag = re.search(r'<script\s*[^>]+?src="https://ssl-cache\.stream\.ne\.jp/(?P<host>www\d+)/(?P<publisher>[a-z0-9]+)/[^"]+?/if\.js"', webpage)
        if not script_tag:
            return
        host, publisher = script_tag.groups()
        for m in re.finditer(r'(?s)PlayerFactoryIF\.create\(\s*({[^\}]+?})\s*\)\s*;', webpage):
            # TODO: using json.loads here as InfoExtractor._parse_json is not classmethod
            info = json.loads(js_to_json(m.group(1)))
            # The media id is base64-encoded in the player config's 'm' field.
            mid = base64.b64decode(info.get('m')).decode()
            yield f'jstream:{host}:{publisher}:{mid}'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gamejolt.py
yt_dlp/extractor/gamejolt.py
import itertools import json import math import urllib.parse from .common import InfoExtractor from ..utils import ( determine_ext, format_field, int_or_none, str_or_none, traverse_obj, try_get, ) class GameJoltBaseIE(InfoExtractor): _API_BASE = 'https://gamejolt.com/site-api/' def _call_api(self, endpoint, *args, **kwargs): kwargs.setdefault('headers', {}).update({'Accept': 'image/webp,*/*'}) return self._download_json(self._API_BASE + endpoint, *args, **kwargs)['payload'] def _parse_content_as_text(self, content): outer_contents, joined_contents = content.get('content') or [], [] for outer_content in outer_contents: if outer_content.get('type') != 'paragraph': joined_contents.append(self._parse_content_as_text(outer_content)) continue inner_contents, inner_content_text = outer_content.get('content') or [], '' for inner_content in inner_contents: if inner_content.get('text'): inner_content_text += inner_content['text'] elif inner_content.get('type') == 'hardBreak': inner_content_text += '\n' joined_contents.append(inner_content_text) return '\n'.join(joined_contents) def _get_comments(self, post_num_id, post_hash_id): sort_by, scroll_id = self._configuration_arg('comment_sort', ['hot'], ie_key=GameJoltIE.ie_key())[0], -1 is_scrolled = sort_by in ('new', 'you') for page in itertools.count(1): comments_data = self._call_api( 'comments/Fireside_Post/%s/%s?%s=%d' % ( post_num_id, sort_by, 'scroll_id' if is_scrolled else 'page', scroll_id if is_scrolled else page), post_hash_id, note=f'Downloading comments list page {page}') if not comments_data.get('comments'): break for comment in traverse_obj(comments_data, (('comments', 'childComments'), ...), expected_type=dict): yield { 'id': comment['id'], 'text': self._parse_content_as_text( self._parse_json(comment['comment_content'], post_hash_id)), 'timestamp': int_or_none(comment.get('posted_on'), scale=1000), 'like_count': comment.get('votes'), 'author': traverse_obj(comment, ('user', ('display_name', 'name')), 
expected_type=str_or_none, get_all=False), 'author_id': traverse_obj(comment, ('user', 'username'), expected_type=str_or_none), 'author_thumbnail': traverse_obj(comment, ('user', 'image_avatar'), expected_type=str_or_none), 'parent': comment.get('parent_id') or None, } scroll_id = int_or_none(comments_data['comments'][-1].get('posted_on')) def _parse_post(self, post_data): post_id = post_data['hash'] lead_content = self._parse_json(post_data.get('lead_content') or '{}', post_id, fatal=False) or {} description, full_description = post_data.get('leadStr') or self._parse_content_as_text( self._parse_json(post_data.get('lead_content'), post_id)), None if post_data.get('has_article'): article_content = self._parse_json( post_data.get('article_content') or self._call_api(f'web/posts/article/{post_data.get("id", post_id)}', post_id, note='Downloading article metadata', errnote='Unable to download article metadata', fatal=False).get('article'), post_id, fatal=False) full_description = self._parse_content_as_text(article_content) user_data = post_data.get('user') or {} info_dict = { 'extractor_key': GameJoltIE.ie_key(), 'extractor': 'GameJolt', 'webpage_url': str_or_none(post_data.get('url')) or f'https://gamejolt.com/p/{post_id}', 'id': post_id, 'title': description, 'description': full_description or description, 'display_id': post_data.get('slug'), 'uploader': user_data.get('display_name') or user_data.get('name'), 'uploader_id': user_data.get('username'), 'uploader_url': format_field(user_data, 'url', 'https://gamejolt.com%s'), 'categories': [try_get(category, lambda x: '{} - {}'.format(x['community']['name'], x['channel'].get('display_title') or x['channel']['title'])) for category in post_data.get('communities') or []], 'tags': traverse_obj( lead_content, ('content', ..., 'content', ..., 'marks', ..., 'attrs', 'tag'), expected_type=str_or_none), 'like_count': int_or_none(post_data.get('like_count')), 'comment_count': int_or_none(post_data.get('comment_count'), 
default=0), 'timestamp': int_or_none(post_data.get('added_on'), scale=1000), 'release_timestamp': int_or_none(post_data.get('published_on'), scale=1000), '__post_extractor': self.extract_comments(post_data.get('id'), post_id), } # TODO: Handle multiple videos/embeds? video_data = traverse_obj(post_data, ('videos', ...), expected_type=dict, get_all=False) or {} formats, subtitles, thumbnails = [], {}, [] for media in video_data.get('media') or []: media_url, mimetype, ext, media_id = media['img_url'], media.get('filetype', ''), determine_ext(media['img_url']), media.get('type') if mimetype == 'application/vnd.apple.mpegurl' or ext == 'm3u8': hls_formats, hls_subs = self._extract_m3u8_formats_and_subtitles(media_url, post_id, 'mp4', m3u8_id=media_id) formats.extend(hls_formats) subtitles.update(hls_subs) elif mimetype == 'application/dash+xml' or ext == 'mpd': dash_formats, dash_subs = self._extract_mpd_formats_and_subtitles(media_url, post_id, mpd_id=media_id) formats.extend(dash_formats) subtitles.update(dash_subs) elif 'image' in mimetype: thumbnails.append({ 'id': media_id, 'url': media_url, 'width': media.get('width'), 'height': media.get('height'), 'filesize': media.get('filesize'), }) else: formats.append({ 'format_id': media_id, 'url': media_url, 'width': media.get('width'), 'height': media.get('height'), 'filesize': media.get('filesize'), 'acodec': 'none' if 'video-card' in media_url else None, }) if formats: return { **info_dict, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'view_count': int_or_none(video_data.get('view_count')), } gif_entries = [] for media in post_data.get('media', []): if determine_ext(media['img_url']) != 'gif' or 'gif' not in media.get('filetype', ''): continue gif_entries.append({ 'id': media['hash'], 'title': media['filename'].split('.')[0], 'formats': [{ 'format_id': url_key, 'url': media[url_key], 'width': media.get('width') if url_key == 'img_url' else None, 'height': media.get('height') if url_key == 
'img_url' else None, 'filesize': media.get('filesize') if url_key == 'img_url' else None, 'acodec': 'none', } for url_key in ('img_url', 'mediaserver_url', 'mediaserver_url_mp4', 'mediaserver_url_webm') if media.get(url_key)], }) if gif_entries: return { '_type': 'playlist', **info_dict, 'entries': gif_entries, } embed_url = traverse_obj(post_data, ('embeds', ..., 'url'), expected_type=str_or_none, get_all=False) if embed_url: return self.url_result(embed_url) return info_dict class GameJoltIE(GameJoltBaseIE): _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/p/(?:[\w-]*-)?(?P<id>\w{8})' _TESTS = [{ # No audio 'url': 'https://gamejolt.com/p/introducing-ramses-jackson-some-fnf-himbo-i-ve-been-animating-fo-c6achnzu', 'md5': 'cd5f733258f6678b0ce500dd88166d86', 'info_dict': { 'id': 'c6achnzu', 'ext': 'mp4', 'display_id': 'introducing-ramses-jackson-some-fnf-himbo-i-ve-been-animating-fo-c6achnzu', 'title': 'Introducing Ramses Jackson, some FNF himbo I’ve been animating for the past few days, hehe.\n#fnfmod #fridaynightfunkin', 'description': 'Introducing Ramses Jackson, some FNF himbo I’ve been animating for the past few days, hehe.\n#fnfmod #fridaynightfunkin', 'uploader': 'Jakeneutron', 'uploader_id': 'Jakeneutron', 'uploader_url': 'https://gamejolt.com/@Jakeneutron', 'categories': ['Friday Night Funkin\' - Videos'], 'tags': ['fnfmod', 'fridaynightfunkin'], 'timestamp': 1633499590, 'upload_date': '20211006', 'release_timestamp': 1633499655, 'release_date': '20211006', 'thumbnail': 're:^https?://.+wgch9mhq.png$', 'like_count': int, 'comment_count': int, 'view_count': int, }, }, { # YouTube embed 'url': 'https://gamejolt.com/p/hey-hey-if-there-s-anyone-who-s-looking-to-get-into-learning-a-n6g4jzpq', 'md5': '79a931ff500a5c783ef6c3bda3272e32', 'info_dict': { 'id': 'XsNA_mzC0q4', 'title': 'Adobe Animate CC 2021 Tutorial || Part 1 - The Basics', 'description': 'md5:9d1ab9e2625b3fe1f42b2a44c67fdd13', 'uploader': 'Jakeneutron', 'uploader_id': 'Jakeneutron', 'uploader_url': 
'http://www.youtube.com/user/Jakeneutron', 'ext': 'mp4', 'duration': 1749, 'tags': ['Adobe Animate CC', 'Tutorial', 'Animation', 'The Basics', 'For Beginners'], 'like_count': int, 'playable_in_embed': True, 'categories': ['Education'], 'availability': 'public', 'thumbnail': 'https://i.ytimg.com/vi_webp/XsNA_mzC0q4/maxresdefault.webp', 'age_limit': 0, 'live_status': 'not_live', 'channel_url': 'https://www.youtube.com/channel/UC6_L7fnczNalFZyBthUE9oA', 'channel': 'Jakeneutron', 'channel_id': 'UC6_L7fnczNalFZyBthUE9oA', 'upload_date': '20211015', 'view_count': int, 'chapters': 'count:18', }, }, { # Article 'url': 'https://gamejolt.com/p/i-fuckin-broke-chaos-d56h3eue', 'md5': '786c1ccf98fde02c03a2768acb4258d0', 'info_dict': { 'id': 'd56h3eue', 'ext': 'mp4', 'display_id': 'i-fuckin-broke-chaos-d56h3eue', 'title': 'I fuckin broke Chaos.', 'description': 'I moved my tab durning the cutscene so now it\'s stuck like this.', 'uploader': 'Jeff____________', 'uploader_id': 'The_Nyesh_Man', 'uploader_url': 'https://gamejolt.com/@The_Nyesh_Man', 'categories': ['Friday Night Funkin\' - Videos'], 'timestamp': 1639800264, 'upload_date': '20211218', 'release_timestamp': 1639800330, 'release_date': '20211218', 'thumbnail': 're:^https?://.+euksy8bd.png$', 'like_count': int, 'comment_count': int, 'view_count': int, }, }, { # Single GIF 'url': 'https://gamejolt.com/p/hello-everyone-i-m-developing-a-pixel-art-style-mod-for-fnf-and-i-vs4gdrd8', 'info_dict': { 'id': 'vs4gdrd8', 'display_id': 'hello-everyone-i-m-developing-a-pixel-art-style-mod-for-fnf-and-i-vs4gdrd8', 'title': 'md5:cc3d8b031d9bc7ec2ec5a9ffc707e1f9', 'description': 'md5:cc3d8b031d9bc7ec2ec5a9ffc707e1f9', 'uploader': 'Quesoguy', 'uploader_id': 'CheeseguyDev', 'uploader_url': 'https://gamejolt.com/@CheeseguyDev', 'categories': ['Game Dev - General', 'Arts n\' Crafts - Creations', 'Pixel Art - showcase', 'Friday Night Funkin\' - Mods', 'Newgrounds - Friday Night Funkin (13+)'], 'timestamp': 1639517122, 'release_timestamp': 
1639519966, 'like_count': int, 'comment_count': int, }, 'playlist': [{ 'info_dict': { 'id': 'dszyjnwi', 'ext': 'webm', 'title': 'gif-presentacion-mejorado-dszyjnwi', }, }], 'playlist_count': 1, }, { # Multiple GIFs 'url': 'https://gamejolt.com/p/gif-yhsqkumq', 'playlist_count': 35, 'info_dict': { 'id': 'yhsqkumq', 'display_id': 'gif-yhsqkumq', 'title': 'GIF', 'description': 'GIF', 'uploader': 'DaniilTvman', 'uploader_id': 'DaniilTvman', 'uploader_url': 'https://gamejolt.com/@DaniilTvman', 'categories': ['Five Nights At The AGK Studio Comunity - NEWS game'], 'timestamp': 1638721559, 'release_timestamp': 1638722276, 'like_count': int, 'comment_count': int, }, }] def _real_extract(self, url): post_id = self._match_id(url) post_data = self._call_api( f'web/posts/view/{post_id}', post_id)['post'] return self._parse_post(post_data) class GameJoltPostListBaseIE(GameJoltBaseIE): def _entries(self, endpoint, list_id, note='Downloading post list', errnote='Unable to download post list', initial_items=[]): page_num, scroll_id = 1, None items = initial_items or self._call_api(endpoint, list_id, note=note, errnote=errnote)['items'] while items: for item in items: yield self._parse_post(item['action_resource_model']) scroll_id = items[-1]['scroll_id'] page_num += 1 items = self._call_api( endpoint, list_id, note=f'{note} page {page_num}', errnote=errnote, data=json.dumps({ 'scrollDirection': 'from', 'scrollId': scroll_id, }).encode()).get('items') class GameJoltUserIE(GameJoltPostListBaseIE): _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/@(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://gamejolt.com/@BlazikenSuperStar', 'playlist_mincount': 1, 'info_dict': { 'id': '6116784', 'title': 'S. 
Blaze', 'description': 'md5:5ba7fbbb549e8ea2545aafbfe22eb03a', }, 'params': { 'ignore_no_formats_error': True, }, 'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'], }] def _real_extract(self, url): user_id = self._match_id(url) user_data = self._call_api( f'web/profile/@{user_id}', user_id, note='Downloading user info', errnote='Unable to download user info')['user'] bio = self._parse_content_as_text( self._parse_json(user_data.get('bio_content', '{}'), user_id, fatal=False) or {}) return self.playlist_result( self._entries(f'web/posts/fetch/user/@{user_id}?tab=active', user_id, 'Downloading user posts', 'Unable to download user posts'), str_or_none(user_data.get('id')), user_data.get('display_name') or user_data.get('name'), bio) class GameJoltGameIE(GameJoltPostListBaseIE): _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/games/[\w-]+/(?P<id>\d+)' _TESTS = [{ 'url': 'https://gamejolt.com/games/Friday4Fun/655124', 'playlist_mincount': 2, 'info_dict': { 'id': '655124', 'title': 'Friday Night Funkin\': Friday 4 Fun', 'description': 'md5:576a7dd87912a2dcf33c50d2bd3966d3', }, 'params': { 'ignore_no_formats_error': True, }, 'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'], }] def _real_extract(self, url): game_id = self._match_id(url) game_data = self._call_api( f'web/discover/games/{game_id}', game_id, note='Downloading game info', errnote='Unable to download game info')['game'] description = self._parse_content_as_text( self._parse_json(game_data.get('description_content', '{}'), game_id, fatal=False) or {}) return self.playlist_result( self._entries(f'web/posts/fetch/game/{game_id}', game_id, 'Downloading game posts', 'Unable to download game posts'), game_id, game_data.get('title'), description) class GameJoltGameSoundtrackIE(GameJoltBaseIE): _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/get/soundtrack(?:\?|\#!?)(?:.*?[&;])??game=(?P<id>(?:\d+)+)' _TESTS = 
[{ 'url': 'https://gamejolt.com/get/soundtrack?foo=bar&game=657899', 'info_dict': { 'id': '657899', 'title': 'Friday Night Funkin\': Vs Oswald', }, 'playlist': [{ 'info_dict': { 'id': '184434', 'ext': 'mp3', 'title': 'Gettin\' Lucky (Menu Music)', 'url': r're:^https://.+vs-oswald-menu-music\.mp3$', 'release_timestamp': 1635190816, 'release_date': '20211025', }, }, { 'info_dict': { 'id': '184435', 'ext': 'mp3', 'title': 'Rabbit\'s Luck (Extended Version)', 'url': r're:^https://.+rabbit-s-luck--full-version-\.mp3$', 'release_timestamp': 1635190841, 'release_date': '20211025', }, }, { 'info_dict': { 'id': '185228', 'ext': 'mp3', 'title': 'Last Straw', 'url': r're:^https://.+last-straw\.mp3$', 'release_timestamp': 1635881104, 'release_date': '20211102', }, }], 'playlist_count': 3, }] def _real_extract(self, url): game_id = self._match_id(url) game_overview = self._call_api( f'web/discover/games/overview/{game_id}', game_id, note='Downloading soundtrack info', errnote='Unable to download soundtrack info') return self.playlist_result([{ 'id': str_or_none(song.get('id')), 'title': str_or_none(song.get('title')), 'url': str_or_none(song.get('url')), 'release_timestamp': int_or_none(song.get('posted_on'), scale=1000), } for song in game_overview.get('songs') or []], game_id, traverse_obj( game_overview, ('microdata', 'name'), (('twitter', 'fb'), 'title'), expected_type=str_or_none, get_all=False)) class GameJoltCommunityIE(GameJoltPostListBaseIE): _VALID_URL = r'https?://(?:www\.)?gamejolt\.com/c/(?P<id>(?P<community>[\w-]+)(?:/(?P<channel>[\w-]+))?)(?:(?:\?|\#!?)(?:.*?[&;])??sort=(?P<sort>\w+))?' 
_TESTS = [{ 'url': 'https://gamejolt.com/c/fnf/videos', 'playlist_mincount': 50, 'info_dict': { 'id': 'fnf/videos', 'title': 'Friday Night Funkin\' - Videos', 'description': 'md5:6d8c06f27460f7d35c1554757ffe53c8', }, 'params': { 'playlistend': 50, 'ignore_no_formats_error': True, }, 'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'], }, { 'url': 'https://gamejolt.com/c/youtubers', 'playlist_mincount': 50, 'info_dict': { 'id': 'youtubers/featured', 'title': 'Youtubers - featured', 'description': 'md5:53e5582c93dcc467ab597bfca4db17d4', }, 'params': { 'playlistend': 50, 'ignore_no_formats_error': True, }, 'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'], }] def _real_extract(self, url): display_id, community_id, channel_id, sort_by = self._match_valid_url(url).group('id', 'community', 'channel', 'sort') channel_id, sort_by = channel_id or 'featured', sort_by or 'new' community_data = self._call_api( f'web/communities/view/{community_id}', display_id, note='Downloading community info', errnote='Unable to download community info')['community'] channel_data = traverse_obj(self._call_api( f'web/communities/view-channel/{community_id}/{channel_id}', display_id, note='Downloading channel info', errnote='Unable to download channel info', fatal=False), 'channel') or {} title = f'{community_data.get("name") or community_id} - {channel_data.get("display_title") or channel_id}' description = self._parse_content_as_text( self._parse_json(community_data.get('description_content') or '{}', display_id, fatal=False) or {}) return self.playlist_result( self._entries( f'web/posts/fetch/community/{community_id}?channels[]={sort_by}&channels[]={channel_id}', display_id, 'Downloading community posts', 'Unable to download community posts'), f'{community_id}/{channel_id}', title, description) class GameJoltSearchIE(GameJoltPostListBaseIE): _VALID_URL = 
r'https?://(?:www\.)?gamejolt\.com/search(?:/(?P<filter>communities|users|games))?(?:\?|\#!?)(?:.*?[&;])??q=(?P<id>(?:[^&#]+)+)' _URL_FORMATS = { 'users': 'https://gamejolt.com/@{username}', 'communities': 'https://gamejolt.com/c/{path}', 'games': 'https://gamejolt.com/games/{slug}/{id}', } _TESTS = [{ 'url': 'https://gamejolt.com/search?foo=bar&q=%23fnf', 'playlist_mincount': 50, 'info_dict': { 'id': '#fnf', 'title': '#fnf', }, 'params': { 'playlistend': 50, 'ignore_no_formats_error': True, }, 'expected_warnings': ['skipping format', 'No video formats found', 'Requested format is not available'], }, { 'url': 'https://gamejolt.com/search/communities?q=cookie%20run', 'playlist_mincount': 10, 'info_dict': { 'id': 'cookie run', 'title': 'cookie run', }, }, { 'url': 'https://gamejolt.com/search/users?q=mlp', 'playlist_mincount': 278, 'info_dict': { 'id': 'mlp', 'title': 'mlp', }, }, { 'url': 'https://gamejolt.com/search/games?q=roblox', 'playlist_mincount': 688, 'info_dict': { 'id': 'roblox', 'title': 'roblox', }, }] def _search_entries(self, query, filter_mode, display_query): initial_search_data = self._call_api( f'web/search/{filter_mode}?q={query}', display_query, note=f'Downloading {filter_mode} list', errnote=f'Unable to download {filter_mode} list') entries_num = traverse_obj(initial_search_data, 'count', f'{filter_mode}Count') if not entries_num: return for page in range(1, math.ceil(entries_num / initial_search_data['perPage']) + 1): search_results = self._call_api( f'web/search/{filter_mode}?q={query}&page={page}', display_query, note=f'Downloading {filter_mode} list page {page}', errnote=f'Unable to download {filter_mode} list') for result in search_results[filter_mode]: yield self.url_result(self._URL_FORMATS[filter_mode].format(**result)) def _real_extract(self, url): filter_mode, query = self._match_valid_url(url).group('filter', 'id') display_query = urllib.parse.unquote(query) return self.playlist_result( self._search_entries(query, filter_mode, 
display_query) if filter_mode else self._entries( f'web/posts/fetch/search/{query}', display_query, initial_items=self._call_api( f'web/search?q={query}', display_query, note='Downloading initial post list', errnote='Unable to download initial post list')['posts']), display_query, display_query)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mediaite.py
yt_dlp/extractor/mediaite.py
from .common import InfoExtractor class MediaiteIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?mediaite\.com(?!/category)(?:/[\w-]+){2}' _TESTS = [{ 'url': 'https://www.mediaite.com/sports/bill-burr-roasts-nfl-for-promoting-black-lives-matter-while-scheduling-more-games-after-all-the-sht-they-know-about-cte/', 'info_dict': { 'id': 'vPHKITzy', 'ext': 'm4a', 'title': 'Bill Burr On NFL And Black Lives Matter', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/vPHKITzy/poster.jpg?width=720', 'duration': 55, 'timestamp': 1631630185, 'upload_date': '20210914', }, 'params': {'skip_download': True}, }, { 'url': 'https://www.mediaite.com/tv/joe-scarborough-goes-off-on-tax-breaks-for-super-wealthy-largest-income-redistribution-scam-in-american-history/', 'info_dict': { 'id': 'eeFcK4Xm', 'ext': 'mp4', 'title': 'Morning Joe-6_16_52 am - 6_21_10 am-2021-09-14.mp4', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/eeFcK4Xm/poster.jpg?width=720', 'duration': 258, 'timestamp': 1631618057, 'upload_date': '20210914', }, 'params': {'skip_download': True}, }, { 'url': 'https://www.mediaite.com/politics/watch-rudy-giuliani-impersonates-queen-elizabeth-calls-mark-milley-an-asshle-in-bizarre-9-11-speech/', 'info_dict': { 'id': 'EiyiXKcr', 'ext': 'mp4', 'title': 'Giuliani 1', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/EiyiXKcr/poster.jpg?width=720', 'duration': 39, 'timestamp': 1631536476, 'upload_date': '20210913', }, 'params': {'skip_download': True}, }, { 'url': 'https://www.mediaite.com/podcasts/clarissa-ward-says-she-decided-to-become-a-journalist-on-9-11/', 'info_dict': { 'id': 'TxavoRTx', 'ext': 'mp4', 'title': 'clarissa-ward-3.mp4', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/TxavoRTx/poster.jpg?width=720', 'duration': 83, 'timestamp': 1631311188, 
'upload_date': '20210910', }, 'params': {'skip_download': True}, }, { 'url': 'https://www.mediaite.com/opinion/mainstream-media-ignores-rose-mcgowans-bombshell-allegation-that-newsoms-wife-tried-to-silence-her-on-weinstein/', 'info_dict': { 'id': 'sEIWvKR7', 'ext': 'mp4', 'title': 'KTTV_09-13-2021_05.34.21', 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/sEIWvKR7/poster.jpg?width=720', 'duration': 52, 'timestamp': 1631553328, 'upload_date': '20210913', }, 'params': {'skip_download': True}, }, { 'url': 'https://www.mediaite.com/news/watch-cnbcs-jim-cramer-says-nobody-wants-to-die-getting-infected-by-unvaccinated-coworker-even-for-22-an-hour/', 'info_dict': { 'id': 'nwpt1elX', 'ext': 'mp4', 'title': "CNBC's Jim Cramer Says Nobody Wants to Die Getting Infected by Unvaccinated Coworker 'Even for $22 an Hour'.mp4", 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/nwpt1elX/poster.jpg?width=720', 'duration': 60, 'timestamp': 1633014214, 'upload_date': '20210930', }, 'params': {'skip_download': True}, }, { 'url': 'https://www.mediaite.com/politics/i-cant-read-it-fast-enough-while-defending-trump-larry-kudlow-overwhelmed-by-volume-of-ex-presidents-legal-troubles/', 'info_dict': { 'id': 'E6EhDX5z', 'ext': 'mp4', 'title': 'Fox Business Network - 4:00 PM - 5:00 PM - 1:39:42 pm - 1:42:20 pm', 'description': '', 'thumbnail': 'https://cdn.jwplayer.com/v2/media/E6EhDX5z/poster.jpg?width=720', 'duration': 157, 'timestamp': 1691015535, 'upload_date': '20230802', }, 'params': {'skip_download': True}, }] def _real_extract(self, url): webpage = self._download_webpage(url, None) video_id = self._search_regex( [r'"https://cdn\.jwplayer\.com/players/(\w+)', r'data-video-id\s*=\s*\"([^\"]+)\"'], webpage, 'id') data_json = self._download_json(f'https://cdn.jwplayer.com/v2/media/{video_id}', video_id) return self._parse_jwplayer_data(data_json)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/canalplus.py
yt_dlp/extractor/canalplus.py
from .common import InfoExtractor from ..utils import ( # ExtractorError, # HEADRequest, int_or_none, qualities, unified_strdate, ) class CanalplusIE(InfoExtractor): IE_DESC = 'mycanal.fr and piwiplus.fr' _VALID_URL = r'https?://(?:www\.)?(?P<site>mycanal|piwiplus)\.fr/(?:[^/]+/)*(?P<display_id>[^?/]+)(?:\.html\?.*\bvid=|/p/)(?P<id>\d+)' _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s?format=json' _SITE_ID_MAP = { 'mycanal': 'cplus', 'piwiplus': 'teletoon', } # Only works for direct mp4 URLs _GEO_COUNTRIES = ['FR'] _TESTS = [{ 'url': 'https://www.mycanal.fr/d17-emissions/lolywood/p/1397061', 'info_dict': { 'id': '1397061', 'display_id': 'lolywood', 'ext': 'mp4', 'title': 'Euro 2016 : Je préfère te prévenir - Lolywood - Episode 34', 'description': 'md5:7d97039d455cb29cdba0d652a0efaa5e', 'upload_date': '20160602', }, }, { # geo restricted, bypassed 'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190', 'info_dict': { 'id': '1108190', 'display_id': 'pid1405-le-labyrinthe-boing-super-ranger', 'ext': 'mp4', 'title': 'BOING SUPER RANGER - Ep : Le labyrinthe', 'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff', 'upload_date': '20140724', }, 'expected_warnings': ['HTTP Error 403: Forbidden'], }] def _real_extract(self, url): site, display_id, video_id = self._match_valid_url(url).groups() site_id = self._SITE_ID_MAP[site] info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id) video_data = self._download_json(info_url, video_id, 'Downloading video JSON') if isinstance(video_data, list): video_data = next(video for video in video_data if video.get('ID') == video_id) media = video_data['MEDIA'] infos = video_data['INFOS'] preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD']) # _, fmt_url = next(iter(media['VIDEOS'].items())) # if '/geo' in fmt_url.lower(): # response = self._request_webpage( # HEADRequest(fmt_url), video_id, # 'Checking if the video is georestricted') # 
if '/blocage' in response.url: # raise ExtractorError( # 'The video is not available in your country', # expected=True) formats = [] for format_id, format_url in media['VIDEOS'].items(): if not format_url: continue if format_id == 'HLS': formats.extend(self._extract_m3u8_formats( format_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False)) elif format_id == 'HDS': formats.extend(self._extract_f4m_formats( format_url + '?hdcore=2.11.3', video_id, f4m_id=format_id, fatal=False)) else: formats.append({ # the secret extracted from ya function in http://player.canalplus.fr/common/js/canalPlayer.js 'url': format_url + '?secret=pqzerjlsmdkjfoiuerhsdlfknaes', 'format_id': format_id, 'quality': preference(format_id), }) thumbnails = [{ 'id': image_id, 'url': image_url, } for image_id, image_url in media.get('images', {}).items()] titrage = infos['TITRAGE'] return { 'id': video_id, 'display_id': display_id, 'title': '{} - {}'.format(titrage['TITRE'], titrage['SOUS_TITRE']), 'upload_date': unified_strdate(infos.get('PUBLICATION', {}).get('DATE')), 'thumbnails': thumbnails, 'description': infos.get('DESCRIPTION'), 'duration': int_or_none(infos.get('DURATION')), 'view_count': int_or_none(infos.get('NB_VUES')), 'like_count': int_or_none(infos.get('NB_LIKES')), 'comment_count': int_or_none(infos.get('NB_COMMENTS')), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/camfm.py
yt_dlp/extractor/camfm.py
import re from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_class, get_elements_by_class, join_nonempty, traverse_obj, unified_timestamp, urljoin, ) class CamFMShowIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?camfm\.co\.uk/shows/(?P<id>[^/]+)' _TESTS = [{ 'playlist_mincount': 5, 'url': 'https://camfm.co.uk/shows/soul-mining/', 'info_dict': { 'id': 'soul-mining', 'thumbnail': 'md5:6a873091f92c936f23bdcce80f75e66a', 'title': 'Soul Mining', 'description': 'Telling the stories of jazz, funk and soul from all corners of the world.', }, }] def _real_extract(self, url): show_id = self._match_id(url) page = self._download_webpage(url, show_id) return { '_type': 'playlist', 'id': show_id, 'entries': [self.url_result(urljoin('https://camfm.co.uk', i), CamFMEpisodeIE) for i in re.findall(r"javascript:popup\('(/player/[^']+)', 'listen'", page)], 'thumbnail': urljoin('https://camfm.co.uk', self._search_regex( r'<img[^>]+class="thumb-expand"[^>]+src="([^"]+)"', page, 'thumbnail', fatal=False)), 'title': self._html_search_regex('<h1>([^<]+)</h1>', page, 'title', fatal=False), 'description': clean_html(get_element_by_class('small-12 medium-8 cell', page)), } class CamFMEpisodeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?camfm\.co\.uk/player/(?P<id>[^/]+)' _TESTS = [{ 'url': 'https://camfm.co.uk/player/43336', 'skip': 'Episode will expire - don\'t actually know when, but it will go eventually', 'info_dict': { 'id': '43336', 'title': 'AITAA: Am I the Agony Aunt? 
- 19:00 Tue 16/05/2023', 'ext': 'mp3', 'upload_date': '20230516', 'description': 'md5:f165144f94927c0f1bfa2ee6e6ab7bbf', 'timestamp': 1684263600, 'series': 'AITAA: Am I the Agony Aunt?', 'thumbnail': 'md5:5980a831360d0744c3764551be3d09c1', 'categories': ['Entertainment'], }, }] def _real_extract(self, url): episode_id = self._match_id(url) page = self._download_webpage(url, episode_id) audios = self._parse_html5_media_entries('https://audio.camfm.co.uk', page, episode_id) caption = get_element_by_class('caption', page) series = clean_html(re.sub(r'<span[^<]+<[^<]+>', '', caption)) card_section = get_element_by_class('card-section', page) date = self._html_search_regex('>Aired at ([^<]+)<', card_section, 'air date', fatal=False) return { 'id': episode_id, 'title': join_nonempty(series, date, delim=' - '), 'formats': traverse_obj(audios, (..., 'formats', ...)), 'timestamp': unified_timestamp(date), # XXX: Does not account for UK's daylight savings 'series': series, 'description': clean_html(re.sub(r'<b>[^<]+</b><br[^>]+/>', '', card_section)), 'thumbnail': urljoin('https://camfm.co.uk', self._search_regex( r'<div[^>]+class="cover-art"[^>]+style="[^"]+url\(\'([^\']+)', page, 'thumbnail', fatal=False)), 'categories': get_elements_by_class('label', caption), 'was_live': True, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/masters.py
yt_dlp/extractor/masters.py
from .common import InfoExtractor from ..utils import ( traverse_obj, unified_strdate, ) class MastersIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?masters\.com/en_US/watch/(?P<date>\d{4}-\d{2}-\d{2})/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.masters.com/en_US/watch/2022-04-07/16493755593805191/sungjae_im_thursday_interview_2022.html', 'info_dict': { 'id': '16493755593805191', 'ext': 'mp4', 'title': 'Sungjae Im: Thursday Interview 2022', 'upload_date': '20220407', 'thumbnail': r're:^https?://.*\.jpg$', }, }] def _real_extract(self, url): video_id, upload_date = self._match_valid_url(url).group('id', 'date') content_resp = self._download_json( f'https://www.masters.com/relatedcontent/rest/v2/masters_v1/en/content/masters_v1_{video_id}_en', video_id) formats, subtitles = self._extract_m3u8_formats_and_subtitles(traverse_obj(content_resp, ('media', 'm3u8')), video_id, 'mp4') thumbnails = [{'id': name, 'url': url} for name, url in traverse_obj(content_resp, ('images', 0), default={}).items()] return { 'id': video_id, 'title': content_resp.get('title'), 'formats': formats, 'subtitles': subtitles, 'upload_date': unified_strdate(upload_date), 'thumbnails': thumbnails, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/whyp.py
yt_dlp/extractor/whyp.py
from .common import InfoExtractor from ..utils import ( float_or_none, str_or_none, traverse_obj, url_or_none, ) class WhypIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?whyp\.it/tracks/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.whyp.it/tracks/18337/home-page-example-track-b4kq7', 'md5': 'c1187b42ebf8605284e3dc92aeb33d16', 'info_dict': { 'url': 'https://cdn.whyp.it/50eb17cc-e9ff-4e18-b89b-dc9206a95cb1.mp3', 'id': '18337', 'title': 'Home Page Example Track', 'description': 'md5:bd758000fb93f3159339c852b5b9133c', 'ext': 'mp3', 'duration': 52.82, 'uploader': 'Brad', 'uploader_id': '1', 'thumbnail': 'https://cdn.whyp.it/a537bb36-3373-4c61-96c8-27fc1b2f427a.jpg', }, }, { 'url': 'https://www.whyp.it/tracks/18337', 'only_matching': True, }] def _real_extract(self, url): unique_id = self._match_id(url) webpage = self._download_webpage(url, unique_id) data = self._search_nuxt_data(webpage, unique_id)['rawTrack'] return { 'url': data['audio_url'], 'id': unique_id, **traverse_obj(data, { 'title': 'title', 'description': 'description', 'duration': ('duration', {float_or_none}), 'uploader': ('user', 'username'), 'uploader_id': ('user', 'id', {str_or_none}), 'thumbnail': ('artwork_url', {url_or_none}), }), 'ext': 'mp3', 'vcodec': 'none', 'http_headers': {'Referer': 'https://whyp.it/'}, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/radiokapital.py
yt_dlp/extractor/radiokapital.py
import itertools import urllib.parse from .common import InfoExtractor from ..utils import clean_html, traverse_obj, unescapeHTML class RadioKapitalBaseIE(InfoExtractor): def _call_api(self, resource, video_id, note='Downloading JSON metadata', qs={}): return self._download_json( f'https://www.radiokapital.pl/wp-json/kapital/v1/{resource}?{urllib.parse.urlencode(qs)}', video_id, note=note) def _parse_episode(self, data): release = '{}{}{}'.format(data['published'][6:11], data['published'][3:6], data['published'][:3]) return { '_type': 'url_transparent', 'url': data['mixcloud_url'], 'ie_key': 'Mixcloud', 'title': unescapeHTML(data['title']), 'description': clean_html(data.get('content')), 'tags': traverse_obj(data, ('tags', ..., 'name')), 'release_date': release, 'series': traverse_obj(data, ('show', 'title')), } class RadioKapitalIE(RadioKapitalBaseIE): IE_NAME = 'radiokapital' _VALID_URL = r'https?://(?:www\.)?radiokapital\.pl/shows/[a-z\d-]+/(?P<id>[a-z\d-]+)' _TESTS = [{ 'url': 'https://radiokapital.pl/shows/tutaj-sa-smoki/5-its-okay-to-be-immaterial', 'info_dict': { 'id': 'radiokapital_radio-kapitał-tutaj-są-smoki-5-its-okay-to-be-immaterial-2021-05-20', 'ext': 'm4a', 'title': '#5: It’s okay to\xa0be\xa0immaterial', 'description': 'md5:2499da5fbfb0e88333b7d37ec8e9e4c4', 'uploader': 'Radio Kapitał', 'uploader_id': 'radiokapital', 'timestamp': 1621640164, 'upload_date': '20210521', }, }] def _real_extract(self, url): video_id = self._match_id(url) episode = self._call_api(f'episodes/{video_id}', video_id) return self._parse_episode(episode) class RadioKapitalShowIE(RadioKapitalBaseIE): IE_NAME = 'radiokapital:show' _VALID_URL = r'https?://(?:www\.)?radiokapital\.pl/shows/(?P<id>[a-z\d-]+)/?(?:$|[?#])' _TESTS = [{ 'url': 'https://radiokapital.pl/shows/wesz', 'info_dict': { 'id': '100', 'title': 'WĘSZ', 'description': 'md5:3a557a1e0f31af612b0dcc85b1e0ca5c', }, 'playlist_mincount': 17, }] def _get_episode_list(self, series_id, page_no): return self._call_api( 
'episodes', series_id, f'Downloading episode list page #{page_no}', qs={ 'show': series_id, 'page': page_no, }) def _entries(self, series_id): for page_no in itertools.count(1): episode_list = self._get_episode_list(series_id, page_no) yield from (self._parse_episode(ep) for ep in episode_list['items']) if episode_list['next'] is None: break def _real_extract(self, url): series_id = self._match_id(url) show = self._call_api(f'shows/{series_id}', series_id, 'Downloading show metadata') entries = self._entries(series_id) return { '_type': 'playlist', 'entries': entries, 'id': str(show['id']), 'title': show.get('title'), 'description': clean_html(show.get('content')), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/listennotes.py
yt_dlp/extractor/listennotes.py
import re from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, get_element_by_class, get_element_html_by_id, parse_duration, strip_or_none, ) from ..utils.traversal import find_element, traverse_obj class ListenNotesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?listennotes\.com/podcasts/[^/]+/[^/]+-(?P<id>.+)/' _TESTS = [{ 'url': 'https://www.listennotes.com/podcasts/thriving-on-overload/tim-oreilly-on-noticing-KrDgvNb_u1n/', 'md5': '5b91a32f841e5788fb82b72a1a8af7f7', 'info_dict': { 'id': 'KrDgvNb_u1n', 'ext': 'mp3', 'title': r're:Tim O’Reilly on noticing things other people .{113}', 'description': r're:(?s)‘’We shape reality by what we notice and .{27459}', 'duration': 2215.0, 'channel': 'Amplifying Cognition', 'channel_id': 'ed84wITivxF', 'episode_id': 'e1312583fa7b4e24acfbb5131050be00', 'thumbnail': 'https://cdn-images-3.listennotes.com/podcasts/amplifying-cognition-ross-dawson-Iemft4Gdr0k-ed84wITivxF.300x300.jpg', 'channel_url': 'https://www.listennotes.com/podcasts/amplifying-cognition-ross-dawson-ed84wITivxF/', 'cast': ['Tim O’Reilly', 'Cookie Monster', 'Lao Tzu', 'Wallace Steven', 'Eric Raymond', 'Christine Peterson', 'John Maynard Keyne', 'Ross Dawson'], }, }, { 'url': 'https://www.listennotes.com/podcasts/ask-noah-show/episode-177-wireguard-with-lwEA3154JzG/', 'md5': '62fb4ffe7fc525632a1138bf72a5ce53', 'info_dict': { 'id': 'lwEA3154JzG', 'ext': 'mp3', 'title': 'Episode 177: WireGuard with Jason Donenfeld', 'description': r're:(?s)Jason Donenfeld lead developer joins us this hour to discuss WireGuard, .{3169}', 'duration': 3861.0, 'channel': 'Ask Noah Show', 'channel_id': '4DQTzdS5-j7', 'episode_id': '8c8954b95e0b4859ad1eecec8bf6d3a4', 'channel_url': 'https://www.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-4DQTzdS5-j7/', 'thumbnail': 'https://cdn-images-3.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-gD7vG150cxf-4DQTzdS5-j7.300x300.jpg', 'cast': ['noah showlink', 'noah show', 'noah dashboard', 
'jason donenfeld'], }, }] def _clean_description(self, description): return clean_html(re.sub(r'(</?(div|p)>\s*)+', '<br/><br/>', description or '')) def _real_extract(self, url): audio_id = self._match_id(url) webpage = self._download_webpage(url, audio_id) data = self._search_json( r'<script id="original-content"[^>]+\btype="application/json">', webpage, 'content', audio_id) data.update(extract_attributes(get_element_html_by_id( r'episode-play-button-toolbar|episode-no-play-button-toolbar', webpage, escape_value=False))) duration, description = self._search_regex( r'(?P<duration>[\d:]+)\s*-\s*(?P<description>.+)', self._html_search_meta(['og:description', 'description', 'twitter:description'], webpage), 'description', fatal=False, group=('duration', 'description')) or (None, None) return { 'id': audio_id, 'url': data['audio'], 'title': (data.get('data-title') or traverse_obj(webpage, ({find_element(tag='h1')}, {clean_html})) or self._html_search_meta(('og:title', 'title', 'twitter:title'), webpage, 'title')), 'description': (self._clean_description(get_element_by_class('ln-text-p', webpage)) or strip_or_none(description)), 'duration': parse_duration(traverse_obj(data, 'audio_length', 'data-duration') or duration), 'episode_id': traverse_obj(data, 'uuid', 'data-episode-uuid'), **traverse_obj(data, { 'thumbnail': 'data-image', 'channel': 'data-channel-title', 'cast': ('nlp_entities', ..., 'name'), 'channel_url': 'channel_url', 'channel_id': 'channel_short_uuid', }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tv4.py
yt_dlp/extractor/tv4.py
import re from .common import InfoExtractor from ..utils import ( bool_or_none, int_or_none, parse_iso8601, traverse_obj, url_or_none, ) class TV4IE(InfoExtractor): IE_DESC = 'tv4.se and tv4play.se' _VALID_URL = r'''(?x)https?://(?:www\.)? (?: tv4\.se/(?:[^/]+)/klipp/(?:.*)-| tv4play\.se/ (?: (?:program|barn)/(?:(?:[^/]+/){1,2}|(?:[^\?]+)\?video_id=)| iframe/video/| film/| sport/| ) )(?P<id>[0-9]+)''' _GEO_BYPASS = False _TESTS = [ { # not geo-restricted 'url': 'http://www.tv4.se/kalla-fakta/klipp/kalla-fakta-5-english-subtitles-2491650', 'md5': 'cb837212f342d77cec06e6dad190e96d', 'info_dict': { 'id': '2491650', 'ext': 'mp4', 'title': 'Kalla Fakta 5 (english subtitles)', 'description': '2491650', 'series': 'Kalla fakta', 'duration': 1335, 'thumbnail': r're:^https?://[^/?#]+/api/v2/img/', 'timestamp': 1385373240, 'upload_date': '20131125', }, 'params': {'skip_download': 'm3u8'}, 'expected_warnings': ['Unable to download f4m manifest'], }, { 'url': 'http://www.tv4play.se/iframe/video/3054113', 'md5': 'cb837212f342d77cec06e6dad190e96d', 'info_dict': { 'id': '3054113', 'ext': 'mp4', 'title': 'Så här jobbar ficktjuvarna - se avslöjande bilder', 'thumbnail': r're:^https?://.*\.jpg$', 'description': 'Unika bilder avslöjar hur turisternas fickor vittjas mitt på Stockholms central. 
Två experter på ficktjuvarna avslöjar knepen du ska se upp för.', 'timestamp': int, 'upload_date': '20150130', }, 'skip': '404 Not Found', }, { 'url': 'http://www.tv4play.se/sport/3060959', 'only_matching': True, }, { 'url': 'http://www.tv4play.se/film/2378136', 'only_matching': True, }, { 'url': 'http://www.tv4play.se/barn/looney-tunes?video_id=3062412', 'only_matching': True, }, { 'url': 'http://www.tv4play.se/program/farang/3922081', 'only_matching': True, }, { 'url': 'https://www.tv4play.se/program/nyheterna/avsnitt/13315940', 'only_matching': True, }, ] def _call_api(self, endpoint, video_id, headers=None, query={}): return self._download_json( f'https://playback2.a2d.tv/{endpoint}/{video_id}', video_id, f'Downloading {endpoint} API JSON', headers=headers, query={ 'service': 'tv4', 'device': 'browser', 'protocol': 'hls', **query, }) def _real_extract(self, url): video_id = self._match_id(url) info = traverse_obj(self._call_api('asset', video_id, query={ 'protocol': 'hls,dash', 'drm': 'widevine', }), ('metadata', {dict})) or {} manifest_url = self._call_api( 'play', video_id, headers=self.geo_verification_headers())['playbackItem']['manifestUrl'] formats, subtitles = [], {} fmts, subs = self._extract_m3u8_formats_and_subtitles( manifest_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) fmts, subs = self._extract_mpd_formats_and_subtitles( manifest_url.replace('.m3u8', '.mpd'), video_id, mpd_id='dash', fatal=False) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) fmts = self._extract_f4m_formats( manifest_url.replace('.m3u8', '.f4m'), video_id, f4m_id='hds', fatal=False) formats.extend(fmts) fmts, subs = self._extract_ism_formats_and_subtitles( re.sub(r'\.ism/.*?\.m3u8', r'.ism/Manifest', manifest_url), video_id, ism_id='mss', fatal=False) formats.extend(fmts) subtitles = self._merge_subtitles(subtitles, subs) if not formats and 
info.get('is_geo_restricted'): self.raise_geo_restricted( 'This video is not available from your location due to geo-restriction, or not being authenticated', countries=['SE']) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(info, { 'title': ('title', {str}), 'description': ('description', {str}), 'timestamp': (('broadcast_date_time', 'broadcastDateTime'), {parse_iso8601}), 'duration': ('duration', {int_or_none}), 'thumbnail': ('image', {url_or_none}), 'is_live': ('isLive', {bool_or_none}), 'series': ('seriesTitle', {str}), 'season_number': ('seasonNumber', {int_or_none}), 'episode': ('episodeTitle', {str}), 'episode_number': ('episodeNumber', {int_or_none}), }, get_all=False), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/aenetworks.py
yt_dlp/extractor/aenetworks.py
import json from .theplatform import ThePlatformIE from ..utils import ( ExtractorError, GeoRestrictedError, int_or_none, remove_start, traverse_obj, update_url_query, ) class AENetworksBaseIE(ThePlatformIE): # XXX: Do not subclass from concrete IE _BASE_URL_REGEX = r'''(?x)https?:// (?:(?:www|play|watch)\.)? (?P<domain> (?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com| fyi\.tv )/''' _THEPLATFORM_KEY = '43jXaGRQud' _THEPLATFORM_SECRET = 'S10BPXHMlb' _DOMAIN_MAP = { 'history.com': ('HISTORY', 'history', 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI1MzZlMTQ3ZS0zMzFhLTQxY2YtYTMwNC01MDA2NzNlOGYwYjYiLCJuYmYiOjE1Mzg2NjMzMDksImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTM4NjYzMzA5fQ.n24-FVHLGXJe2D4atIQZ700aiXKIajKh5PWFoHJ40Az4itjtwwSFHnvufnoal3T8lYkwNLxce7H-IEGxIykRkZEdwq09pMKMT-ft9ASzE4vQ8fAWbf5ZgDME86x4Jq_YaxkRc9Ne0eShGhl8fgTJHvk07sfWcol61HJ7kU7K8FzzcHR0ucFQgA5VNd8RyjoGWY7c6VxnXR214LOpXsywmit04-vGJC102b_WA2EQfqI93UzG6M6l0EeV4n0_ijP3s8_i8WMJZ_uwnTafCIY6G_731i01dKXDLSFzG1vYglAwDa8DTcdrAAuIFFDF6QNGItCCmwbhjufjmoeVb7R1Gg'), 'aetv.com': ('AETV', 'aetv', 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI5Y2IwNjg2Yy03ODUxLTRiZDUtODcyMC00MjNlZTg1YTQ1NzMiLCJuYmYiOjE1Mzg2NjMyOTAsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTM4NjYzMjkwfQ.T5Elf0X4TndO4NEgqBas1gDxNHGPVk_daO2Ha5FBzVO6xi3zM7eavdAKfYMCN7gpWYJx03iADaVPtczO_t_aGZczDjpwJHgTUzDgvcLZAVsVDqtDIAMy3S846rPgT6UDbVoxurA7B2VTPm9phjrSXhejvd0LBO8MQL4AZ3sy2VmiPJ2noT1ily5PuHCYlkrT1fheO064duR__Cd9DQ5VTMnKjzY3Cx345CEwKDkUk5gwgxhXM-aY0eblehrq8VD81_aRM_O3tvh7nbTydHOnUpV-k_iKVi49gqz7Sf8zb6Zh5z2Uftn3vYCfE5NQuesitoRMnsH17nW7o_D59hkRgg'), 'mylifetime.com': ('LIFETIME', 'lifetime', 
'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJmODg0MDM1ZC1mZGRmLTRmYjgtYmRkMC05MzRhZDdiYTAwYTciLCJuYmYiOjE1NDkzOTI2NDQsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTQ5MzkyNjQ0fQ.vkTIaCpheKdKQd__2-3ec4qkcpbAhyCTvwe5iTl922ItSQfVhpEJG4wseVSNmBTrpBi0hvLedcw6Hj1_UuzBMVuVcCqLprU-pI8recEwL0u7G-eVkylsxe1OTUm1o3V6OykXQ9KlA-QQLL1neUhdhR1n5B1LZ4cmtBmiEpfgf4rFwXD1ScFylIcaWKLBqHoRBNUmxyTmoXXvn_A-GGSj9eCizFzY8W5uBwUcsoiw2Cr1skx7PbB2RSP1I5DsoIJKG-8XV1KS7MWl-fNLjE-hVAsI9znqfEEFcPBiv3LhCP4Nf4OIs7xAselMn0M0c8igRUZhURWX_hdygUAxkbKFtQ'), 'fyi.tv': ('FYI', 'fyi', 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiIxOGZiOWM3Ny1mYmMzLTQxYTktYmE1Yi1lMzM0ZmUzNzU4NjEiLCJuYmYiOjE1ODc1ODAzNzcsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTg3NTgwMzc3fQ.AYDuipKswmIfLBfOjHRsfc5fMV5NmJUmiJnkpiep4VEw9QiXkygFj4bN06Si5tFc5Mee5TDrGzDpV6iuKbVpLT5kuqXhAn-Wozf5zKPsg_IpdEKO7gsiCq4calt72ct44KTqtKD_hVcoxQU24_HaJsRgXzu3B-6Ff6UrmsXkyvYifYVC9v2DSkdCuA02_IrlllzVT2kRuefUXgL4vQRtTFf77uYa0RKSTG7uVkiQ_AU41eXevKlO2qgtc14Hk5cZ7-ZNrDyMCXYA5ngdIHP7Gs9PWaFXT36PFHI_rC4EfxUABPzjQFxjpP75aX5qn8SH__HbM9q3hoPWgaEaf76qIQ'), 'lifetimemovieclub.com': ('LIFETIMEMOVIECLUB', 'lmc', None), 'historyvault.com': (None, 'historyvault', None), 'biography.com': (None, 'biography', None), } def _extract_aen_smil(self, smil_url, video_id, auth=None): query = { 'mbr': 'true', 'formats': 'M3U+none,MPEG-DASH+none,MPEG4,MP3', } if auth: query['auth'] = auth TP_SMIL_QUERY = [{ 'assetTypes': 'high_video_ak', 'switch': 'hls_high_ak', }, { 'assetTypes': 'high_video_s3', }, { 'assetTypes': 'high_video_s3', 'switch': 'hls_high_fastly', }] formats = [] subtitles = {} last_e = None for q in TP_SMIL_QUERY: q.update(query) m_url = update_url_query(smil_url, q) m_url = self._sign_url(m_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET) try: tp_formats, tp_subtitles = self._extract_theplatform_smil( m_url, video_id, 'Downloading %s SMIL data' % (q.get('switch') or q['assetTypes'])) except ExtractorError as e: if isinstance(e, GeoRestrictedError): raise last_e = e continue 
formats.extend(tp_formats) subtitles = self._merge_subtitles(subtitles, tp_subtitles) if last_e and not formats: raise last_e return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, } def _extract_aetn_info(self, domain, filter_key, filter_value, url): requestor_id, brand, software_statement = self._DOMAIN_MAP[domain] result = self._download_json( f'https://feeds.video.aetnd.com/api/v2/{brand}/videos', filter_value, query={f'filter[{filter_key}]': filter_value}) result = traverse_obj( result, ('results', lambda k, v: k == 0 and v[filter_key] == filter_value), get_all=False) if not result: raise ExtractorError('Show not found in A&E feed (too new?)', expected=True, video_id=remove_start(filter_value, '/')) title = result['title'] video_id = result['id'] media_url = result['publicUrl'] theplatform_metadata = self._download_theplatform_metadata(self._search_regex( r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id) info = self._parse_theplatform_metadata(theplatform_metadata) auth = None if theplatform_metadata.get('AETN$isBehindWall'): resource = self._get_mvpd_resource( requestor_id, theplatform_metadata['title'], theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'), traverse_obj(theplatform_metadata, ('ratings', 0, 'rating'))) auth = self._extract_mvpd_auth( url, video_id, requestor_id, resource, software_statement) info.update(self._extract_aen_smil(media_url, video_id, auth)) info.update({ 'title': title, 'series': result.get('seriesName'), 'season_number': int_or_none(result.get('tvSeasonNumber')), 'episode_number': int_or_none(result.get('tvSeasonEpisodeNumber')), }) return info class AENetworksIE(AENetworksBaseIE): IE_NAME = 'aenetworks' IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault' _VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'''(?P<id> shows/[^/?#]+/season-\d+/episode-\d+| 
(?P<type>movie|special)s/[^/?#]+(?P<extra>/[^/?#]+)?| (?:shows/[^/?#]+/)?videos/[^/?#]+ )''' _TESTS = [{ 'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1', 'info_dict': { 'id': '22253814', 'ext': 'mp4', 'title': 'Winter Is Coming', 'description': 'md5:a40e370925074260b1c8a633c632c63a', 'timestamp': 1338306241, 'upload_date': '20120529', 'uploader': 'AENE-NEW', 'duration': 2592.0, 'thumbnail': r're:https?://.+/.+\.jpg', 'chapters': 'count:5', 'tags': 'count:14', 'categories': ['Mountain Men'], 'episode_number': 1, 'episode': 'Winter Is Coming', 'season': 'Season 1', 'season_number': 1, 'series': 'Mountain Men', 'age_limit': 0, }, 'params': {'skip_download': 'm3u8'}, 'add_ie': ['ThePlatform'], 'skip': 'Geo-restricted - This content is not available in your location.', }, { 'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1', 'info_dict': { 'id': '600587331957', 'ext': 'mp4', 'title': 'Inlawful Entry', 'description': 'md5:57c12115a2b384d883fe64ca50529e08', 'timestamp': 1452634428, 'upload_date': '20160112', 'uploader': 'AENE-NEW', 'duration': 1277.695, 'thumbnail': r're:https?://.+/.+\.jpg', 'chapters': 'count:4', 'tags': 'count:23', 'episode': 'Inlawful Entry', 'episode_number': 1, 'season': 'Season 9', 'season_number': 9, 'series': 'Duck Dynasty', 'age_limit': 0, }, 'params': {'skip_download': 'm3u8'}, 'add_ie': ['ThePlatform'], }, { 'url': 'https://play.mylifetime.com/movies/v-c-andrews-web-of-dreams', 'info_dict': { 'id': '1590627395981', 'ext': 'mp4', 'title': 'VC Andrews\' Web of Dreams', 'description': 'md5:2a8ba13ae64271c79eb65c0577d312ce', 'uploader': 'AENE-NEW', 'age_limit': 14, 'duration': 5253.665, 'thumbnail': r're:https?://.+/.+\.jpg', 'chapters': 'count:8', 'tags': ['lifetime', 'mylifetime', 'lifetime channel', "VC Andrews' Web of Dreams"], 'series': '', 'season': 'Season 0', 'season_number': 0, 'episode': 'VC Andrews\' Web of Dreams', 'episode_number': 0, 'timestamp': 1566489703.0, 'upload_date': '20190822', }, 
'params': {'skip_download': 'm3u8'}, 'add_ie': ['ThePlatform'], }, { 'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story', 'info_dict': { 'id': '1488235587551', 'ext': 'mp4', 'title': 'Hunting JonBenet\'s Killer: The Untold Story', 'description': 'md5:209869425ee392d74fe29201821e48b4', 'uploader': 'AENE-NEW', 'age_limit': 14, 'duration': 5003.903, 'thumbnail': r're:https?://.+/.+\.jpg', 'chapters': 'count:10', 'tags': 'count:11', 'series': '', 'season': 'Season 0', 'season_number': 0, 'episode': 'Hunting JonBenet\'s Killer: The Untold Story', 'episode_number': 0, 'timestamp': 1554987697.0, 'upload_date': '20190411', }, 'params': {'skip_download': 'm3u8'}, 'add_ie': ['ThePlatform'], }, { 'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8', 'only_matching': True, }, { 'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6', 'only_matching': True, }, { 'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie', 'only_matching': True, }, { 'url': 'https://watch.lifetimemovieclub.com/movies/10-year-reunion/full-movie', 'only_matching': True, }, { 'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special', 'only_matching': True, }, { 'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story', 'only_matching': True, }, { 'url': 'http://www.history.com/videos/history-of-valentines-day', 'only_matching': True, }, { 'url': 'https://play.aetv.com/shows/duck-dynasty/videos/best-of-duck-dynasty-getting-quack-in-shape', 'only_matching': True, }] def _real_extract(self, url): domain, canonical, url_type, extra = self._match_valid_url(url).group('domain', 'id', 'type', 'extra') if url_type in ('movie', 'special') and not extra: canonical += f'/full-{url_type}' return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url) class AENetworksListBaseIE(AENetworksBaseIE): def _call_api(self, 
resource, slug, brand, fields): return self._download_json( 'https://yoga.appsvcs.aetnd.com/graphql', slug, query={'brand': brand}, headers={'Content-Type': 'application/json'}, data=json.dumps({ 'query': '''{ %s(slug: "%s") { %s } }''' % (resource, slug, fields), # noqa: UP031 }).encode())['data'][resource] def _real_extract(self, url): domain, slug = self._match_valid_url(url).groups() _, brand, _ = self._DOMAIN_MAP[domain] playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS) base_url = f'http://watch.{domain}' entries = [] for item in (playlist.get(self._ITEMS_KEY) or []): doc = self._get_doc(item) canonical = doc.get('canonical') if not canonical: continue entries.append(self.url_result( base_url + canonical, AENetworksIE.ie_key(), doc.get('id'))) description = None if self._PLAYLIST_DESCRIPTION_KEY: description = playlist.get(self._PLAYLIST_DESCRIPTION_KEY) return self.playlist_result( entries, playlist.get('id'), playlist.get(self._PLAYLIST_TITLE_KEY), description) class AENetworksCollectionIE(AENetworksListBaseIE): IE_NAME = 'aenetworks:collection' _VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'(?:[^/]+/)*(?:list|collections)/(?P<id>[^/?#&]+)/?(?:[?#&]|$)' _TESTS = [{ 'url': 'https://watch.historyvault.com/list/america-the-story-of-us', 'info_dict': { 'id': '282', 'title': 'America The Story of Us', }, 'playlist_mincount': 12, }, { 'url': 'https://watch.historyvault.com/shows/america-the-story-of-us-2/season-1/list/america-the-story-of-us', 'only_matching': True, }, { 'url': 'https://www.historyvault.com/collections/mysteryquest', 'only_matching': True, }] _RESOURCE = 'list' _ITEMS_KEY = 'items' _PLAYLIST_TITLE_KEY = 'display_title' _PLAYLIST_DESCRIPTION_KEY = None _FIELDS = '''id display_title items { ... 
on ListVideoItem { doc { canonical id } } }''' def _get_doc(self, item): return item.get('doc') or {} class AENetworksShowIE(AENetworksListBaseIE): IE_NAME = 'aenetworks:show' _VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'shows/(?P<id>[^/?#&]+)/?(?:[?#&]|$)' _TESTS = [{ 'url': 'http://www.history.com/shows/ancient-aliens', 'info_dict': { 'id': 'SERIES1574', 'title': 'Ancient Aliens', 'description': 'md5:3f6d74daf2672ff3ae29ed732e37ea7f', }, 'playlist_mincount': 150, }] _RESOURCE = 'series' _ITEMS_KEY = 'episodes' _PLAYLIST_TITLE_KEY = 'title' _PLAYLIST_DESCRIPTION_KEY = 'description' _FIELDS = '''description id title episodes { canonical id }''' def _get_doc(self, item): return item class HistoryTopicIE(AENetworksBaseIE): IE_NAME = 'history:topic' IE_DESC = 'History.com Topic' _VALID_URL = r'https?://(?:www\.)?history\.com/topics/[^/]+/(?P<id>[\w+-]+?)-video' _TESTS = [{ 'url': 'https://www.history.com/topics/valentines-day/history-of-valentines-day-video', 'info_dict': { 'id': '40700995724', 'ext': 'mp4', 'title': 'History of Valentine’s Day', 'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7', 'timestamp': 1375819729, 'upload_date': '20130806', 'uploader': 'AENE-NEW', }, 'params': { # m3u8 download 'skip_download': True, }, 'add_ie': ['ThePlatform'], }] def _real_extract(self, url): display_id = self._match_id(url) return self.url_result( 'http://www.history.com/videos/' + display_id, AENetworksIE.ie_key()) class HistoryPlayerIE(AENetworksBaseIE): IE_NAME = 'history:player' _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:history|biography)\.com)/player/(?P<id>\d+)' _TESTS = [] def _real_extract(self, url): domain, video_id = self._match_valid_url(url).groups() return self._extract_aetn_info(domain, 'id', video_id, url) class BiographyIE(AENetworksBaseIE): _VALID_URL = r'https?://(?:www\.)?biography\.com/video/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'https://www.biography.com/video/vincent-van-gogh-full-episode-2075049808', 'info_dict': { 'id': '30322987', 
'ext': 'mp4', 'title': 'Vincent Van Gogh - Full Episode', 'description': 'A full biography about the most influential 20th century painter, Vincent Van Gogh.', 'timestamp': 1311970571, 'upload_date': '20110729', 'uploader': 'AENE-NEW', }, 'params': { # m3u8 download 'skip_download': True, }, 'add_ie': ['ThePlatform'], 'skip': '404 Not Found', }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) player_url = self._search_regex( rf'<phoenix-iframe[^>]+src="({HistoryPlayerIE._VALID_URL})', webpage, 'player URL') return self.url_result(player_url, HistoryPlayerIE.ie_key())
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/zattoo.py
yt_dlp/extractor/zattoo.py
import re import uuid from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, join_nonempty, try_get, url_or_none, urlencode_postdata, ) class ZattooPlatformBaseIE(InfoExtractor): _power_guide_hash = None def _host_url(self): return 'https://%s' % (self._API_HOST if hasattr(self, '_API_HOST') else self._HOST) def _real_initialize(self): if not self._power_guide_hash: self.raise_login_required('An account is needed to access this media', method='password') def _perform_login(self, username, password): try: data = self._download_json( f'{self._host_url()}/zapi/v2/account/login', None, 'Logging in', data=urlencode_postdata({ 'login': username, 'password': password, 'remember': 'true', }), headers={ 'Referer': f'{self._host_url()}/login', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', }) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 400: raise ExtractorError( 'Unable to login: incorrect username and/or password', expected=True) raise self._power_guide_hash = data['session']['power_guide_hash'] def _initialize_pre_login(self): session_token = self._download_json( f'{self._host_url()}/token.json', None, 'Downloading session token')['session_token'] # Will setup appropriate cookies self._request_webpage( f'{self._host_url()}/zapi/v3/session/hello', None, 'Opening session', data=urlencode_postdata({ 'uuid': str(uuid.uuid4()), 'lang': 'en', 'app_version': '1.8.2', 'format': 'json', 'client_app_token': session_token, })) def _extract_video_id_from_recording(self, recid): playlist = self._download_json( f'{self._host_url()}/zapi/v2/playlist', recid, 'Downloading playlist') try: return next( str(item['program_id']) for item in playlist['recordings'] if item.get('program_id') and str(item.get('id')) == recid) except (StopIteration, KeyError): raise ExtractorError('Could not extract video id from recording') def _extract_cid(self, video_id, 
channel_name): channel_groups = self._download_json( f'{self._host_url()}/zapi/v2/cached/channels/{self._power_guide_hash}', video_id, 'Downloading channel list', query={'details': False})['channel_groups'] channel_list = [] for chgrp in channel_groups: channel_list.extend(chgrp['channels']) try: return next( chan['cid'] for chan in channel_list if chan.get('cid') and ( chan.get('display_alias') == channel_name or chan.get('cid') == channel_name)) except StopIteration: raise ExtractorError('Could not extract channel id') def _extract_cid_and_video_info(self, video_id): data = self._download_json( f'{self._host_url()}/zapi/v2/cached/program/power_details/{self._power_guide_hash}', video_id, 'Downloading video information', query={ 'program_ids': video_id, 'complete': True, }) p = data['programs'][0] cid = p['cid'] info_dict = { 'id': video_id, 'title': p.get('t') or p['et'], 'description': p.get('d'), 'thumbnail': p.get('i_url'), 'creator': p.get('channel_name'), 'episode': p.get('et'), 'episode_number': int_or_none(p.get('e_no')), 'season_number': int_or_none(p.get('s_no')), 'release_year': int_or_none(p.get('year')), 'categories': try_get(p, lambda x: x['c'], list), 'tags': try_get(p, lambda x: x['g'], list), } return cid, info_dict def _extract_ondemand_info(self, ondemand_id): """ @returns (ondemand_token, ondemand_type, info_dict) """ data = self._download_json( f'{self._host_url()}/zapi/vod/movies/{ondemand_id}', ondemand_id, 'Downloading ondemand information') info_dict = { 'id': ondemand_id, 'title': data.get('title'), 'description': data.get('description'), 'duration': int_or_none(data.get('duration')), 'release_year': int_or_none(data.get('year')), 'episode_number': int_or_none(data.get('episode_number')), 'season_number': int_or_none(data.get('season_number')), 'categories': try_get(data, lambda x: x['categories'], list), } return data['terms_catalog'][0]['terms'][0]['token'], data['type'], info_dict def _extract_formats(self, cid, video_id, 
record_id=None, ondemand_id=None, ondemand_termtoken=None, ondemand_type=None, is_live=False): postdata_common = { 'https_watch_urls': True, } if is_live: postdata_common.update({'timeshift': 10800}) url = f'{self._host_url()}/zapi/watch/live/{cid}' elif record_id: url = f'{self._host_url()}/zapi/watch/recording/{record_id}' elif ondemand_id: postdata_common.update({ 'teasable_id': ondemand_id, 'term_token': ondemand_termtoken, 'teasable_type': ondemand_type, }) url = f'{self._host_url()}/zapi/watch/vod/video' else: url = f'{self._host_url()}/zapi/v3/watch/replay/{cid}/{video_id}' formats = [] subtitles = {} for stream_type in ('dash', 'hls7'): postdata = postdata_common.copy() postdata['stream_type'] = stream_type data = self._download_json( url, video_id, f'Downloading {stream_type.upper()} formats', data=urlencode_postdata(postdata), fatal=False) if not data: continue watch_urls = try_get( data, lambda x: x['stream']['watch_urls'], list) if not watch_urls: continue for watch in watch_urls: if not isinstance(watch, dict): continue watch_url = url_or_none(watch.get('url')) if not watch_url: continue audio_channel = watch.get('audio_channel') preference = 1 if audio_channel == 'A' else None format_id = join_nonempty(stream_type, watch.get('maxrate'), audio_channel) if stream_type.startswith('dash'): this_formats, subs = self._extract_mpd_formats_and_subtitles( watch_url, video_id, mpd_id=format_id, fatal=False) self._merge_subtitles(subs, target=subtitles) elif stream_type.startswith('hls'): this_formats, subs = self._extract_m3u8_formats_and_subtitles( watch_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False) self._merge_subtitles(subs, target=subtitles) elif stream_type == 'hds': this_formats = self._extract_f4m_formats( watch_url, video_id, f4m_id=format_id, fatal=False) elif stream_type == 'smooth_playready': this_formats = self._extract_ism_formats( watch_url, video_id, ism_id=format_id, fatal=False) else: assert False for 
this_format in this_formats: this_format['quality'] = preference formats.extend(this_formats) return formats, subtitles def _extract_video(self, video_id, record_id=None): cid, info_dict = self._extract_cid_and_video_info(video_id) info_dict['formats'], info_dict['subtitles'] = self._extract_formats(cid, video_id, record_id=record_id) return info_dict def _extract_live(self, channel_name): cid = self._extract_cid(channel_name, channel_name) formats, subtitles = self._extract_formats(cid, cid, is_live=True) return { 'id': channel_name, 'title': channel_name, 'is_live': True, 'formats': formats, 'subtitles': subtitles, } def _extract_record(self, record_id): video_id = self._extract_video_id_from_recording(record_id) cid, info_dict = self._extract_cid_and_video_info(video_id) info_dict['formats'], info_dict['subtitles'] = self._extract_formats(cid, video_id, record_id=record_id) return info_dict def _extract_ondemand(self, ondemand_id): ondemand_termtoken, ondemand_type, info_dict = self._extract_ondemand_info(ondemand_id) info_dict['formats'], info_dict['subtitles'] = self._extract_formats( None, ondemand_id, ondemand_id=ondemand_id, ondemand_termtoken=ondemand_termtoken, ondemand_type=ondemand_type) return info_dict def _real_extract(self, url): video_id, record_id = self._match_valid_url(url).groups() return getattr(self, f'_extract_{self._TYPE}')(video_id or record_id) def _create_valid_url(host, match, qs, base_re=None): match_base = fr'|{base_re}/(?P<vid1>{match})' if base_re else '(?P<vid1>)' return rf'''(?x)https?://(?:www\.)?{re.escape(host)}/(?: [^?#]+\?(?:[^#]+&)?{qs}=(?P<vid2>{match}) {match_base} )''' class ZattooBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'zattoo' _HOST = 'zattoo.com' class ZattooIE(ZattooBaseIE): _VALID_URL = _create_valid_url(ZattooBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://zattoo.com/program/zdf/250170418', 'info_dict': { 'id': '250170418', 'ext': 'mp4', 'title': 'Markus 
Lanz', 'description': 'md5:e41cb1257de008ca62a73bb876ffa7fc', 'thumbnail': 're:http://images.zattic.com/cms/.+/format_480x360.jpg', 'creator': 'ZDF HD', 'release_year': 2022, 'episode': 'Folge 1655', 'categories': 'count:1', 'tags': 'count:2', }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://zattoo.com/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://zattoo.com/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class ZattooLiveIE(ZattooBaseIE): _VALID_URL = _create_valid_url(ZattooBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://zattoo.com/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://zattoo.com/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if ZattooIE.suitable(url) else super().suitable(url) class ZattooMoviesIE(ZattooBaseIE): _VALID_URL = _create_valid_url(ZattooBaseIE._HOST, r'\w+', 'movie_id', 'vod/movies') _TYPE = 'ondemand' _TESTS = [{ 'url': 'https://zattoo.com/vod/movies/7521', 'only_matching': True, }, { 'url': 'https://zattoo.com/ondemand?movie_id=7521&term_token=9f00f43183269484edde', 'only_matching': True, }] class ZattooRecordingsIE(ZattooBaseIE): _VALID_URL = _create_valid_url('zattoo.com', r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://zattoo.com/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://zattoo.com/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class NetPlusTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'netplus' _HOST = 'netplus.tv' _API_HOST = f'www.{_HOST}' class NetPlusTVIE(NetPlusTVBaseIE): _VALID_URL = _create_valid_url(NetPlusTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://netplus.tv/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://netplus.tv/guide/german?channel=srf1&program=169860555', 'only_matching': 
True, }] class NetPlusTVLiveIE(NetPlusTVBaseIE): _VALID_URL = _create_valid_url(NetPlusTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://netplus.tv/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://netplus.tv/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if NetPlusTVIE.suitable(url) else super().suitable(url) class NetPlusTVRecordingsIE(NetPlusTVBaseIE): _VALID_URL = _create_valid_url(NetPlusTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://netplus.tv/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://netplus.tv/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class MNetTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'mnettv' _HOST = 'tvplus.m-net.de' class MNetTVIE(MNetTVBaseIE): _VALID_URL = _create_valid_url(MNetTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://tvplus.m-net.de/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://tvplus.m-net.de/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class MNetTVLiveIE(MNetTVBaseIE): _VALID_URL = _create_valid_url(MNetTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://tvplus.m-net.de/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://tvplus.m-net.de/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if MNetTVIE.suitable(url) else super().suitable(url) class MNetTVRecordingsIE(MNetTVBaseIE): _VALID_URL = _create_valid_url(MNetTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://tvplus.m-net.de/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://tvplus.m-net.de/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class 
WalyTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'walytv' _HOST = 'player.waly.tv' class WalyTVIE(WalyTVBaseIE): _VALID_URL = _create_valid_url(WalyTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://player.waly.tv/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://player.waly.tv/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class WalyTVLiveIE(WalyTVBaseIE): _VALID_URL = _create_valid_url(WalyTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://player.waly.tv/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://player.waly.tv/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if WalyTVIE.suitable(url) else super().suitable(url) class WalyTVRecordingsIE(WalyTVBaseIE): _VALID_URL = _create_valid_url(WalyTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://player.waly.tv/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://player.waly.tv/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class BBVTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'bbvtv' _HOST = 'bbv-tv.net' _API_HOST = f'www.{_HOST}' class BBVTVIE(BBVTVBaseIE): _VALID_URL = _create_valid_url(BBVTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://bbv-tv.net/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://bbv-tv.net/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class BBVTVLiveIE(BBVTVBaseIE): _VALID_URL = _create_valid_url(BBVTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://bbv-tv.net/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://bbv-tv.net/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if 
BBVTVIE.suitable(url) else super().suitable(url) class BBVTVRecordingsIE(BBVTVBaseIE): _VALID_URL = _create_valid_url(BBVTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://bbv-tv.net/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://bbv-tv.net/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class VTXTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'vtxtv' _HOST = 'vtxtv.ch' _API_HOST = f'www.{_HOST}' class VTXTVIE(VTXTVBaseIE): _VALID_URL = _create_valid_url(VTXTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://vtxtv.ch/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://vtxtv.ch/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class VTXTVLiveIE(VTXTVBaseIE): _VALID_URL = _create_valid_url(VTXTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://vtxtv.ch/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://vtxtv.ch/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if VTXTVIE.suitable(url) else super().suitable(url) class VTXTVRecordingsIE(VTXTVBaseIE): _VALID_URL = _create_valid_url(VTXTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://vtxtv.ch/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://vtxtv.ch/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class GlattvisionTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'glattvisiontv' _HOST = 'iptv.glattvision.ch' class GlattvisionTVIE(GlattvisionTVBaseIE): _VALID_URL = _create_valid_url(GlattvisionTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://iptv.glattvision.ch/program/daserste/210177916', 'only_matching': True, }, { 'url': 
'https://iptv.glattvision.ch/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class GlattvisionTVLiveIE(GlattvisionTVBaseIE): _VALID_URL = _create_valid_url(GlattvisionTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://iptv.glattvision.ch/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://iptv.glattvision.ch/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if GlattvisionTVIE.suitable(url) else super().suitable(url) class GlattvisionTVRecordingsIE(GlattvisionTVBaseIE): _VALID_URL = _create_valid_url(GlattvisionTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://iptv.glattvision.ch/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://iptv.glattvision.ch/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class SAKTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'saktv' _HOST = 'saktv.ch' _API_HOST = f'www.{_HOST}' class SAKTVIE(SAKTVBaseIE): _VALID_URL = _create_valid_url(SAKTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://saktv.ch/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://saktv.ch/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class SAKTVLiveIE(SAKTVBaseIE): _VALID_URL = _create_valid_url(SAKTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://saktv.ch/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://saktv.ch/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if SAKTVIE.suitable(url) else super().suitable(url) class SAKTVRecordingsIE(SAKTVBaseIE): _VALID_URL = _create_valid_url(SAKTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://saktv.ch/recordings?recording=193615508', 'only_matching': True, }, { 'url': 
'https://saktv.ch/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class EWETVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'ewetv' _HOST = 'tvonline.ewe.de' class EWETVIE(EWETVBaseIE): _VALID_URL = _create_valid_url(EWETVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://tvonline.ewe.de/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://tvonline.ewe.de/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class EWETVLiveIE(EWETVBaseIE): _VALID_URL = _create_valid_url(EWETVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://tvonline.ewe.de/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://tvonline.ewe.de/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if EWETVIE.suitable(url) else super().suitable(url) class EWETVRecordingsIE(EWETVBaseIE): _VALID_URL = _create_valid_url(EWETVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://tvonline.ewe.de/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://tvonline.ewe.de/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class QuantumTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'quantumtv' _HOST = 'quantum-tv.com' _API_HOST = f'www.{_HOST}' class QuantumTVIE(QuantumTVBaseIE): _VALID_URL = _create_valid_url(QuantumTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://quantum-tv.com/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://quantum-tv.com/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class QuantumTVLiveIE(QuantumTVBaseIE): _VALID_URL = _create_valid_url(QuantumTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://quantum-tv.com/channels/german?channel=srf_zwei', 
'only_matching': True, }, { 'url': 'https://quantum-tv.com/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if QuantumTVIE.suitable(url) else super().suitable(url) class QuantumTVRecordingsIE(QuantumTVBaseIE): _VALID_URL = _create_valid_url(QuantumTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://quantum-tv.com/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://quantum-tv.com/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class OsnatelTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'osnateltv' _HOST = 'tvonline.osnatel.de' class OsnatelTVIE(OsnatelTVBaseIE): _VALID_URL = _create_valid_url(OsnatelTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://tvonline.osnatel.de/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://tvonline.osnatel.de/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class OsnatelTVLiveIE(OsnatelTVBaseIE): _VALID_URL = _create_valid_url(OsnatelTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://tvonline.osnatel.de/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://tvonline.osnatel.de/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if OsnatelTVIE.suitable(url) else super().suitable(url) class OsnatelTVRecordingsIE(OsnatelTVBaseIE): _VALID_URL = _create_valid_url(OsnatelTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://tvonline.osnatel.de/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://tvonline.osnatel.de/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class EinsUndEinsTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = '1und1tv' _HOST = '1und1.tv' _API_HOST = f'www.{_HOST}' class EinsUndEinsTVIE(EinsUndEinsTVBaseIE): _VALID_URL 
= _create_valid_url(EinsUndEinsTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://1und1.tv/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://1und1.tv/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class EinsUndEinsTVLiveIE(EinsUndEinsTVBaseIE): _VALID_URL = _create_valid_url(EinsUndEinsTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://1und1.tv/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://1und1.tv/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if EinsUndEinsTVIE.suitable(url) else super().suitable(url) class EinsUndEinsTVRecordingsIE(EinsUndEinsTVBaseIE): _VALID_URL = _create_valid_url(EinsUndEinsTVBaseIE._HOST, r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://1und1.tv/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://1und1.tv/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }] class SaltTVBaseIE(ZattooPlatformBaseIE): _NETRC_MACHINE = 'salttv' _HOST = 'tv.salt.ch' class SaltTVIE(SaltTVBaseIE): _VALID_URL = _create_valid_url(SaltTVBaseIE._HOST, r'\d+', 'program', '(?:program|watch)/[^/]+') _TYPE = 'video' _TESTS = [{ 'url': 'https://tv.salt.ch/program/daserste/210177916', 'only_matching': True, }, { 'url': 'https://tv.salt.ch/guide/german?channel=srf1&program=169860555', 'only_matching': True, }] class SaltTVLiveIE(SaltTVBaseIE): _VALID_URL = _create_valid_url(SaltTVBaseIE._HOST, r'[^/?&#]+', 'channel', 'live') _TYPE = 'live' _TESTS = [{ 'url': 'https://tv.salt.ch/channels/german?channel=srf_zwei', 'only_matching': True, }, { 'url': 'https://tv.salt.ch/live/srf1', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if SaltTVIE.suitable(url) else super().suitable(url) class SaltTVRecordingsIE(SaltTVBaseIE): _VALID_URL = _create_valid_url(SaltTVBaseIE._HOST, 
r'\d+', 'recording') _TYPE = 'record' _TESTS = [{ 'url': 'https://tv.salt.ch/recordings?recording=193615508', 'only_matching': True, }, { 'url': 'https://tv.salt.ch/tc/ptc_recordings_all_recordings?recording=193615420', 'only_matching': True, }]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sport5.py
yt_dlp/extractor/sport5.py
from .common import InfoExtractor from ..utils import ExtractorError class Sport5IE(InfoExtractor): _VALID_URL = r'https?://(?:www|vod)?\.sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)' _TESTS = [ { 'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1', 'info_dict': { 'id': 's5-Y59xx1-GUh2', 'ext': 'mp4', 'title': 'ולנסיה-קורדובה 0:3', 'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה', 'duration': 228, 'categories': list, }, 'skip': 'Blocked outside of Israel', }, { 'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE', 'info_dict': { 'id': 's5-SiXxx1-hKh2', 'ext': 'mp4', 'title': 'GOALS_CELTIC_270914.mp4', 'description': '', 'duration': 87, 'categories': list, }, 'skip': 'Blocked outside of Israel', }, ] def _real_extract(self, url): mobj = self._match_valid_url(url) media_id = mobj.group('id') webpage = self._download_webpage(url, media_id) video_id = self._html_search_regex(r'clipId=([\w-]+)', webpage, 'video id') metadata = self._download_xml( f'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/{video_id}/HDS/metadata.xml', video_id) error = metadata.find('./Error') if error is not None: raise ExtractorError( '{} returned error: {} - {}'.format( self.IE_NAME, error.find('./Name').text, error.find('./Description').text), expected=True) title = metadata.find('./Title').text description = metadata.find('./Description').text duration = int(metadata.find('./Duration').text) posters_el = metadata.find('./PosterLinks') thumbnails = [{ 'url': thumbnail.text, 'width': int(thumbnail.get('width')), 'height': int(thumbnail.get('height')), } for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else [] categories_el = metadata.find('./Categories') categories = [ cat.get('name') for cat in categories_el.findall('./Category') ] if categories_el is not None else [] formats = [{ 'url': fmt.text, 'ext': 'mp4', 'vbr': int(fmt.get('bitrate')), 'width': int(fmt.get('width')), 
'height': int(fmt.get('height')), } for fmt in metadata.findall('./PlaybackLinks/FileURL')] return { 'id': video_id, 'title': title, 'description': description, 'thumbnails': thumbnails, 'duration': duration, 'categories': categories, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/adultswim.py
yt_dlp/extractor/adultswim.py
import json

from .turner import TurnerBaseIE
from ..utils import (
    determine_ext,
    float_or_none,
    int_or_none,
    mimetype2ext,
    parse_age_limit,
    parse_iso8601,
    strip_or_none,
    try_get,
)


class AdultSwimIE(TurnerBaseIE):
    """Extractor for adultswim.com episode pages and show playlists.

    A URL with both a show path and an episode path yields a single video;
    a URL with only a show path yields a playlist of that show's episodes.
    """

    _VALID_URL = r'https?://(?:www\.)?adultswim\.com/videos/(?P<show_path>[^/?#]+)(?:/(?P<episode_path>[^/?#]+))?'

    _TESTS = [{
        'url': 'http://adultswim.com/videos/rick-and-morty/pilot',
        'info_dict': {
            'id': 'rQxZvXQ4ROaSOqq-or2Mow',
            'ext': 'mp4',
            'title': 'Rick and Morty - Pilot',
            'description': 'Rick moves in with his daughter\'s family and establishes himself as a bad influence on his grandson, Morty.',
            'timestamp': 1543294800,
            'upload_date': '20181127',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download f4m manifest'],
    }, {
        'url': 'http://www.adultswim.com/videos/tim-and-eric-awesome-show-great-job/dr-steve-brule-for-your-wine/',
        'info_dict': {
            'id': 'sY3cMUR_TbuE4YmdjzbIcQ',
            'ext': 'mp4',
            'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
            'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.',
            'upload_date': '20080124',
            'timestamp': 1201150800,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }, {
        'url': 'http://www.adultswim.com/videos/decker/inside-decker-a-new-hero/',
        'info_dict': {
            'id': 'I0LQFQkaSUaFp8PnAWHhoQ',
            'ext': 'mp4',
            'title': 'Decker - Inside Decker: A New Hero',
            'description': 'The guys recap the conclusion of the season. They announce a new hero, take a peek into the Victorville Film Archive and welcome back the talented James Dean.',
            'timestamp': 1469480460,
            'upload_date': '20160725',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download f4m manifest'],
    }, {
        'url': 'http://www.adultswim.com/videos/attack-on-titan',
        'info_dict': {
            'id': 'attack-on-titan',
            'title': 'Attack on Titan',
            'description': 'md5:41caa9416906d90711e31dc00cb7db7e',
        },
        'playlist_mincount': 12,
    }, {
        'url': 'http://www.adultswim.com/videos/streams/williams-stream',
        'info_dict': {
            'id': 'd8DEBj7QRfetLsRgFnGEyg',
            'ext': 'mp4',
            'title': r're:^Williams Stream \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
            'description': 'original programming',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': '404 Not Found',
    }]

    # Opaque token passed through to _extract_ngtv_info (TurnerBaseIE)
    _SOFTWARE_STATEMENT = 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiIwNjg5ZmU2My00OTc5LTQxZmQtYWYxNC1hYjVlNmJjNWVkZWIiLCJuYmYiOjE1MzcxOTA2NzQsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTM3MTkwNjc0fQ.Xl3AEduM0s1TxDQ6-XssdKIiLm261hhsEv1C1yo_nitIajZThSI9rXILqtIzO0aujoHhdzUnu_dUCq9ffiSBzEG632tTa1la-5tegHtce80cMhewBN4n2t8n9O5tiaPx8MPY8ALdm5wS7QzWE6DO_LTJKgE8Bl7Yv-CWJT4q4SywtNiQWLVOuhBRnDyfsRezxRwptw8qTn9dv5ZzUrVJaby5fDZ_nOncMKvegOgaKd5KEuCAGQ-mg-PSuValMjGuf6FwDguGaK7IyI5Y2oOrzXmD4Dj7q4WBg8w9QoZhtLeAU56mcsGILolku2R5FHlVLO9xhjResyt-pfmegOkpSw'

    def _real_extract(self, url):
        show_path, episode_path = self._match_valid_url(url).groups()
        display_id = episode_path or show_path
        # Two-stage %-formatting: the outer template injects the show slug now
        # and leaves '%%s' as a '%s' placeholder for the branch-specific body below.
        query = '''query {
  getShowBySlug(slug:"%s") {
    %%s
  }
}''' % show_path  # noqa: UP031
        if episode_path:
            # Single-episode query: fetch the episode's metadata fields
            query = query % '''title
      getVideoBySlug(slug:"%s") {
        _id
        auth
        description
        duration
        episodeNumber
        launchDate
        mediaID
        seasonNumber
        poster
        title
        tvRating
      }''' % episode_path
        else:
            # Show-level query: list episode slugs to build a playlist
            query = query % '''metaDescription
      title
      videos(first:1000,sort:["episode_number"]) {
        edges {
          node {
             _id
             slug
          }
        }
      }'''
        show_data = self._download_json(
            'https://www.adultswim.com/api/search', display_id,
            data=json.dumps({'query': query}).encode(),
            headers={'Content-Type': 'application/json'})['data']['getShowBySlug']
        if episode_path:
            video_data = show_data['getVideoBySlug']
            video_id = video_data['_id']
            episode_title = title = video_data['title']
            series = show_data.get('title')
            if series:
                title = f'{series} - {title}'
            info = {
                'id': video_id,
                'title': title,
                'description': strip_or_none(video_data.get('description')),
                'duration': float_or_none(video_data.get('duration')),
                'formats': [],
                'subtitles': {},
                'age_limit': parse_age_limit(video_data.get('tvRating')),
                'thumbnail': video_data.get('poster'),
                'timestamp': parse_iso8601(video_data.get('launchDate')),
                'series': series,
                'season_number': int_or_none(video_data.get('seasonNumber')),
                'episode': episode_title,
                'episode_number': int_or_none(video_data.get('episodeNumber')),
            }

            auth = video_data.get('auth')
            media_id = video_data.get('mediaID')
            if media_id:
                # Turner NGTV path (handles TV-provider auth when 'auth' is set)
                info.update(self._extract_ngtv_info(media_id, {
                    # CDN_TOKEN_APP_ID from:
                    # https://d2gg02c3xr550i.cloudfront.net/assets/asvp.e9c8bef24322d060ef87.bundle.js
                    'appId': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhcHBJZCI6ImFzLXR2ZS1kZXNrdG9wLXB0enQ2bSIsInByb2R1Y3QiOiJ0dmUiLCJuZXR3b3JrIjoiYXMiLCJwbGF0Zm9ybSI6ImRlc2t0b3AiLCJpYXQiOjE1MzI3MDIyNzl9.BzSCk-WYOZ2GMCIaeVb8zWnzhlgnXuJTCu0jGp_VaZE',
                }, self._SOFTWARE_STATEMENT, {
                    'url': url,
                    'site_name': 'AdultSwim',
                    'auth_required': auth,
                }))
            if not auth:
                # Unauthenticated episodes also expose direct stream assets
                extract_data = self._download_json(
                    'https://www.adultswim.com/api/shows/v1/videos/' + video_id,
                    video_id, query={'fields': 'stream'}, fatal=False) or {}
                assets = try_get(extract_data, lambda x: x['data']['video']['stream']['assets'], list) or []
                for asset in assets:
                    asset_url = asset.get('url')
                    if not asset_url:
                        continue
                    ext = determine_ext(asset_url, mimetype2ext(asset.get('mime_type')))
                    if ext == 'm3u8':
                        fmts, subs = self._extract_m3u8_formats_and_subtitles(
                            asset_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
                        info['formats'].extend(fmts)
                        self._merge_subtitles(subs, target=info['subtitles'])
                    elif ext == 'f4m':
                        # f4m is deliberately skipped (kept for reference below)
                        continue
                        # info['formats'].extend(self._extract_f4m_formats(
                        #     asset_url, video_id, f4m_id='hds', fatal=False))
                    elif ext in ('scc', 'ttml', 'vtt'):
                        info['subtitles'].setdefault('en', []).append({
                            'url': asset_url,
                        })
            return info
        else:
            # Show page: emit one url_result per episode slug
            entries = []
            for edge in show_data.get('videos', {}).get('edges', []):
                video = edge.get('node') or {}
                slug = video.get('slug')
                if not slug:
                    continue
                entries.append(self.url_result(
                    f'http://adultswim.com/videos/{show_path}/{slug}',
                    'AdultSwim', video.get('_id')))
            return self.playlist_result(
                entries, show_path, show_data.get('title'),
                strip_or_none(show_data.get('metaDescription')))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/filmweb.py
yt_dlp/extractor/filmweb.py
from .common import InfoExtractor


class FilmwebIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?filmweb\.no/(?P<type>trailere|filmnytt)/article(?P<id>\d+)\.ece'
    _TEST = {
        'url': 'http://www.filmweb.no/trailere/article1264921.ece',
        'md5': 'e353f47df98e557d67edaceda9dece89',
        'info_dict': {
            'id': '13033574',
            'ext': 'mp4',
            'title': 'Det som en gang var',
            'upload_date': '20160316',
            'timestamp': 1458140101,
            'uploader_id': '12639966',
            'uploader': 'Live Roaldset',
        },
    }

    def _real_extract(self, url):
        """Resolve a filmweb.no article to its embedded TwentyThree player URL."""
        kind, article_id = self._match_valid_url(url).groups()

        # News articles ('filmnytt') only reference the trailer article id in
        # their markup; trailer pages carry it in the URL directly.
        if kind == 'filmnytt':
            webpage = self._download_webpage(url, article_id)
            article_id = self._search_regex(r'data-videoid="(\d+)"', webpage, 'article id')

        embed_code = self._download_json(
            'https://www.filmweb.no/template_v2/ajax/json_trailerEmbed.jsp',
            article_id, query={
                'articleId': article_id,
            })['embedCode']

        player_url = self._proto_relative_url(self._search_regex(
            r'<iframe[^>]+src="([^"]+)', embed_code, 'iframe url'))

        # Delegate actual format extraction to the TwentyThreeVideo extractor
        return {
            '_type': 'url_transparent',
            'id': article_id,
            'url': player_url,
            'ie_key': 'TwentyThreeVideo',
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/cgtn.py
yt_dlp/extractor/cgtn.py
from .common import InfoExtractor
from ..utils import (
    try_get,
    unified_timestamp,
)


class CGTNIE(InfoExtractor):
    _VALID_URL = r'https?://news\.cgtn\.com/news/[0-9]{4}-[0-9]{2}-[0-9]{2}/[a-zA-Z0-9-]+-(?P<id>[a-zA-Z0-9-]+)/index\.html'
    _TESTS = [
        {
            'url': 'https://news.cgtn.com/news/2021-03-09/Up-and-Out-of-Poverty-Ep-1-A-solemn-promise-YuOUaOzGQU/index.html',
            'info_dict': {
                'id': 'YuOUaOzGQU',
                'ext': 'mp4',
                'title': 'Up and Out of Poverty Ep. 1: A solemn promise',
                'thumbnail': r're:^https?://.*\.jpg$',
                'timestamp': 1615295940,
                'upload_date': '20210309',
                'categories': ['Video'],
            },
            'params': {
                'skip_download': True,
            },
        }, {
            'url': 'https://news.cgtn.com/news/2021-06-06/China-Indonesia-vow-to-further-deepen-maritime-cooperation-10REvJCewCY/index.html',
            'info_dict': {
                'id': '10REvJCewCY',
                'ext': 'mp4',
                'title': 'China, Indonesia vow to further deepen maritime cooperation',
                'thumbnail': r're:^https?://.*\.png$',
                'description': 'China and Indonesia vowed to upgrade their cooperation into the maritime sector and also for political security, economy, and cultural and people-to-people exchanges.',
                'creators': ['CGTN'],
                'categories': ['China'],
                'timestamp': 1622950200,
                'upload_date': '20210606',
            },
            'params': {
                'skip_download': False,
            },
        },
    ]

    def _real_extract(self, url):
        """Extract a single CGTN news video (HLS) from its article page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The m3u8 manifest URL sits in a data attribute; note the literal
        # space before '=' which matches the site's markup.
        m3u8_url = self._html_search_regex(
            r'data-video ="(?P<url>.+m3u8)"', webpage, 'download_url')

        published = self._html_search_regex(
            r'<span class="date">\s*(.+?)\s*</span>', webpage, 'datetime_str', fatal=False)
        section = self._html_search_regex(
            r'<span class="section">\s*(.+?)\s*</span>', webpage, 'category', fatal=False)
        byline = self._search_regex(
            r'<div class="news-author-name">\s*(.+?)\s*</div>', webpage, 'author', default=None)

        # The page shows Beijing time (UTC+8); shift to UTC when parseable
        timestamp = unified_timestamp(published)
        if timestamp is not None:
            timestamp -= 8 * 3600

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls'),
            'categories': [section] if section else None,
            'creators': [byline] if byline else None,
            'timestamp': timestamp,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/playtvak.py
yt_dlp/extractor/playtvak.py
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    parse_iso8601,
    qualities,
)


class PlaytvakIE(InfoExtractor):
    IE_DESC = 'Playtvak.cz, iDNES.cz and Lidovky.cz'
    _VALID_URL = r'https?://(?:.+?\.)?(?:playtvak|idnes|lidovky|metro)\.cz/.*\?(?:c|idvideo)=(?P<id>[^&]+)'
    _TESTS = [{
        'url': 'http://www.playtvak.cz/vyzente-vosy-a-srsne-ze-zahrady-dn5-/hodinovy-manzel.aspx?c=A150730_150323_hodinovy-manzel_kuko',
        'md5': '4525ae312c324b4be2f4603cc78ceb4a',
        'info_dict': {
            'id': 'A150730_150323_hodinovy-manzel_kuko',
            'ext': 'mp4',
            'title': 'Vyžeňte vosy a sršně ze zahrady',
            'description': 'md5:4436e61b7df227a093778efb7e373571',
            'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
            'duration': 279,
            'timestamp': 1438732860,
            'upload_date': '20150805',
            'is_live': False,
        },
    }, {  # live video test
        'url': 'http://slowtv.playtvak.cz/planespotting-0pr-/planespotting.aspx?c=A150624_164934_planespotting_cat',
        'info_dict': {
            'id': 'A150624_164934_planespotting_cat',
            'ext': 'flv',
            'title': 're:^Planespotting [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'Sledujte provoz na ranveji Letiště Václava Havla v Praze',
            'is_live': True,
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
    }, {  # another live stream, this one without Misc.videoFLV
        'url': 'https://slowtv.playtvak.cz/zive-sledujte-vlaky-v-primem-prenosu-dwi-/hlavni-nadrazi.aspx?c=A151218_145728_hlavni-nadrazi_plap',
        'info_dict': {
            'id': 'A151218_145728_hlavni-nadrazi_plap',
            'ext': 'flv',
            'title': 're:^Hlavní nádraží [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,  # requires rtmpdump
        },
    }, {  # idnes.cz
        'url': 'http://zpravy.idnes.cz/pes-zavreny-v-aute-rozbijeni-okynek-v-aute-fj5-/domaci.aspx?c=A150809_104116_domaci_pku',
        'md5': '819832ba33cd7016e58a6658577fe289',
        'info_dict': {
            'id': 'A150809_104116_domaci_pku',
            'ext': 'mp4',
            'title': 'Zavřeli jsme mraženou pizzu do auta. Upekla se',
            'description': 'md5:01e73f02329e2e5760bd5eed4d42e3c2',
            'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
            'duration': 39,
            'timestamp': 1438969140,
            'upload_date': '20150807',
            'is_live': False,
        },
    }, {  # lidovky.cz
        'url': 'http://www.lidovky.cz/dalsi-demonstrace-v-praze-o-migraci-duq-/video.aspx?c=A150808_214044_ln-video_ELE',
        'md5': 'c7209ac4ba9d234d4ad5bab7485bcee8',
        'info_dict': {
            'id': 'A150808_214044_ln-video_ELE',
            'ext': 'mp4',
            'title': 'Táhni! Demonstrace proti imigrantům budila emoce',
            'description': 'md5:97c81d589a9491fbfa323c9fa3cca72c',
            'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
            'timestamp': 1439052180,
            'upload_date': '20150808',
            'is_live': False,
        },
    }, {  # metro.cz
        'url': 'http://www.metro.cz/video-pod-billboardem-se-na-vltavske-roztocil-kolotoc-deti-vozil-jen-par-hodin-1hx-/metro-extra.aspx?c=A141111_173251_metro-extra_row',
        'md5': '84fc1deedcac37b7d4a6ccae7c716668',
        'info_dict': {
            'id': 'A141111_173251_metro-extra_row',
            'ext': 'mp4',
            'title': 'Recesisté udělali z billboardu kolotoč',
            'description': 'md5:7369926049588c3989a66c9c1a043c4c',
            'thumbnail': r're:(?i)^https?://.*\.(?:jpg|png)$',
            'timestamp': 1415725500,
            'upload_date': '20141111',
            'is_live': False,
        },
    }, {
        'url': 'http://www.playtvak.cz/embed.aspx?idvideo=V150729_141549_play-porad_kuko',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract a VOD item or live stream from a Playtvak-family page."""
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # The player is initialized via Misc.video(...) / Misc.videoFLV(...);
        # its 'data' argument is the URL of the stream-info endpoint.
        info_url = self._html_search_regex(
            r'Misc\.video(?:FLV)?\(\s*{\s*data\s*:\s*"([^"]+)"', webpage, 'info url')

        parsed_url = urllib.parse.urlparse(info_url)

        qs = urllib.parse.parse_qs(parsed_url.query)
        # reklama=0 presumably disables ad entries; type=js requests the
        # JSONP-style payload parsed below — TODO confirm against the endpoint
        qs.update({
            'reklama': ['0'],
            'type': ['js'],
        })

        info_url = urllib.parse.urlunparse(
            parsed_url._replace(query=urllib.parse.urlencode(qs, True)))

        # Response is JSON wrapped in padding; keep only the outermost {...}
        json_info = self._download_json(
            info_url, video_id,
            transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])

        # Pick the first playable item (VOD 'video' or live 'stream')
        item = None
        for i in json_info['items']:
            if i.get('type') == 'video' or i.get('type') == 'stream':
                item = i
                break
        if not item:
            raise ExtractorError('No suitable stream found')

        quality = qualities(('low', 'middle', 'high'))

        formats = []
        for fmt in item['video']:
            video_url = fmt.get('file')
            if not video_url:
                continue

            format_ = fmt['format']
            format_id = '{}_{}'.format(format_, fmt['quality'])
            preference = None

            if format_ in ('mp4', 'webm'):
                ext = format_
            elif format_ == 'rtmp':
                ext = 'flv'
            elif format_ == 'apple':
                ext = 'mp4'
                # Some streams have mp3 audio which does not play
                # well with ffmpeg filter aac_adtstoasc
                preference = -10
            elif format_ == 'adobe':  # f4m manifest fails with 404 in 80% of requests
                continue
            else:  # Other formats not supported yet
                continue

            formats.append({
                'url': video_url,
                'ext': ext,
                'format_id': format_id,
                'quality': quality(fmt.get('quality')),
                'preference': preference,
            })

        title = item['title']
        is_live = item['type'] == 'stream'
        description = self._og_search_description(webpage, default=None) or self._html_search_meta(
            'description', webpage, 'description', default=None)
        timestamp = None
        duration = None
        if not is_live:
            duration = int_or_none(item.get('length'))
            timestamp = item.get('published')
            if timestamp:
                # Drop the last 5 chars before parsing — presumably a
                # '+0200'-style offset suffix; verify against live responses
                timestamp = parse_iso8601(timestamp[:-5])

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': item.get('image'),
            'duration': duration,
            'timestamp': timestamp,
            'is_live': is_live,
            'formats': formats,
        }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nbc.py
yt_dlp/extractor/nbc.py
import base64 import json import re import urllib.parse import xml.etree.ElementTree from .adobepass import AdobePassIE from .common import InfoExtractor from .theplatform import ThePlatformBaseIE, ThePlatformIE, default_ns from ..networking import HEADRequest from ..utils import ( ExtractorError, RegexNotFoundError, UserNotLive, clean_html, determine_ext, extract_attributes, float_or_none, get_element_html_by_class, int_or_none, join_nonempty, make_archive_id, mimetype2ext, parse_age_limit, parse_duration, parse_iso8601, remove_end, try_get, unescapeHTML, unified_timestamp, update_url_query, url_basename, url_or_none, ) from ..utils.traversal import require, traverse_obj class NBCUniversalBaseIE(ThePlatformBaseIE): _GEO_COUNTRIES = ['US'] _GEO_BYPASS = False _M3U8_RE = r'https?://[^/?#]+/prod/[\w-]+/(?P<folders>[^?#]+/)cmaf/mpeg_(?:cbcs|cenc)\w*/master_cmaf\w*\.m3u8' def _download_nbcu_smil_and_extract_m3u8_url(self, tp_path, video_id, query): smil = self._download_xml( f'https://link.theplatform.com/s/{tp_path}', video_id, 'Downloading SMIL manifest', 'Failed to download SMIL manifest', query={ **query, 'format': 'SMIL', # XXX: Do not confuse "format" with "formats" 'manifest': 'm3u', 'switch': 'HLSServiceSecure', # Or else we get broken mp4 http URLs instead of HLS }, headers=self.geo_verification_headers()) ns = f'//{{{default_ns}}}' if url := traverse_obj(smil, (f'{ns}video/@src', lambda _, v: determine_ext(v) == 'm3u8', any)): return url exc = traverse_obj(smil, (f'{ns}param', lambda _, v: v.get('name') == 'exception', '@value', any)) if exc == 'GeoLocationBlocked': self.raise_geo_restricted(countries=self._GEO_COUNTRIES) raise ExtractorError(traverse_obj(smil, (f'{ns}ref/@abstract', ..., any)), expected=exc == 'Expired') def _extract_nbcu_formats_and_subtitles(self, tp_path, video_id, query): # formats='mpeg4' will return either a working m3u8 URL or an m3u8 template for non-DRM HLS # formats='m3u+none,mpeg4' may return DRM HLS but w/the "folders" needed for 
non-DRM template query['formats'] = 'm3u+none,mpeg4' orig_m3u8_url = m3u8_url = self._download_nbcu_smil_and_extract_m3u8_url(tp_path, video_id, query) if mobj := re.fullmatch(self._M3U8_RE, m3u8_url): query['formats'] = 'mpeg4' m3u8_tmpl = self._download_nbcu_smil_and_extract_m3u8_url(tp_path, video_id, query) # Example: https://vod-lf-oneapp-prd.akamaized.net/prod/video/{folders}master_hls.m3u8 if '{folders}' in m3u8_tmpl: self.write_debug('Found m3u8 URL template, formatting URL path') m3u8_url = m3u8_tmpl.format(folders=mobj.group('folders')) if '/mpeg_cenc' in m3u8_url or '/mpeg_cbcs' in m3u8_url: self.report_drm(video_id) formats, subtitles = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False) if not formats and m3u8_url != orig_m3u8_url: orig_fmts, subtitles = self._extract_m3u8_formats_and_subtitles( orig_m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False) formats = [f for f in orig_fmts if not f.get('has_drm')] if orig_fmts and not formats: self.report_drm(video_id) return formats, subtitles def _extract_nbcu_video(self, url, display_id, old_ie_key=None): webpage = self._download_webpage(url, display_id) settings = self._search_json( r'<script[^>]+data-drupal-selector="drupal-settings-json"[^>]*>', webpage, 'settings', display_id) query = {} tve = extract_attributes(get_element_html_by_class('tve-video-deck-app', webpage) or '') if tve: account_pid = tve.get('data-mpx-media-account-pid') or tve['data-mpx-account-pid'] account_id = tve['data-mpx-media-account-id'] metadata = self._parse_json( tve.get('data-normalized-video') or '', display_id, fatal=False, transform_source=unescapeHTML) video_id = tve.get('data-guid') or metadata['guid'] if tve.get('data-entitlement') == 'auth': auth = settings['tve_adobe_auth'] release_pid = tve['data-release-pid'] resource = self._get_mvpd_resource( tve.get('data-adobe-pass-resource-id') or auth['adobePassResourceId'], tve['data-title'], release_pid, tve.get('data-rating')) 
query['auth'] = self._extract_mvpd_auth( url, release_pid, auth['adobePassRequestorId'], resource, auth['adobePassSoftwareStatement']) else: ls_playlist = traverse_obj(settings, ( 'ls_playlist', lambda _, v: v['defaultGuid'], any, {require('LS playlist')})) video_id = ls_playlist['defaultGuid'] account_pid = ls_playlist.get('mpxMediaAccountPid') or ls_playlist['mpxAccountPid'] account_id = ls_playlist['mpxMediaAccountId'] metadata = traverse_obj(ls_playlist, ('videos', lambda _, v: v['guid'] == video_id, any)) or {} tp_path = f'{account_pid}/media/guid/{account_id}/{video_id}' formats, subtitles = self._extract_nbcu_formats_and_subtitles(tp_path, video_id, query) tp_metadata = self._download_theplatform_metadata(tp_path, video_id, fatal=False) parsed_info = self._parse_theplatform_metadata(tp_metadata) self._merge_subtitles(parsed_info['subtitles'], target=subtitles) return { **parsed_info, **traverse_obj(metadata, { 'title': ('title', {str}), 'description': ('description', {str}), 'duration': ('durationInSeconds', {int_or_none}), 'timestamp': ('airDate', {parse_iso8601}), 'thumbnail': ('thumbnailUrl', {url_or_none}), 'season_number': ('seasonNumber', {int_or_none}), 'episode_number': ('episodeNumber', {int_or_none}), 'episode': ('episodeTitle', {str}), 'series': ('show', {str}), }), 'id': video_id, 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, '_old_archive_ids': [make_archive_id(old_ie_key, video_id)] if old_ie_key else None, } class NBCIE(NBCUniversalBaseIE): _VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/?#]+/video/[^/?#]+/(?P<id>\w+))' _TESTS = [{ 'url': 'http://www.nbc.com/the-tonight-show/video/jimmy-fallon-surprises-fans-at-ben-jerrys/2848237', 'info_dict': { 'id': '2848237', 'ext': 'mp4', 'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s', 'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.', 'timestamp': 
1424246400, 'upload_date': '20150218', 'uploader': 'NBCU-COM', 'episode': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s', 'episode_number': 86, 'season': 'Season 2', 'season_number': 2, 'series': 'Tonight', 'duration': 236.504, 'tags': 'count:2', 'thumbnail': r're:https?://.+\.jpg', 'categories': ['Series/The Tonight Show Starring Jimmy Fallon'], 'media_type': 'Full Episode', 'age_limit': 14, '_old_archive_ids': ['theplatform 2848237'], }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.nbc.com/the-golden-globe-awards/video/oprah-winfrey-receives-cecil-b-de-mille-award-at-the-2018-golden-globes/3646439', 'info_dict': { 'id': '3646439', 'ext': 'mp4', 'title': 'Oprah Winfrey Receives Cecil B. de Mille Award at the 2018 Golden Globes', 'episode': 'Oprah Winfrey Receives Cecil B. de Mille Award at the 2018 Golden Globes', 'episode_number': 1, 'season': 'Season 75', 'season_number': 75, 'series': 'Golden Globes', 'description': 'Oprah Winfrey receives the Cecil B. de Mille Award at the 75th Annual Golden Globe Awards.', 'uploader': 'NBCU-COM', 'upload_date': '20180107', 'timestamp': 1515312000, 'duration': 569.703, 'tags': 'count:8', 'thumbnail': r're:https?://.+\.jpg', 'media_type': 'Highlight', 'age_limit': 0, 'categories': ['Series/The Golden Globe Awards'], '_old_archive_ids': ['theplatform 3646439'], }, 'params': { 'skip_download': 'm3u8', }, }, { # Needs to be extracted from webpage instead of GraphQL 'url': 'https://www.nbc.com/paris2024/video/ali-truwit-found-purpose-pool-after-her-life-changed/para24_sww_alitruwittodayshow_240823', 'info_dict': { 'id': 'para24_sww_alitruwittodayshow_240823', 'ext': 'mp4', 'title': 'Ali Truwit found purpose in the pool after her life changed', 'description': 'md5:c16d7489e1516593de1cc5d3f39b9bdb', 'uploader': 'NBCU-SPORTS', 'duration': 311.077, 'thumbnail': r're:https?://.+\.jpg', 'episode': 'Ali Truwit found purpose in the pool after her life changed', 'timestamp': 1724435902.0, 'upload_date': '20240823', 
'_old_archive_ids': ['theplatform para24_sww_alitruwittodayshow_240823'], }, 'params': { 'skip_download': 'm3u8', }, }, { 'url': 'https://www.nbc.com/quantum-leap/video/bens-first-leap-nbcs-quantum-leap/NBCE125189978', 'only_matching': True, }, { 'url': 'https://www.nbc.com/classic-tv/charles-in-charge/video/charles-in-charge-pilot/n3310', 'only_matching': True, }, { # Percent escaped url 'url': 'https://www.nbc.com/up-all-night/video/day-after-valentine%27s-day/n2189', 'only_matching': True, }] _SOFTWARE_STATEMENT = 'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI1Yzg2YjdkYy04NDI3LTRjNDUtOGQwZi1iNDkzYmE3MmQwYjQiLCJuYmYiOjE1Nzg3MDM2MzEsImlzcyI6ImF1dGguYWRvYmUuY29tIiwiaWF0IjoxNTc4NzAzNjMxfQ.QQKIsBhAjGQTMdAqRTqhcz2Cddr4Y2hEjnSiOeKKki4nLrkDOsjQMmqeTR0hSRarraxH54wBgLvsxI7LHwKMvr7G8QpynNAxylHlQD3yhN9tFhxt4KR5wW3as02B-W2TznK9bhNWPKIyHND95Uo2Mi6rEQoq8tM9O09WPWaanE5BX_-r6Llr6dPq5F0Lpx2QOn2xYRb1T4nFxdFTNoss8GBds8OvChTiKpXMLHegLTc1OS4H_1a8tO_37jDwSdJuZ8iTyRLV4kZ2cpL6OL5JPMObD4-HQiec_dfcYgMKPiIfP9ZqdXpec2SVaCLsWEk86ZYvD97hLIQrK5rrKd1y-A' def _real_extract(self, url): permalink, video_id = self._match_valid_url(url).groups() permalink = 'http' + urllib.parse.unquote(permalink) video_data = self._download_json( 'https://friendship.nbc.co/v2/graphql', video_id, query={ 'query': '''query bonanzaPage( $app: NBCUBrands! = nbc $name: String! $oneApp: Boolean $platform: SupportedPlatforms! = web $type: EntityPageType! = VIDEO $userId: String! ) { bonanzaPage( app: $app name: $name oneApp: $oneApp platform: $platform type: $type userId: $userId ) { metadata { ... 
on VideoPageData { description episodeNumber keywords locked mpxAccountId mpxGuid rating resourceId seasonNumber secondaryTitle seriesShortTitle } } } }''', 'variables': json.dumps({ 'name': permalink, 'oneApp': True, 'userId': '0', }), })['data']['bonanzaPage']['metadata'] if not video_data: # Some videos are not available via GraphQL API webpage = self._download_webpage(url, video_id) video_data = self._search_json( r'<script>\s*PRELOAD\s*=', webpage, 'video data', video_id)['pages'][urllib.parse.urlparse(url).path]['base']['metadata'] video_id = video_data['mpxGuid'] tp_path = f'NnzsPC/media/guid/{video_data["mpxAccountId"]}/{video_id}' tpm = self._download_theplatform_metadata(tp_path, video_id, fatal=False) title = traverse_obj(tpm, ('title', {str})) or video_data.get('secondaryTitle') query = {} if video_data.get('locked'): resource = self._get_mvpd_resource( video_data['resourceId'], title, video_id, video_data.get('rating')) query['auth'] = self._extract_mvpd_auth( url, video_id, 'nbcentertainment', resource, self._SOFTWARE_STATEMENT) formats, subtitles = self._extract_nbcu_formats_and_subtitles(tp_path, video_id, query) parsed_info = self._parse_theplatform_metadata(tpm) self._merge_subtitles(parsed_info['subtitles'], target=subtitles) return { **traverse_obj(video_data, { 'description': ('description', {str}, filter), 'episode': ('secondaryTitle', {str}, filter), 'episode_number': ('episodeNumber', {int_or_none}), 'season_number': ('seasonNumber', {int_or_none}), 'age_limit': ('rating', {parse_age_limit}), 'tags': ('keywords', ..., {str}, filter, all, filter), 'series': ('seriesShortTitle', {str}), }), **parsed_info, 'id': video_id, 'title': title, 'formats': formats, 'subtitles': subtitles, '_old_archive_ids': [make_archive_id('ThePlatform', video_id)], } class NBCSportsVPlayerIE(InfoExtractor): _WORKING = False _VALID_URL_BASE = r'https?://(?:vplayer\.nbcsports\.com|(?:www\.)?nbcsports\.com/vplayer)/' _VALID_URL = _VALID_URL_BASE + 
r'(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)' _EMBED_REGEX = [rf'(?:iframe[^>]+|var video|div[^>]+data-(?:mpx-)?)[sS]rc\s?=\s?"(?P<url>{_VALID_URL_BASE}[^\"]+)'] _TESTS = [{ 'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/9CsDKds0kvHI', 'info_dict': { 'id': '9CsDKds0kvHI', 'ext': 'mp4', 'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d', 'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson', 'timestamp': 1426270238, 'upload_date': '20150313', 'uploader': 'NBCU-SPORTS', 'duration': 72.818, 'chapters': [], 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/media/PEgOtlNcC_y2', 'only_matching': True, }, { 'url': 'https://www.nbcsports.com/vplayer/p/BxmELC/nbcsports/select/PHJSaFWbrTY9?form=html&autoPlay=true', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) theplatform_url = self._html_search_regex(r'tp:releaseUrl="(.+?)"', webpage, 'url') return self.url_result(theplatform_url, 'ThePlatform') class NBCSportsIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?nbcsports\.com//?(?!vplayer/)(?:[^/]+/)+(?P<id>[0-9a-z-]+)' _TESTS = [{ # iframe src 'url': 'https://www.nbcsports.com/watch/nfl/profootballtalk/pft-pm/unpacking-addisons-reckless-driving-citation', 'info_dict': { 'id': 'PHJSaFWbrTY9', 'ext': 'mp4', 'title': 'Tom Izzo, Michigan St. 
has \'so much respect\' for Duke', 'description': 'md5:ecb459c9d59e0766ac9c7d5d0eda8113', 'uploader': 'NBCU-SPORTS', 'upload_date': '20150330', 'timestamp': 1427726529, 'chapters': [], 'thumbnail': 'https://hdliveextra-a.akamaihd.net/HD/image_sports/NBCU_Sports_Group_-_nbcsports/253/303/izzodps.jpg', 'duration': 528.395, }, }, { # data-mpx-src 'url': 'https://www.nbcsports.com/philadelphia/philadelphia-phillies/bruce-bochy-hector-neris-hes-idiot', 'only_matching': True, }, { # data-src 'url': 'https://www.nbcsports.com/boston/video/report-card-pats-secondary-no-match-josh-allen', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a', 'info_dict': { 'id': 'ln7x1qSThw4k', 'ext': 'flv', 'title': "PFT Live: New leader in the 'new-look' defense", }, 'skip': 'Invalid URL', }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) return self.url_result( NBCSportsVPlayerIE._extract_url(webpage), 'NBCSportsVPlayer') class NBCSportsStreamIE(AdobePassIE): _WORKING = False _VALID_URL = r'https?://stream\.nbcsports\.com/.+?\bpid=(?P<id>\d+)' _TESTS = [{ 'url': 'http://stream.nbcsports.com/nbcsn/generic?pid=206559', 'info_dict': { 'id': '206559', 'ext': 'mp4', 'title': 'Amgen Tour of California Women\'s Recap', 'description': 'md5:66520066b3b5281ada7698d0ea2aa894', }, 'params': { # m3u8 download 'skip_download': True, }, 'skip': 'Requires Adobe Pass Authentication', }] def _real_extract(self, url): video_id = self._match_id(url) live_source = self._download_json( f'http://stream.nbcsports.com/data/live_sources_{video_id}.json', video_id) video_source = live_source['videoSources'][0] title = video_source['title'] source_url = None for k in ('source', 'msl4source', 'iossource', 'hlsv4'): sk = k + 'Url' source_url = video_source.get(sk) or video_source.get(sk + 'Alt') if source_url: break else: source_url = 
video_source['ottStreamUrl'] is_live = video_source.get('type') == 'live' or video_source.get('status') == 'Live' resource = self._get_mvpd_resource('nbcsports', title, video_id, '') token = self._extract_mvpd_auth(url, video_id, 'nbcsports', resource, None) # XXX: None arg needs to be software_statement tokenized_url = self._download_json( 'https://token.playmakerservices.com/cdn', video_id, data=json.dumps({ 'requestorId': 'nbcsports', 'pid': video_id, 'application': 'NBCSports', 'version': 'v1', 'platform': 'desktop', 'cdn': 'akamai', 'url': video_source['sourceUrl'], 'token': base64.b64encode(token.encode()).decode(), 'resourceId': base64.b64encode(resource.encode()).decode(), }).encode())['tokenizedUrl'] formats = self._extract_m3u8_formats(tokenized_url, video_id, 'mp4') return { 'id': video_id, 'title': title, 'description': live_source.get('description'), 'formats': formats, 'is_live': is_live, } class NBCNewsIE(ThePlatformIE): # XXX: Do not subclass from concrete IE _VALID_URL = r'(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/([^/]+/)*(?:.*-)?(?P<id>[^/?]+)' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1'] _TESTS = [{ 'url': 'http://www.nbcnews.com/watch/nbcnews-com/how-twitter-reacted-to-the-snowden-interview-269389891880', 'md5': 'fb3dcd2d7b1dd9804305fa2fc95ab610', # md5 tends to fluctuate 'info_dict': { 'id': '269389891880', 'ext': 'mp4', 'title': 'How Twitter Reacted To The Snowden Interview', 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64', 'timestamp': 1401363060, 'upload_date': '20140529', 'duration': 46.0, 'thumbnail': 'https://media-cldnry.s-nbcnews.com/image/upload/MSNBC/Components/Video/140529/p_tweet_snow_140529.jpg', }, }, { 'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156', 'md5': 'fdbf39ab73a72df5896b6234ff98518a', 'info_dict': { 'id': '529953347624', 'ext': 'mp4', 'title': 'FULL EPISODE: Family Business', 
'description': 'md5:757988edbaae9d7be1d585eb5d55cc04', }, 'skip': 'This page is unavailable.', }, { 'url': 'http://www.nbcnews.com/nightly-news/video/nightly-news-with-brian-williams-full-broadcast-february-4-394064451844', 'md5': '40d0e48c68896359c80372306ece0fc3', 'info_dict': { 'id': '394064451844', 'ext': 'mp4', 'title': 'Nightly News with Brian Williams Full Broadcast (February 4)', 'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5', 'timestamp': 1423104900, 'upload_date': '20150205', 'duration': 1236.0, 'thumbnail': 'https://media-cldnry.s-nbcnews.com/image/upload/MSNBC/Components/Video/__NEW/nn_netcast_150204.jpg', }, }, { 'url': 'http://www.nbcnews.com/business/autos/volkswagen-11-million-vehicles-could-have-suspect-software-emissions-scandal-n431456', 'md5': 'ffb59bcf0733dc3c7f0ace907f5e3939', 'info_dict': { 'id': 'n431456', 'ext': 'mp4', 'title': "Volkswagen U.S. Chief: We 'Totally Screwed Up'", 'description': 'md5:d22d1281a24f22ea0880741bb4dd6301', 'upload_date': '20150922', 'timestamp': 1442917800, 'duration': 37.0, 'thumbnail': 'https://media-cldnry.s-nbcnews.com/image/upload/MSNBC/Components/Video/__NEW/x_lon_vwhorn_150922.jpg', }, }, { 'url': 'http://www.today.com/video/see-the-aurora-borealis-from-space-in-stunning-new-nasa-video-669831235788', 'md5': '693d1fa21d23afcc9b04c66b227ed9ff', 'info_dict': { 'id': '669831235788', 'ext': 'mp4', 'title': 'See the aurora borealis from space in stunning new NASA video', 'description': 'md5:74752b7358afb99939c5f8bb2d1d04b1', 'upload_date': '20160420', 'timestamp': 1461152093, 'duration': 69.0, 'thumbnail': 'https://media-cldnry.s-nbcnews.com/image/upload/MSNBC/Components/Video/201604/2016-04-20T11-35-09-133Z--1280x720.jpg', }, 'skip': 'Invalid URL', }, { 'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924', 'md5': '6d236bf4f3dddc226633ce6e2c3f814d', 'info_dict': { 'id': '314487875924', 'ext': 'mp4', 'title': 'The chaotic GOP immigration vote', 
'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.', 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1406937606, 'upload_date': '20140802', 'duration': 940.0, }, 'skip': 'Invalid URL', }, { 'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952', 'only_matching': True, }, { # From http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html 'url': 'http://www.nbcnews.com/widget/video-embed/701714499682', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html', 'info_dict': { 'id': 'x_dtl_oa_LettermanliftPR_160608', 'ext': 'mp4', 'title': 'David Letterman: A Preview', }, 'skip': 'Invalid URL', }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) data = self._search_nextjs_data(webpage, video_id)['props']['initialState'] video_data = try_get(data, lambda x: x['video']['current'], dict) if not video_data: video_data = data['article']['content'][0]['primaryMedia']['video'] title = video_data['headline']['primary'] formats = [] for va in video_data.get('videoAssets', []): public_url = va.get('publicUrl') if not public_url: continue if '://link.theplatform.com/' in public_url: public_url = update_url_query(public_url, {'format': 'redirect'}) format_id = va.get('format') if format_id == 'M3U': formats.extend(self._extract_m3u8_formats( public_url, video_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False)) continue tbr = int_or_none(va.get('bitrate'), 1000) formats.append({ 'format_id': join_nonempty(format_id, tbr), 'url': public_url, 'width': int_or_none(va.get('width')), 'height': int_or_none(va.get('height')), 'tbr': tbr, 'ext': 'mp4', }) subtitles = {} closed_captioning = video_data.get('closedCaptioning') if closed_captioning: for cc_url in 
closed_captioning.values(): if not cc_url: continue subtitles.setdefault('en', []).append({ 'url': cc_url, }) return { 'id': video_id, 'title': title, 'description': try_get(video_data, lambda x: x['description']['primary']), 'thumbnail': try_get(video_data, lambda x: x['primaryImage']['url']['primary']), 'duration': parse_duration(video_data.get('duration')), 'timestamp': unified_timestamp(video_data.get('datePublished')), 'formats': formats, 'subtitles': subtitles, } class NBCOlympicsIE(InfoExtractor): IE_NAME = 'nbcolympics' _VALID_URL = r'https?://www\.nbcolympics\.com/videos?/(?P<id>[0-9a-z-]+)' _TESTS = [{ # Geo-restricted to US 'url': 'https://www.nbcolympics.com/videos/watch-final-minutes-team-usas-mens-basketball-gold', 'info_dict': { 'id': 'SAwGfPlQ1q01', 'ext': 'mp4', 'display_id': 'watch-final-minutes-team-usas-mens-basketball-gold', 'title': 'Watch the final minutes of Team USA\'s men\'s basketball gold', 'description': 'md5:f704f591217305c9559b23b877aa8d31', 'episode': 'Watch the final minutes of Team USA\'s men\'s basketball gold', 'uploader': 'NBCU-SPORTS', 'duration': 387.053, 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1723346984, 'upload_date': '20240811', }, }, { 'url': 'http://www.nbcolympics.com/video/justin-roses-son-leo-was-tears-after-his-dad-won-gold', 'only_matching': True, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) try: drupal_settings = self._parse_json(self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'), display_id) iframe_url = drupal_settings['vod']['iframe_url'] theplatform_url = iframe_url.replace( 'vplayer.nbcolympics.com', 'player.theplatform.com') except RegexNotFoundError: theplatform_url = self._search_regex( r"([\"'])embedUrl\1: *([\"'])(?P<embedUrl>.+)\2", webpage, 'embedding URL', group='embedUrl') return { '_type': 'url_transparent', 'url': theplatform_url, 'ie_key': ThePlatformIE.ie_key(), 
'display_id': display_id, } class NBCOlympicsStreamIE(AdobePassIE): _WORKING = False IE_NAME = 'nbcolympics:stream' _VALID_URL = r'https?://stream\.nbcolympics\.com/(?P<id>[0-9a-z-]+)' _TESTS = [{ 'note': 'Tokenized m3u8 source URL', 'url': 'https://stream.nbcolympics.com/womens-soccer-group-round-11', 'info_dict': { 'id': '2019740', 'ext': 'mp4', 'title': r"re:Women's Group Stage - Netherlands vs\. Brazil [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$", }, 'params': { 'skip_download': 'm3u8', }, 'skip': 'Livestream', }, { 'note': 'Plain m3u8 source URL', 'url': 'https://stream.nbcolympics.com/gymnastics-event-finals-mens-floor-pommel-horse-womens-vault-bars', 'info_dict': { 'id': '2021729', 'ext': 'mp4', 'title': r're:Event Finals: M Floor, W Vault, M Pommel, W Uneven Bars [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', }, 'params': { 'skip_download': 'm3u8', }, 'skip': 'Livestream', }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) pid = self._search_regex(r'pid\s*=\s*(\d+);', webpage, 'pid') event_config = self._download_json( f'http://stream.nbcolympics.com/data/event_config_{pid}.json', pid, 'Downloading event config')['eventConfig'] title = event_config['eventTitle'] is_live = {'live': True, 'replay': False}.get(event_config.get('eventStatus')) source_url = self._download_json( f'https://api-leap.nbcsports.com/feeds/assets/{pid}?application=NBCOlympics&platform=desktop&format=nbc-player&env=staging', pid, 'Downloading leap config', )['videoSources'][0]['cdnSources']['primary'][0]['sourceUrl'] if event_config.get('cdnToken'): ap_resource = self._get_mvpd_resource( event_config.get('resourceId', 'NBCOlympics'), re.sub(r'[^\w\d ]+', '', event_config['eventTitle']), pid, event_config.get('ratingId', 'NO VALUE')) # XXX: The None arg below needs to be the software_statement for this requestor media_token = self._extract_mvpd_auth(url, pid, event_config.get('requestorId', 'NBCOlympics'), ap_resource, 
None) source_url = self._download_json( 'https://tokens.playmakerservices.com/', pid, 'Retrieving tokenized URL', data=json.dumps({ 'application': 'NBCSports', 'authentication-type': 'adobe-pass', 'cdn': 'akamai', 'pid': pid, 'platform': 'desktop', 'requestorId': 'NBCOlympics', 'resourceId': base64.b64encode(ap_resource.encode()).decode(), 'token': base64.b64encode(media_token.encode()).decode(), 'url': source_url, 'version': 'v1', }).encode(), )['akamai'][0]['tokenizedUrl'] formats = self._extract_m3u8_formats(source_url, pid, 'mp4', live=is_live) for f in formats: # -http_seekable requires ffmpeg 4.3+ but it doesnt seem possible to # download with ffmpeg without this option f['downloader_options'] = {'ffmpeg_args': ['-seekable', '0', '-http_seekable', '0', '-icy', '0']} return { 'id': pid, 'display_id': display_id, 'title': title, 'formats': formats, 'is_live': is_live, } class NBCStationsIE(InfoExtractor): _DOMAIN_RE = '|'.join(map(re.escape, ( 'nbcbayarea', 'nbcboston', 'nbcchicago', 'nbcconnecticut', 'nbcdfw', 'nbclosangeles',
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tempo.py
yt_dlp/extractor/tempo.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, parse_iso8601, traverse_obj, try_call, ) class IVXPlayerIE(InfoExtractor): _VALID_URL = r'ivxplayer:(?P<video_id>\d+):(?P<player_key>\w+)' _TESTS = [{ 'url': 'ivxplayer:2366065:4a89dfe6bc8f002596b1dfbd600730b1', 'info_dict': { 'id': '2366065', 'ext': 'mp4', 'duration': 112, 'upload_date': '20221204', 'title': 'Film Indonesia di Disney Content Showcase Asia Pacific 2022', 'timestamp': 1670151746, 'thumbnail': 'https://ivx-image.ivideosmart.com/serve/image/video/2366065?width=300', }, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.cantika.com/video/31737/film-indonesia-di-disney-content-showcase-asia-pacific-2022', 'info_dict': { 'id': '2374200', 'ext': 'mp4', 'duration': 110, 'title': 'Serial Indonesia di Disney Content Showcase Asia Pacific 2022', 'timestamp': 1670639416, 'upload_date': '20221210', 'thumbnail': 'https://ivx-image.ivideosmart.com/serve/image/video/2374200?width=300', }, }, { 'url': 'https://www.gooto.com/video/11437/wuling-suv-ramai-dikunjungi-di-giias-2018', 'info_dict': { 'id': '892109', 'ext': 'mp4', 'title': 'Wuling SUV Ramai Dikunjungi di GIIAS 2018', 'upload_date': '20180811', 'description': 'md5:6d901483d0aacc664aecb4489719aafa', 'duration': 75, 'timestamp': 1534011263, 'thumbnail': 'https://ivx-image.ivideosmart.com/serve/image/video/892109?width=300', }, }] @classmethod def _extract_embed_urls(cls, url, webpage): # more info at https://player.ivideosmart.com/ivsplayer/v4/dist/js/loader.js mobj = re.search( r'<ivs-player\s*[^>]+data-ivs-key\s*=\s*"(?P<player_key>[\w]+)\s*[^>]+\bdata-ivs-vid="(?P<video_id>[\w-]+)', webpage) if mobj: yield f'ivxplayer:{mobj.group("video_id")}:{mobj.group("player_key")}' raise cls.StopExtraction def _real_extract(self, url): video_id, player_key = self._match_valid_url(url).group('video_id', 'player_key') json_data = self._download_json( f'https://ivxplayer.ivideosmart.com/prod/video/{video_id}?key={player_key}', video_id) formats, 
subtitles = self._extract_m3u8_formats_and_subtitles( json_data['player']['video_url'], video_id) return { 'id': str(json_data['ivx']['id']), 'title': traverse_obj(json_data, ('ivx', 'name')), 'description': traverse_obj(json_data, ('ivx', 'description')), 'duration': int_or_none(traverse_obj(json_data, ('ivx', 'duration'))), 'timestamp': parse_iso8601(traverse_obj(json_data, ('ivx', 'published_at'))), 'formats': formats, 'subtitles': subtitles, 'thumbnail': traverse_obj(json_data, ('ivx', 'thumbnail_url')), } class TempoIE(InfoExtractor): _VALID_URL = r'https?://video\.tempo\.co/\w+/\d+/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://video.tempo.co/read/30058/anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki', 'info_dict': { 'id': '2144275', 'display_id': 'anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki', 'ext': 'mp4', 'title': 'Anies Baswedan Ajukan Banding Putusan PTUN Batalkan UMP DKI', 'duration': 85, 'description': 'md5:a6822b7c4c874fa7e5bd63e96a387b66', 'thumbnail': 'https://statik.tempo.co/data/2022/07/27/id_1128287/1128287_720.jpg', 'timestamp': 1658907970, 'upload_date': '20220727', 'tags': ['Anies Baswedan', ' PTUN', ' PTUN | Pengadilan Tata Usaha Negara', ' PTUN Batalkan UMP DKI', ' UMP DKI'], }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) _, video_id, player_key = next(IVXPlayerIE._extract_embed_urls(url, webpage)).split(':') json_ld_data = self._search_json_ld(webpage, display_id) return self.url_result( f'ivxplayer:{video_id}:{player_key}', display_id=display_id, thumbnail=self._html_search_meta('twitter:image:src', webpage) or self._og_search_thumbnail(webpage), tags=try_call(lambda: self._html_search_meta('keywords', webpage).split(',')), description=(json_ld_data.get('description') or self._html_search_meta(('description', 'twitter:description'), webpage) or self._og_search_description(webpage)), url_transparent=True)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/freespeech.py
yt_dlp/extractor/freespeech.py
from .common import InfoExtractor from .youtube import YoutubeIE class FreespeechIE(InfoExtractor): IE_NAME = 'freespeech.org' _VALID_URL = r'https?://(?:www\.)?freespeech\.org/stories/(?P<id>.+)' _TEST = { 'add_ie': ['Youtube'], 'url': 'http://www.freespeech.org/stories/fcc-announces-net-neutrality-rollback-whats-stake/', 'info_dict': { 'id': 'waRk6IPqyWM', 'ext': 'mp4', 'title': 'What\'s At Stake - Net Neutrality Special', 'description': 'Presented by MNN and FSTV', 'upload_date': '20170728', 'uploader_id': 'freespeechtv', 'uploader': 'freespeechtv', }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) youtube_url = self._search_regex( r'data-video-url="([^"]+)"', webpage, 'youtube url') return self.url_result(youtube_url, YoutubeIE.ie_key())
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false